initial commit after bringing over cleaned up code and testing some examples
parent c4858e6e0d
commit 00f4ebf5a9
93 changed files with 15189 additions and 13083 deletions
@@ -1,7 +1,8 @@
# Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.

"""core
"""
core

Top-level Python package containing CORE components.
File diff suppressed because it is too large
@@ -1,333 +0,0 @@
#
# CORE
# Copyright (c)2010-2013 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Tom Goff <thomas.goff@boeing.com>
#
'''
data.py: constant definitions for the CORE API, enumerating the
different message and TLV types (these constants are also found in coreapi.h)
'''

def enumdict(d):
    for k, v in d.iteritems():
        exec "%s = %s" % (v, k) in globals()

# Constants

CORE_API_VER = "1.23"
CORE_API_PORT = 4038

# Message types

message_types = {
    0x01: "CORE_API_NODE_MSG",
    0x02: "CORE_API_LINK_MSG",
    0x03: "CORE_API_EXEC_MSG",
    0x04: "CORE_API_REG_MSG",
    0x05: "CORE_API_CONF_MSG",
    0x06: "CORE_API_FILE_MSG",
    0x07: "CORE_API_IFACE_MSG",
    0x08: "CORE_API_EVENT_MSG",
    0x09: "CORE_API_SESS_MSG",
    0x0A: "CORE_API_EXCP_MSG",
    0x0B: "CORE_API_MSG_MAX",
}

enumdict(message_types)

# Generic Message Flags

message_flags = {
    0x01: "CORE_API_ADD_FLAG",
    0x02: "CORE_API_DEL_FLAG",
    0x04: "CORE_API_CRI_FLAG",
    0x08: "CORE_API_LOC_FLAG",
    0x10: "CORE_API_STR_FLAG",
    0x20: "CORE_API_TXT_FLAG",
    0x40: "CORE_API_TTY_FLAG",
}

enumdict(message_flags)

# Node Message TLV Types

node_tlvs = {
    0x01: "CORE_TLV_NODE_NUMBER",
    0x02: "CORE_TLV_NODE_TYPE",
    0x03: "CORE_TLV_NODE_NAME",
    0x04: "CORE_TLV_NODE_IPADDR",
    0x05: "CORE_TLV_NODE_MACADDR",
    0x06: "CORE_TLV_NODE_IP6ADDR",
    0x07: "CORE_TLV_NODE_MODEL",
    0x08: "CORE_TLV_NODE_EMUSRV",
    0x0A: "CORE_TLV_NODE_SESSION",
    0x20: "CORE_TLV_NODE_XPOS",
    0x21: "CORE_TLV_NODE_YPOS",
    0x22: "CORE_TLV_NODE_CANVAS",
    0x23: "CORE_TLV_NODE_EMUID",
    0x24: "CORE_TLV_NODE_NETID",
    0x25: "CORE_TLV_NODE_SERVICES",
    0x30: "CORE_TLV_NODE_LAT",
    0x31: "CORE_TLV_NODE_LONG",
    0x32: "CORE_TLV_NODE_ALT",
    0x42: "CORE_TLV_NODE_ICON",
    0x50: "CORE_TLV_NODE_OPAQUE",
}

enumdict(node_tlvs)

node_types = dict(enumerate([
    "CORE_NODE_DEF",
    "CORE_NODE_PHYS",
    "CORE_NODE_XEN",
    "CORE_NODE_TBD",
    "CORE_NODE_SWITCH",
    "CORE_NODE_HUB",
    "CORE_NODE_WLAN",
    "CORE_NODE_RJ45",
    "CORE_NODE_TUNNEL",
    "CORE_NODE_KTUNNEL",
    "CORE_NODE_EMANE",
]))

enumdict(node_types)

rj45_models = dict(enumerate([
    "RJ45_MODEL_LINKED",
    "RJ45_MODEL_WIRELESS",
    "RJ45_MODEL_INSTALLED",
]))

enumdict(rj45_models)

# Link Message TLV Types

link_tlvs = {
    0x01: "CORE_TLV_LINK_N1NUMBER",
    0x02: "CORE_TLV_LINK_N2NUMBER",
    0x03: "CORE_TLV_LINK_DELAY",
    0x04: "CORE_TLV_LINK_BW",
    0x05: "CORE_TLV_LINK_PER",
    0x06: "CORE_TLV_LINK_DUP",
    0x07: "CORE_TLV_LINK_JITTER",
    0x08: "CORE_TLV_LINK_MER",
    0x09: "CORE_TLV_LINK_BURST",
    CORE_TLV_NODE_SESSION: "CORE_TLV_LINK_SESSION",
    0x10: "CORE_TLV_LINK_MBURST",
    0x20: "CORE_TLV_LINK_TYPE",
    0x21: "CORE_TLV_LINK_GUIATTR",
    0x22: "CORE_TLV_LINK_UNI",
    0x23: "CORE_TLV_LINK_EMUID",
    0x24: "CORE_TLV_LINK_NETID",
    0x25: "CORE_TLV_LINK_KEY",
    0x30: "CORE_TLV_LINK_IF1NUM",
    0x31: "CORE_TLV_LINK_IF1IP4",
    0x32: "CORE_TLV_LINK_IF1IP4MASK",
    0x33: "CORE_TLV_LINK_IF1MAC",
    0x34: "CORE_TLV_LINK_IF1IP6",
    0x35: "CORE_TLV_LINK_IF1IP6MASK",
    0x36: "CORE_TLV_LINK_IF2NUM",
    0x37: "CORE_TLV_LINK_IF2IP4",
    0x38: "CORE_TLV_LINK_IF2IP4MASK",
    0x39: "CORE_TLV_LINK_IF2MAC",
    0x40: "CORE_TLV_LINK_IF2IP6",
    0x41: "CORE_TLV_LINK_IF2IP6MASK",
    0x42: "CORE_TLV_LINK_IF1NAME",
    0x43: "CORE_TLV_LINK_IF2NAME",
    0x50: "CORE_TLV_LINK_OPAQUE",
}

enumdict(link_tlvs)

link_types = dict(enumerate([
    "CORE_LINK_WIRELESS",
    "CORE_LINK_WIRED",
]))

enumdict(link_types)

# Execute Message TLV Types

exec_tlvs = {
    0x01: "CORE_TLV_EXEC_NODE",
    0x02: "CORE_TLV_EXEC_NUM",
    0x03: "CORE_TLV_EXEC_TIME",
    0x04: "CORE_TLV_EXEC_CMD",
    0x05: "CORE_TLV_EXEC_RESULT",
    0x06: "CORE_TLV_EXEC_STATUS",
    CORE_TLV_NODE_SESSION: "CORE_TLV_EXEC_SESSION",
}

enumdict(exec_tlvs)

# Register Message TLV Types

reg_tlvs = {
    0x01: "CORE_TLV_REG_WIRELESS",
    0x02: "CORE_TLV_REG_MOBILITY",
    0x03: "CORE_TLV_REG_UTILITY",
    0x04: "CORE_TLV_REG_EXECSRV",
    0x05: "CORE_TLV_REG_GUI",
    0x06: "CORE_TLV_REG_EMULSRV",
    CORE_TLV_NODE_SESSION: "CORE_TLV_REG_SESSION",
}

enumdict(reg_tlvs)

# Configuration Message TLV Types

conf_tlvs = {
    0x01: "CORE_TLV_CONF_NODE",
    0x02: "CORE_TLV_CONF_OBJ",
    0x03: "CORE_TLV_CONF_TYPE",
    0x04: "CORE_TLV_CONF_DATA_TYPES",
    0x05: "CORE_TLV_CONF_VALUES",
    0x06: "CORE_TLV_CONF_CAPTIONS",
    0x07: "CORE_TLV_CONF_BITMAP",
    0x08: "CORE_TLV_CONF_POSSIBLE_VALUES",
    0x09: "CORE_TLV_CONF_GROUPS",
    CORE_TLV_NODE_SESSION: "CORE_TLV_CONF_SESSION",
    0x0B: "CORE_TLV_CONF_IFNUM",
    CORE_TLV_NODE_NETID: "CORE_TLV_CONF_NETID",
    0x50: "CORE_TLV_CONF_OPAQUE",
}

enumdict(conf_tlvs)

conf_flags = {
    0x00: "CONF_TYPE_FLAGS_NONE",
    0x01: "CONF_TYPE_FLAGS_REQUEST",
    0x02: "CONF_TYPE_FLAGS_UPDATE",
    0x03: "CONF_TYPE_FLAGS_RESET",
}

enumdict(conf_flags)

conf_data_types = {
    0x01: "CONF_DATA_TYPE_UINT8",
    0x02: "CONF_DATA_TYPE_UINT16",
    0x03: "CONF_DATA_TYPE_UINT32",
    0x04: "CONF_DATA_TYPE_UINT64",
    0x05: "CONF_DATA_TYPE_INT8",
    0x06: "CONF_DATA_TYPE_INT16",
    0x07: "CONF_DATA_TYPE_INT32",
    0x08: "CONF_DATA_TYPE_INT64",
    0x09: "CONF_DATA_TYPE_FLOAT",
    0x0A: "CONF_DATA_TYPE_STRING",
    0x0B: "CONF_DATA_TYPE_BOOL",
}

enumdict(conf_data_types)

# File Message TLV Types

file_tlvs = {
    0x01: "CORE_TLV_FILE_NODE",
    0x02: "CORE_TLV_FILE_NAME",
    0x03: "CORE_TLV_FILE_MODE",
    0x04: "CORE_TLV_FILE_NUM",
    0x05: "CORE_TLV_FILE_TYPE",
    0x06: "CORE_TLV_FILE_SRCNAME",
    CORE_TLV_NODE_SESSION: "CORE_TLV_FILE_SESSION",
    0x10: "CORE_TLV_FILE_DATA",
    0x11: "CORE_TLV_FILE_CMPDATA",
}

enumdict(file_tlvs)

# Interface Message TLV Types

iface_tlvs = {
    0x01: "CORE_TLV_IFACE_NODE",
    0x02: "CORE_TLV_IFACE_NUM",
    0x03: "CORE_TLV_IFACE_NAME",
    0x04: "CORE_TLV_IFACE_IPADDR",
    0x05: "CORE_TLV_IFACE_MASK",
    0x06: "CORE_TLV_IFACE_MACADDR",
    0x07: "CORE_TLV_IFACE_IP6ADDR",
    0x08: "CORE_TLV_IFACE_IP6MASK",
    0x09: "CORE_TLV_IFACE_TYPE",
    CORE_TLV_NODE_SESSION: "CORE_TLV_IFACE_SESSION",
    0x0B: "CORE_TLV_IFACE_STATE",
    CORE_TLV_NODE_EMUID: "CORE_TLV_IFACE_EMUID",
    CORE_TLV_NODE_NETID: "CORE_TLV_IFACE_NETID",
}

enumdict(iface_tlvs)

# Event Message TLV Types

event_tlvs = {
    0x01: "CORE_TLV_EVENT_NODE",
    0x02: "CORE_TLV_EVENT_TYPE",
    0x03: "CORE_TLV_EVENT_NAME",
    0x04: "CORE_TLV_EVENT_DATA",
    0x05: "CORE_TLV_EVENT_TIME",
    CORE_TLV_NODE_SESSION: "CORE_TLV_EVENT_SESSION",
}

enumdict(event_tlvs)

event_types = dict(enumerate([
    "CORE_EVENT_NONE",
    "CORE_EVENT_DEFINITION_STATE",
    "CORE_EVENT_CONFIGURATION_STATE",
    "CORE_EVENT_INSTANTIATION_STATE",
    "CORE_EVENT_RUNTIME_STATE",
    "CORE_EVENT_DATACOLLECT_STATE",
    "CORE_EVENT_SHUTDOWN_STATE",
    "CORE_EVENT_START",
    "CORE_EVENT_STOP",
    "CORE_EVENT_PAUSE",
    "CORE_EVENT_RESTART",
    "CORE_EVENT_FILE_OPEN",
    "CORE_EVENT_FILE_SAVE",
    "CORE_EVENT_SCHEDULED",
    "CORE_EVENT_RECONFIGURE",
    "CORE_EVENT_INSTANTIATION_COMPLETE",
]))

enumdict(event_types)

# Session Message TLV Types

session_tlvs = {
    0x01: "CORE_TLV_SESS_NUMBER",
    0x02: "CORE_TLV_SESS_NAME",
    0x03: "CORE_TLV_SESS_FILE",
    0x04: "CORE_TLV_SESS_NODECOUNT",
    0x05: "CORE_TLV_SESS_DATE",
    0x06: "CORE_TLV_SESS_THUMB",
    0x07: "CORE_TLV_SESS_USER",
    0x0A: "CORE_TLV_SESS_OPAQUE",
}

enumdict(session_tlvs)

# Exception Message TLV Types

exception_tlvs = {
    0x01: "CORE_TLV_EXCP_NODE",
    0x02: "CORE_TLV_EXCP_SESSION",
    0x03: "CORE_TLV_EXCP_LEVEL",
    0x04: "CORE_TLV_EXCP_SOURCE",
    0x05: "CORE_TLV_EXCP_DATE",
    0x06: "CORE_TLV_EXCP_TEXT",
    0x0A: "CORE_TLV_EXCP_OPAQUE",
}

enumdict(exception_tlvs)

exception_levels = dict(enumerate([
    "CORE_EXCP_LEVEL_NONE",
    "CORE_EXCP_LEVEL_FATAL",
    "CORE_EXCP_LEVEL_ERROR",
    "CORE_EXCP_LEVEL_WARNING",
    "CORE_EXCP_LEVEL_NOTICE",
]))

enumdict(exception_levels)

del enumdict
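The deleted data.py above generated its constants by exec-ing names into module globals via enumdict(), which depends on Python 2-only constructs (dict.iteritems() and the exec statement). The new code in this commit replaces those globals with enumeration classes such as NodeTlvs in core.enumerations, as used by dataconversion.py below. A minimal sketch of that approach follows; only a handful of TLV values are shown, and the member names are assumed to mirror the real module:

from enum import Enum

class NodeTlvs(Enum):
    NUMBER = 0x01
    TYPE = 0x02
    NAME = 0x03
    SESSION = 0x0a
    OPAQUE = 0x50

# lookups replace the old module-level constants such as CORE_TLV_NODE_NAME
assert NodeTlvs.NAME.value == 0x03
assert NodeTlvs(0x0a) is NodeTlvs.SESSION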
44 daemon/core/api/dataconversion.py Normal file
@@ -0,0 +1,44 @@
"""
Converts CORE data objects into legacy API messages.
"""
from core.api import coreapi
from core.enumerations import NodeTlvs
from core.misc import log
from core.misc import structutils

logger = log.get_logger(__name__)


def convert_node(node_data):
    """
    Callback to handle a node broadcast out from a session.

    :param core.data.NodeData node_data: node data to handle
    :return: nothing
    """
    logger.debug("converting node data to message: %s", node_data)

    tlv_data = structutils.pack_values(coreapi.CoreNodeTlv, [
        (NodeTlvs.NUMBER, node_data.id),
        (NodeTlvs.TYPE, node_data.node_type),
        (NodeTlvs.NAME, node_data.name),
        (NodeTlvs.IP_ADDRESS, node_data.ip_address),
        (NodeTlvs.MAC_ADDRESS, node_data.mac_address),
        (NodeTlvs.IP6_ADDRESS, node_data.ip6_address),
        (NodeTlvs.MODEL, node_data.model),
        (NodeTlvs.EMULATION_ID, node_data.emulation_id),
        (NodeTlvs.EMULATION_SERVER, node_data.emulation_server),
        (NodeTlvs.SESSION, node_data.session),
        (NodeTlvs.X_POSITION, node_data.x_position),
        (NodeTlvs.Y_POSITION, node_data.y_position),
        (NodeTlvs.CANVAS, node_data.canvas),
        (NodeTlvs.NETWORK_ID, node_data.network_id),
        (NodeTlvs.SERVICES, node_data.services),
        (NodeTlvs.LATITUDE, node_data.latitude),
        (NodeTlvs.LONGITUDE, node_data.longitude),
        (NodeTlvs.ALTITUDE, node_data.altitude),
        (NodeTlvs.ICON, node_data.icon),
        (NodeTlvs.OPAQUE, node_data.opaque)
    ])

    return coreapi.CoreNodeMessage.pack(node_data.message_type, tlv_data)
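convert_node() relies on structutils.pack_values() to turn (TLV type, value) pairs into packed TLV data. The helper's implementation is not shown in this commit; the sketch below is only a hypothetical stand-in illustrating the assumed contract, namely that pairs whose value is None are skipped so optional TLVs are omitted:

def pack_values(tlv_class, type_value_pairs):
    # hypothetical stand-in for core.misc.structutils.pack_values()
    data = b""
    for tlv_type, value in type_value_pairs:
        if value is None:
            continue
        data += tlv_class.pack(tlv_type.value, value)
    return data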
|
File diff suppressed because it is too large
@ -3,63 +3,71 @@
|
|||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: core-dev@pf.itd.nrl.navy.mil
|
||||
# authors: core-dev@pf.itd.nrl.navy.mil
|
||||
#
|
||||
'''
|
||||
|
||||
"""
|
||||
netgraph.py: Netgraph helper functions; for now these are wrappers around
|
||||
ngctl commands.
|
||||
'''
|
||||
"""
|
||||
|
||||
import subprocess
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
|
||||
checkexec([NGCTL_BIN])
|
||||
from core import constants
|
||||
from core.misc import utils
|
||||
|
||||
utils.checkexec([constants.NGCTL_BIN])
|
||||
|
||||
|
||||
def createngnode(type, hookstr, name=None):
|
||||
''' Create a new Netgraph node of type and optionally assign name. The
|
||||
hook string hookstr should contain two names. This is a string so
|
||||
other commands may be inserted after the two names.
|
||||
Return the name and netgraph ID of the new node.
|
||||
'''
|
||||
"""
|
||||
Create a new Netgraph node of type and optionally assign name. The
|
||||
hook string hookstr should contain two names. This is a string so
|
||||
other commands may be inserted after the two names.
|
||||
Return the name and netgraph ID of the new node.
|
||||
"""
|
||||
hook1 = hookstr.split()[0]
|
||||
ngcmd = "mkpeer %s %s \n show .%s" % (type, hookstr, hook1)
|
||||
cmd = [NGCTL_BIN, "-f", "-"]
|
||||
cmdid = subprocess.Popen(cmd, stdin = subprocess.PIPE,
|
||||
stdout = subprocess.PIPE,
|
||||
stderr = subprocess.STDOUT)
|
||||
result, err = cmdid.communicate(input = ngcmd) # err will always be None
|
||||
cmd = [constants.NGCTL_BIN, "-f", "-"]
|
||||
cmdid = subprocess.Popen(cmd, stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT)
|
||||
# err will always be None
|
||||
result, err = cmdid.communicate(input=ngcmd)
|
||||
status = cmdid.wait()
|
||||
if status > 0:
|
||||
raise Exception, "error creating Netgraph node %s (%s): %s" % \
|
||||
(type, ngcmd, result)
|
||||
raise Exception("error creating Netgraph node %s (%s): %s" % (type, ngcmd, result))
|
||||
results = result.split()
|
||||
ngname = results[1]
|
||||
ngid = results[5]
|
||||
if name:
|
||||
check_call([NGCTL_BIN, "name", "[0x%s]:" % ngid, name])
|
||||
return (ngname, ngid)
|
||||
utils.check_call([constants.NGCTL_BIN, "name", "[0x%s]:" % ngid, name])
|
||||
return ngname, ngid
|
||||
|
||||
|
||||
def destroyngnode(name):
|
||||
''' Shutdown a Netgraph node having the given name.
|
||||
'''
|
||||
check_call([NGCTL_BIN, "shutdown", "%s:" % name])
|
||||
""" Shutdown a Netgraph node having the given name.
|
||||
"""
|
||||
utils.check_call([constants.NGCTL_BIN, "shutdown", "%s:" % name])
|
||||
|
||||
|
||||
def connectngnodes(name1, name2, hook1, hook2):
|
||||
''' Connect two hooks of two Netgraph nodes given by their names.
|
||||
'''
|
||||
""" Connect two hooks of two Netgraph nodes given by their names.
|
||||
"""
|
||||
node1 = "%s:" % name1
|
||||
node2 = "%s:" % name2
|
||||
check_call([NGCTL_BIN, "connect", node1, node2, hook1, hook2])
|
||||
utils.check_call([constants.NGCTL_BIN, "connect", node1, node2, hook1, hook2])
|
||||
|
||||
|
||||
def ngmessage(name, msg):
|
||||
''' Send a Netgraph message to the node named name.
|
||||
'''
|
||||
cmd = [NGCTL_BIN, "msg", "%s:" % name] + msg
|
||||
check_call(cmd)
|
||||
""" Send a Netgraph message to the node named name.
|
||||
"""
|
||||
cmd = [constants.NGCTL_BIN, "msg", "%s:" % name] + msg
|
||||
utils.check_call(cmd)
|
||||
|
||||
|
||||
def ngloadkernelmodule(name):
|
||||
''' Load a kernel module by invoking kldstat. This is needed for the
|
||||
""" Load a kernel module by invoking kldstat. This is needed for the
|
||||
ng_ether module which automatically creates Netgraph nodes when loaded.
|
||||
'''
|
||||
mutecall(["kldload", name])
|
||||
"""
|
||||
utils.mutecall(["kldload", name])
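The netgraph.py helpers above wrap ngctl, so wiring nodes together is a matter of chaining the three calls. An illustrative FreeBSD-only sequence is shown below, reusing the bridge hook string that SwitchNode passes in nodes.py; the node names "br0" and "pipe0" and the hook "link1" are made up for the example:

from core.bsd.netgraph import connectngnodes, createngnode, destroyngnode

# create a persistent bridge node and capture its Netgraph name and id
ngname, ngid = createngnode(type="bridge",
                            hookstr="link0 link0\nmsg .link0 setpersistent",
                            name="br0")

# attach some other node's "lower" hook to the bridge's next link hook
connectngnodes("br0", "pipe0", "link1", "lower")

# tear the bridge down again when finished
destroyngnode("br0")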
|
||||
|
|
|
@ -6,172 +6,173 @@
|
|||
# author: core-dev@pf.itd.nrl.navy.mil
|
||||
#
|
||||
|
||||
'''
|
||||
"""
|
||||
nodes.py: definition of CoreNode classes and other node classes that inherit
|
||||
from the CoreNode, implementing specific node types.
|
||||
'''
|
||||
"""
|
||||
|
||||
from vnode import *
|
||||
from vnet import *
|
||||
from core.constants import *
|
||||
from core.misc.ipaddr import *
|
||||
import socket
|
||||
|
||||
from core import constants
|
||||
from core.api import coreapi
|
||||
from core.bsd.netgraph import connectngnodes
|
||||
from core.bsd.netgraph import ngloadkernelmodule
|
||||
from core.bsd.vnet import NetgraphNet
|
||||
from core.bsd.vnet import NetgraphPipeNet
|
||||
from core.bsd.vnode import JailNode
|
||||
from core.enumerations import LinkTlvs
|
||||
from core.enumerations import LinkTypes
|
||||
from core.enumerations import NodeTypes
|
||||
from core.enumerations import RegisterTlvs
|
||||
from core.misc import ipaddress
|
||||
from core.misc import utils
|
||||
|
||||
utils.checkexec([constants.IFCONFIG_BIN])
|
||||
|
||||
checkexec([IFCONFIG_BIN])
|
||||
|
||||
class CoreNode(JailNode):
|
||||
apitype = coreapi.CORE_NODE_DEF
|
||||
apitype = NodeTypes.DEFAULT.value
|
||||
|
||||
|
||||
class PtpNet(NetgraphPipeNet):
|
||||
def tonodemsg(self, flags):
|
||||
''' Do not generate a Node Message for point-to-point links. They are
|
||||
""" Do not generate a Node Message for point-to-point links. They are
|
||||
built using a link message instead.
|
||||
'''
|
||||
"""
|
||||
pass
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
''' Build CORE API TLVs for a point-to-point link. One Link message
|
||||
""" Build CORE API TLVs for a point-to-point link. One Link message
|
||||
describes this network.
|
||||
'''
|
||||
"""
|
||||
tlvdata = ""
|
||||
if len(self._netif) != 2:
|
||||
return tlvdata
|
||||
(if1, if2) = self._netif.items()
|
||||
if1 = if1[1]
|
||||
if2 = if2[1]
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER,
|
||||
if1.node.objid)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER,
|
||||
if2.node.objid)
|
||||
delay = if1.getparam('delay')
|
||||
bw = if1.getparam('bw')
|
||||
loss = if1.getparam('loss')
|
||||
duplicate = if1.getparam('duplicate')
|
||||
jitter = if1.getparam('jitter')
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.N1_NUMBER.value, if1.node.objid)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.N2_NUMBER.value, if2.node.objid)
|
||||
delay = if1.getparam("delay")
|
||||
bw = if1.getparam("bw")
|
||||
loss = if1.getparam("loss")
|
||||
duplicate = if1.getparam("duplicate")
|
||||
jitter = if1.getparam("jitter")
|
||||
if delay is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_DELAY,
|
||||
delay)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.DELAY.value, delay)
|
||||
if bw is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_BW, bw)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.BANDWIDTH.value, bw)
|
||||
if loss is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_PER,
|
||||
str(loss))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.PER.value, str(loss))
|
||||
if duplicate is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_DUP,
|
||||
str(duplicate))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.DUP.value, str(duplicate))
|
||||
if jitter is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_JITTER,
|
||||
jitter)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_TYPE,
|
||||
self.linktype)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.JITTER.value, jitter)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.TYPE.value, self.linktype)
|
||||
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF1NUM, \
|
||||
if1.node.getifindex(if1))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE1_NUMBER.value, if1.node.getifindex(if1))
|
||||
if if1.hwaddr:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF1MAC,
|
||||
if1.hwaddr)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE1_MAC.value, if1.hwaddr)
|
||||
for addr in if1.addrlist:
|
||||
(ip, sep, mask) = addr.partition('/')
|
||||
(ip, sep, mask) = addr.partition("/")
|
||||
mask = int(mask)
|
||||
if isIPv4Address(ip):
|
||||
family = AF_INET
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF1IP4
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF1IP4MASK
|
||||
if ipaddress.is_ipv4_address(ip):
|
||||
family = socket.AF_INET
|
||||
tlvtypeip = LinkTlvs.INTERFACE1_IP4.value
|
||||
tlvtypemask = LinkTlvs.INTERFACE1_IP4_MASK
|
||||
else:
|
||||
family = AF_INET6
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF1IP6
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF1IP6MASK
|
||||
family = socket.AF_INET6
|
||||
tlvtypeip = LinkTlvs.INTERFACE1_IP6.value
|
||||
tlvtypemask = LinkTlvs.INTERFACE1_IP6_MASK.value
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip,
|
||||
IPAddr(af=family, addr=ipl))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip, ipaddress.IpAddress(af=family, address=ipl))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask)
|
||||
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2NUM, \
|
||||
if2.node.getifindex(if2))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_NUMBER.value, if2.node.getifindex(if2))
|
||||
if if2.hwaddr:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2MAC,
|
||||
if2.hwaddr)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_MAC.value, if2.hwaddr)
|
||||
for addr in if2.addrlist:
|
||||
(ip, sep, mask) = addr.partition('/')
|
||||
(ip, sep, mask) = addr.partition("/")
|
||||
mask = int(mask)
|
||||
if isIPv4Address(ip):
|
||||
family = AF_INET
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP4
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP4MASK
|
||||
if ipaddress.is_ipv4_address(ip):
|
||||
family = socket.AF_INET
|
||||
tlvtypeip = LinkTlvs.INTERFACE2_IP4.value
|
||||
tlvtypemask = LinkTlvs.INTERFACE2_IP4_MASK
|
||||
else:
|
||||
family = AF_INET6
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP6
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP6MASK
|
||||
family = socket.AF_INET6
|
||||
tlvtypeip = LinkTlvs.INTERFACE2_IP6.value
|
||||
tlvtypemask = LinkTlvs.INTERFACE2_IP6_MASK.value
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip,
|
||||
IPAddr(af=family, addr=ipl))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip, ipaddress.IpAddress(af=family, address=ipl))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask)
|
||||
|
||||
msg = coreapi.CoreLinkMessage.pack(flags, tlvdata)
|
||||
return [msg,]
|
||||
return [msg, ]
|
||||
|
||||
|
||||
class SwitchNode(NetgraphNet):
|
||||
ngtype = "bridge"
|
||||
nghooks = "link0 link0\nmsg .link0 setpersistent"
|
||||
apitype = coreapi.CORE_NODE_SWITCH
|
||||
apitype = NodeTypes.SWITCH.value
|
||||
policy = "ACCEPT"
|
||||
|
||||
|
||||
class HubNode(NetgraphNet):
|
||||
ngtype = "hub"
|
||||
nghooks = "link0 link0\nmsg .link0 setpersistent"
|
||||
apitype = coreapi.CORE_NODE_HUB
|
||||
apitype = NodeTypes.HUB.value
|
||||
policy = "ACCEPT"
|
||||
|
||||
|
||||
|
||||
class WlanNode(NetgraphNet):
|
||||
ngtype = "wlan"
|
||||
nghooks = "anchor anchor"
|
||||
apitype = coreapi.CORE_NODE_WLAN
|
||||
linktype = coreapi.CORE_LINK_WIRELESS
|
||||
apitype = NodeTypes.WIRELESS_LAN.value
|
||||
linktype = LinkTypes.WIRELESS.value
|
||||
policy = "DROP"
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True, policy = None):
|
||||
|
||||
def __init__(self, session, objid=None, name=None, verbose=False,
|
||||
start=True, policy=None):
|
||||
NetgraphNet.__init__(self, session, objid, name, verbose, start, policy)
|
||||
# wireless model such as basic range
|
||||
self.model = None
|
||||
# mobility model such as scripted
|
||||
self.mobility = None
|
||||
|
||||
|
||||
def attach(self, netif):
|
||||
NetgraphNet.attach(self, netif)
|
||||
if self.model:
|
||||
netif.poshook = self.model._positioncallback
|
||||
if netif.node is None:
|
||||
return
|
||||
(x,y,z) = netif.node.position.get()
|
||||
x, y, z = netif.node.position.get()
|
||||
netif.poshook(netif, x, y, z)
|
||||
|
||||
def setmodel(self, model, config):
|
||||
''' Mobility and wireless model.
|
||||
'''
|
||||
if (self.verbose):
|
||||
""" Mobility and wireless model.
|
||||
"""
|
||||
if self.verbose:
|
||||
self.info("adding model %s" % model._name)
|
||||
if model._type == coreapi.CORE_TLV_REG_WIRELESS:
|
||||
if model._type == RegisterTlvs.WIRELESS.value:
|
||||
self.model = model(session=self.session, objid=self.objid,
|
||||
verbose=self.verbose, values=config)
|
||||
if self.model._positioncallback:
|
||||
for netif in self.netifs():
|
||||
netif.poshook = self.model._positioncallback
|
||||
if netif.node is not None:
|
||||
(x,y,z) = netif.node.position.get()
|
||||
(x, y, z) = netif.node.position.get()
|
||||
netif.poshook(netif, x, y, z)
|
||||
self.model.setlinkparams()
|
||||
elif model._type == coreapi.CORE_TLV_REG_MOBILITY:
|
||||
elif model._type == RegisterTlvs.MOBILITY.value:
|
||||
self.mobility = model(session=self.session, objid=self.objid,
|
||||
verbose=self.verbose, values=config)
|
||||
|
||||
|
||||
class RJ45Node(NetgraphPipeNet):
|
||||
apitype = coreapi.CORE_NODE_RJ45
|
||||
apitype = NodeTypes.RJ45.value
|
||||
policy = "ACCEPT"
|
||||
|
||||
def __init__(self, session, objid, name, verbose, start = True):
|
||||
def __init__(self, session, objid, name, verbose, start=True):
|
||||
if start:
|
||||
ngloadkernelmodule("ng_ether")
|
||||
NetgraphPipeNet.__init__(self, session, objid, name, verbose, start)
|
||||
|
@ -186,18 +187,18 @@ class RJ45Node(NetgraphPipeNet):
|
|||
p = "promisc"
|
||||
if not promisc:
|
||||
p = "-" + p
|
||||
check_call([IFCONFIG_BIN, self.name, "up", p])
|
||||
utils.check_call([constants.IFCONFIG_BIN, self.name, "up", p])
|
||||
|
||||
def attach(self, netif):
|
||||
if len(self._netif) > 0:
|
||||
raise ValueError, \
|
||||
"RJ45 networks support at most 1 network interface"
|
||||
"RJ45 networks support at most 1 network interface"
|
||||
NetgraphPipeNet.attach(self, netif)
|
||||
connectngnodes(self.ngname, self.name, self.gethook(), "lower")
|
||||
|
||||
|
||||
class TunnelNode(NetgraphNet):
|
||||
ngtype = "pipe"
|
||||
nghooks = "upper lower"
|
||||
apitype = coreapi.CORE_NODE_TUNNEL
|
||||
apitype = NodeTypes.TUNNEL.value
|
||||
policy = "ACCEPT"
|
||||
|
||||
|
|
|
@ -5,25 +5,25 @@
|
|||
#
|
||||
# authors: core-dev@pf.itd.nrl.navy.mil
|
||||
#
|
||||
'''
|
||||
|
||||
"""
|
||||
vnet.py: NetgraphNet and NetgraphPipeNet classes that implement virtual networks
|
||||
using the FreeBSD Netgraph subsystem.
|
||||
'''
|
||||
"""
|
||||
|
||||
import sys, threading
|
||||
from core.bsd.netgraph import connectngnodes
|
||||
from core.bsd.netgraph import createngnode
|
||||
from core.bsd.netgraph import destroyngnode
|
||||
from core.bsd.netgraph import ngmessage
|
||||
from core.coreobj import PyCoreNet
|
||||
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
from core.coreobj import PyCoreNet, PyCoreObj
|
||||
from core.bsd.netgraph import *
|
||||
from core.bsd.vnode import VEth
|
||||
|
||||
class NetgraphNet(PyCoreNet):
|
||||
ngtype = None
|
||||
nghooks = ()
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True, policy = None):
|
||||
def __init__(self, session, objid=None, name=None, verbose=False,
|
||||
start=True, policy=None):
|
||||
PyCoreNet.__init__(self, session, objid, name)
|
||||
if name is None:
|
||||
name = str(self.objid)
|
||||
|
@ -40,8 +40,7 @@ class NetgraphNet(PyCoreNet):
|
|||
self.startup()
|
||||
|
||||
def startup(self):
|
||||
tmp, self.ngid = createngnode(type=self.ngtype, hookstr=self.nghooks,
|
||||
name=self.ngname)
|
||||
tmp, self.ngid = createngnode(type=self.ngtype, hookstr=self.nghooks, name=self.ngname)
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
|
@ -62,13 +61,12 @@ class NetgraphNet(PyCoreNet):
|
|||
destroyngnode(self.ngname)
|
||||
|
||||
def attach(self, netif):
|
||||
''' Attach an interface to this netgraph node. Create a pipe between
|
||||
""" Attach an interface to this netgraph node. Create a pipe between
|
||||
the interface and the hub/switch/wlan node.
|
||||
(Note that the PtpNet subclass overrides this method.)
|
||||
'''
|
||||
"""
|
||||
if self.up:
|
||||
pipe = self.session.addobj(cls = NetgraphPipeNet,
|
||||
verbose = self.verbose, start = True)
|
||||
pipe = self.session.addobj(cls=NetgraphPipeNet, verbose=self.verbose, start=True)
|
||||
pipe.attach(netif)
|
||||
hook = "link%d" % len(self._netif)
|
||||
pipe.attachnet(self, hook)
|
||||
|
@ -82,14 +80,16 @@ class NetgraphNet(PyCoreNet):
|
|||
def linked(self, netif1, netif2):
|
||||
# check if the network interfaces are attached to this network
|
||||
if self._netif[netif1] != netif1:
|
||||
raise ValueError, "inconsistency for netif %s" % netif1.name
|
||||
raise ValueError("inconsistency for netif %s" % netif1.name)
|
||||
if self._netif[netif2] != netif2:
|
||||
raise ValueError, "inconsistency for netif %s" % netif2.name
|
||||
raise ValueError("inconsistency for netif %s" % netif2.name)
|
||||
|
||||
try:
|
||||
linked = self._linked[netif1][netif2]
|
||||
except KeyError:
|
||||
linked = False
|
||||
self._linked[netif1][netif2] = linked
|
||||
|
||||
return linked
|
||||
|
||||
def unlink(self, netif1, netif2):
|
||||
|
@ -109,108 +109,106 @@ class NetgraphNet(PyCoreNet):
|
|||
self._linked[netif1][netif2] = True
|
||||
|
||||
def linknet(self, net):
|
||||
''' Link this bridge with another by creating a veth pair and installing
|
||||
""" Link this bridge with another by creating a veth pair and installing
|
||||
each device into each bridge.
|
||||
'''
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def linkconfig(self, netif, bw = None, delay = None,
|
||||
loss = None, duplicate = None, jitter = None, netif2=None):
|
||||
''' Set link effects by modifying the pipe connected to an interface.
|
||||
'''
|
||||
def linkconfig(self, netif, bw=None, delay=None,
|
||||
loss=None, duplicate=None, jitter=None, netif2=None):
|
||||
""" Set link effects by modifying the pipe connected to an interface.
|
||||
"""
|
||||
if not netif.pipe:
|
||||
self.warn("linkconfig for %s but interface %s has no pipe" % \
|
||||
(self.name, netif.name))
|
||||
self.warn("linkconfig for %s but interface %s has no pipe" % (self.name, netif.name))
|
||||
return
|
||||
return netif.pipe.linkconfig(netif, bw, delay, loss, duplicate, jitter,
|
||||
netif2)
|
||||
return netif.pipe.linkconfig(netif, bw, delay, loss, duplicate, jitter, netif2)
|
||||
|
||||
|
||||
class NetgraphPipeNet(NetgraphNet):
|
||||
ngtype = "pipe"
|
||||
nghooks = "upper lower"
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True, policy = None):
|
||||
def __init__(self, session, objid=None, name=None, verbose=False,
|
||||
start=True, policy=None):
|
||||
NetgraphNet.__init__(self, session, objid, name, verbose, start, policy)
|
||||
if start:
|
||||
# account for Ethernet header
|
||||
ngmessage(self.ngname, ["setcfg", "{", "header_offset=14", "}"])
|
||||
|
||||
def attach(self, netif):
|
||||
''' Attach an interface to this pipe node.
|
||||
""" Attach an interface to this pipe node.
|
||||
The first interface is connected to the "upper" hook, the second
|
||||
connected to the "lower" hook.
|
||||
'''
|
||||
"""
|
||||
if len(self._netif) > 1:
|
||||
raise ValueError, \
|
||||
"Netgraph pipes support at most 2 network interfaces"
|
||||
"Netgraph pipes support at most 2 network interfaces"
|
||||
if self.up:
|
||||
hook = self.gethook()
|
||||
connectngnodes(self.ngname, netif.localname, hook, netif.hook)
|
||||
if netif.pipe:
|
||||
raise ValueError, \
|
||||
"Interface %s already attached to pipe %s" % \
|
||||
(netif.name, netif.pipe.name)
|
||||
"Interface %s already attached to pipe %s" % \
|
||||
(netif.name, netif.pipe.name)
|
||||
netif.pipe = self
|
||||
self._netif[netif] = netif
|
||||
self._linked[netif] = {}
|
||||
|
||||
def attachnet(self, net, hook):
|
||||
''' Attach another NetgraphNet to this pipe node.
|
||||
'''
|
||||
""" Attach another NetgraphNet to this pipe node.
|
||||
"""
|
||||
localhook = self.gethook()
|
||||
connectngnodes(self.ngname, net.ngname, localhook, hook)
|
||||
|
||||
def gethook(self):
|
||||
''' Returns the first hook (e.g. "upper") then the second hook
|
||||
""" Returns the first hook (e.g. "upper") then the second hook
|
||||
(e.g. "lower") based on the number of connections.
|
||||
'''
|
||||
"""
|
||||
hooks = self.nghooks.split()
|
||||
if len(self._netif) == 0:
|
||||
return hooks[0]
|
||||
else:
|
||||
return hooks[1]
|
||||
|
||||
def linkconfig(self, netif, bw = None, delay = None,
|
||||
loss = None, duplicate = None, jitter = None, netif2 = None):
|
||||
''' Set link effects by sending a Netgraph setcfg message to the pipe.
|
||||
'''
|
||||
netif.setparam('bw', bw)
|
||||
netif.setparam('delay', delay)
|
||||
netif.setparam('loss', loss)
|
||||
netif.setparam('duplicate', duplicate)
|
||||
netif.setparam('jitter', jitter)
|
||||
def linkconfig(self, netif, bw=None, delay=None,
|
||||
loss=None, duplicate=None, jitter=None, netif2=None):
|
||||
""" Set link effects by sending a Netgraph setcfg message to the pipe.
|
||||
"""
|
||||
netif.setparam("bw", bw)
|
||||
netif.setparam("delay", delay)
|
||||
netif.setparam("loss", loss)
|
||||
netif.setparam("duplicate", duplicate)
|
||||
netif.setparam("jitter", jitter)
|
||||
if not self.up:
|
||||
return
|
||||
params = []
|
||||
upstream = []
|
||||
downstream = []
|
||||
if bw is not None:
|
||||
if str(bw)=="0":
|
||||
bw="-1"
|
||||
params += ["bandwidth=%s" % bw,]
|
||||
if str(bw) == "0":
|
||||
bw = "-1"
|
||||
params += ["bandwidth=%s" % bw, ]
|
||||
if delay is not None:
|
||||
if str(delay)=="0":
|
||||
delay="-1"
|
||||
params += ["delay=%s" % delay,]
|
||||
if str(delay) == "0":
|
||||
delay = "-1"
|
||||
params += ["delay=%s" % delay, ]
|
||||
if loss is not None:
|
||||
if str(loss)=="0":
|
||||
loss="-1"
|
||||
upstream += ["BER=%s" % loss,]
|
||||
downstream += ["BER=%s" % loss,]
|
||||
if str(loss) == "0":
|
||||
loss = "-1"
|
||||
upstream += ["BER=%s" % loss, ]
|
||||
downstream += ["BER=%s" % loss, ]
|
||||
if duplicate is not None:
|
||||
if str(duplicate)=="0":
|
||||
duplicate="-1"
|
||||
upstream += ["duplicate=%s" % duplicate,]
|
||||
downstream += ["duplicate=%s" % duplicate,]
|
||||
if str(duplicate) == "0":
|
||||
duplicate = "-1"
|
||||
upstream += ["duplicate=%s" % duplicate, ]
|
||||
downstream += ["duplicate=%s" % duplicate, ]
|
||||
if jitter:
|
||||
self.warn("jitter parameter ignored for link %s" % self.name)
|
||||
if len(params) > 0 or len(upstream) > 0 or len(downstream) > 0:
|
||||
setcfg = ["setcfg", "{",] + params
|
||||
setcfg = ["setcfg", "{", ] + params
|
||||
if len(upstream) > 0:
|
||||
setcfg += ["upstream={",] + upstream + ["}",]
|
||||
setcfg += ["upstream={", ] + upstream + ["}", ]
|
||||
if len(downstream) > 0:
|
||||
setcfg += ["downstream={",] + downstream + ["}",]
|
||||
setcfg += ["}",]
|
||||
setcfg += ["downstream={", ] + downstream + ["}", ]
|
||||
setcfg += ["}", ]
|
||||
ngmessage(self.ngname, setcfg)
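NetgraphPipeNet.linkconfig() above assembles an ng_pipe setcfg message in which a value of "0" means "clear this parameter" and is rewritten to -1, while loss and duplication go into per-direction upstream/downstream blocks. A simplified standalone version of that assembly is sketched here, with jitter omitted since the pipe node ignores it; the sample numbers are arbitrary:

def build_setcfg(bw=None, delay=None, loss=None):
    params, upstream, downstream = [], [], []
    if bw is not None:
        params.append("bandwidth=%s" % ("-1" if str(bw) == "0" else bw))
    if delay is not None:
        params.append("delay=%s" % ("-1" if str(delay) == "0" else delay))
    if loss is not None:
        ber = "-1" if str(loss) == "0" else loss
        upstream.append("BER=%s" % ber)
        downstream.append("BER=%s" % ber)
    setcfg = ["setcfg", "{"] + params
    if upstream:
        setcfg += ["upstream={"] + upstream + ["}"]
    if downstream:
        setcfg += ["downstream={"] + downstream + ["}"]
    setcfg += ["}"]
    return setcfg

# build_setcfg(bw=54000000, delay="0") ->
# ['setcfg', '{', 'bandwidth=54000000', 'delay=-1', '}']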
|
||||
|
||||
|
|
|
@ -3,27 +3,32 @@
|
|||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: core-dev@pf.itd.nrl.navy.mil
|
||||
# authors: core-dev@pf.itd.nrl.navy.mil
|
||||
#
|
||||
'''
|
||||
|
||||
"""
|
||||
vnode.py: SimpleJailNode and JailNode classes that implement the FreeBSD
|
||||
jail-based virtual node.
|
||||
'''
|
||||
"""
|
||||
|
||||
import os, signal, sys, subprocess, threading, string
|
||||
import random, time
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
from core.coreobj import PyCoreObj, PyCoreNode, PyCoreNetIf, Position
|
||||
from core.emane.nodes import EmaneNode
|
||||
from core.bsd.netgraph import *
|
||||
import os
|
||||
import subprocess
|
||||
import threading
|
||||
|
||||
from core import constants
|
||||
from core.bsd.netgraph import createngnode
|
||||
from core.bsd.netgraph import destroyngnode
|
||||
from core.coreobj import PyCoreNetIf
|
||||
from core.coreobj import PyCoreNode
|
||||
from core.misc import utils
|
||||
|
||||
utils.checkexec([constants.IFCONFIG_BIN, constants.VIMAGE_BIN])
|
||||
|
||||
checkexec([IFCONFIG_BIN, VIMAGE_BIN])
|
||||
|
||||
class VEth(PyCoreNetIf):
|
||||
def __init__(self, node, name, localname, mtu = 1500, net = None,
|
||||
start = True):
|
||||
PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu)
|
||||
def __init__(self, node, name, localname, mtu=1500, net=None,
|
||||
start=True):
|
||||
PyCoreNetIf.__init__(self, node=node, name=name, mtu=mtu)
|
||||
# name is the device name (e.g. ngeth0, ngeth1, etc.) before it is
|
||||
# installed in a node; the Netgraph name is renamed to localname
|
||||
# e.g. before install: name = ngeth0 localname = n0_0_123
|
||||
|
@ -45,7 +50,7 @@ class VEth(PyCoreNetIf):
|
|||
name=self.localname)
|
||||
self.name = ngname
|
||||
self.ngid = ngid
|
||||
check_call([IFCONFIG_BIN, ngname, "up"])
|
||||
utils.check_call([constants.IFCONFIG_BIN, ngname, "up"])
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
|
@ -74,15 +79,18 @@ class VEth(PyCoreNetIf):
|
|||
def sethwaddr(self, addr):
|
||||
self.hwaddr = addr
|
||||
|
||||
|
||||
class TunTap(PyCoreNetIf):
|
||||
'''TUN/TAP virtual device in TAP mode'''
|
||||
def __init__(self, node, name, localname, mtu = None, net = None,
|
||||
start = True):
|
||||
"""TUN/TAP virtual device in TAP mode"""
|
||||
|
||||
def __init__(self, node, name, localname, mtu=None, net=None,
|
||||
start=True):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class SimpleJailNode(PyCoreNode):
|
||||
def __init__(self, session, objid = None, name = None, nodedir = None,
|
||||
verbose = False):
|
||||
def __init__(self, session, objid=None, name=None, nodedir=None,
|
||||
verbose=False):
|
||||
PyCoreNode.__init__(self, session, objid, name)
|
||||
self.nodedir = nodedir
|
||||
self.verbose = verbose
|
||||
|
@ -94,17 +102,17 @@ class SimpleJailNode(PyCoreNode):
|
|||
def startup(self):
|
||||
if self.up:
|
||||
raise Exception, "already up"
|
||||
vimg = [VIMAGE_BIN, "-c", self.name]
|
||||
vimg = [constants.VIMAGE_BIN, "-c", self.name]
|
||||
try:
|
||||
os.spawnlp(os.P_WAIT, VIMAGE_BIN, *vimg)
|
||||
os.spawnlp(os.P_WAIT, constants.VIMAGE_BIN, *vimg)
|
||||
except OSError:
|
||||
raise Exception, ("vimage command not found while running: %s" % \
|
||||
vimg)
|
||||
vimg)
|
||||
self.info("bringing up loopback interface")
|
||||
self.cmd([IFCONFIG_BIN, "lo0", "127.0.0.1"])
|
||||
self.cmd([constants.IFCONFIG_BIN, "lo0", "127.0.0.1"])
|
||||
self.info("setting hostname: %s" % self.name)
|
||||
self.cmd(["hostname", self.name])
|
||||
self.cmd([SYSCTL_BIN, "vfs.morphing_symlinks=1"])
|
||||
self.cmd([constants.SYSCTL_BIN, "vfs.morphing_symlinks=1"])
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
|
@ -114,27 +122,26 @@ class SimpleJailNode(PyCoreNode):
|
|||
netif.shutdown()
|
||||
self._netif.clear()
|
||||
del self.session
|
||||
vimg = [VIMAGE_BIN, "-d", self.name]
|
||||
vimg = [constants.VIMAGE_BIN, "-d", self.name]
|
||||
try:
|
||||
os.spawnlp(os.P_WAIT, VIMAGE_BIN, *vimg)
|
||||
os.spawnlp(os.P_WAIT, constants.VIMAGE_BIN, *vimg)
|
||||
except OSError:
|
||||
raise Exception, ("vimage command not found while running: %s" % \
|
||||
vimg)
|
||||
raise Exception("vimage command not found while running: %s" % vimg)
|
||||
self.up = False
|
||||
|
||||
def cmd(self, args, wait = True):
|
||||
def cmd(self, args, wait=True):
|
||||
if wait:
|
||||
mode = os.P_WAIT
|
||||
else:
|
||||
mode = os.P_NOWAIT
|
||||
tmp = call([VIMAGE_BIN, self.name] + args, cwd=self.nodedir)
|
||||
tmp = utils.call([constants.VIMAGE_BIN, self.name] + args, cwd=self.nodedir)
|
||||
if not wait:
|
||||
tmp = None
|
||||
if tmp:
|
||||
self.warn("cmd exited with status %s: %s" % (tmp, str(args)))
|
||||
return tmp
|
||||
|
||||
def cmdresult(self, args, wait = True):
|
||||
def cmdresult(self, args, wait=True):
|
||||
cmdid, cmdin, cmdout, cmderr = self.popen(args)
|
||||
result = cmdout.read()
|
||||
result += cmderr.read()
|
||||
|
@ -145,30 +152,30 @@ class SimpleJailNode(PyCoreNode):
|
|||
status = cmdid.wait()
|
||||
else:
|
||||
status = 0
|
||||
return (status, result)
|
||||
return status, result
|
||||
|
||||
def popen(self, args):
|
||||
cmd = [VIMAGE_BIN, self.name]
|
||||
cmd = [constants.VIMAGE_BIN, self.name]
|
||||
cmd.extend(args)
|
||||
tmp = subprocess.Popen(cmd, stdin = subprocess.PIPE,
|
||||
stdout = subprocess.PIPE,
|
||||
stderr = subprocess.PIPE, cwd=self.nodedir)
|
||||
tmp = subprocess.Popen(cmd, stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE, cwd=self.nodedir)
|
||||
return tmp, tmp.stdin, tmp.stdout, tmp.stderr
|
||||
|
||||
def icmd(self, args):
|
||||
return os.spawnlp(os.P_WAIT, VIMAGE_BIN, VIMAGE_BIN, self.name, *args)
|
||||
return os.spawnlp(os.P_WAIT, constants.VIMAGE_BIN, constants.VIMAGE_BIN, self.name, *args)
|
||||
|
||||
def term(self, sh = "/bin/sh"):
|
||||
return os.spawnlp(os.P_WAIT, "xterm", "xterm", "-ut",
|
||||
"-title", self.name, "-e", VIMAGE_BIN, self.name, sh)
|
||||
def term(self, sh="/bin/sh"):
|
||||
return os.spawnlp(os.P_WAIT, "xterm", "xterm", "-ut",
|
||||
"-title", self.name, "-e", constants.VIMAGE_BIN, self.name, sh)
|
||||
|
||||
def termcmdstring(self, sh = "/bin/sh"):
|
||||
''' We add 'sudo' to the command string because the GUI runs as a
|
||||
def termcmdstring(self, sh="/bin/sh"):
|
||||
""" We add "sudo" to the command string because the GUI runs as a
|
||||
normal user.
|
||||
'''
|
||||
return "cd %s && sudo %s %s %s" % (self.nodedir, VIMAGE_BIN, self.name, sh)
|
||||
"""
|
||||
return "cd %s && sudo %s %s %s" % (self.nodedir, constants.VIMAGE_BIN, self.name, sh)
|
||||
|
||||
def shcmd(self, cmdstr, sh = "/bin/sh"):
|
||||
def shcmd(self, cmdstr, sh="/bin/sh"):
|
||||
return self.cmd([sh, "-c", cmdstr])
|
||||
|
||||
def boot(self):
|
||||
|
@ -180,9 +187,9 @@ class SimpleJailNode(PyCoreNode):
|
|||
self.addsymlink(path=target, file=None)
|
||||
|
||||
def umount(self, target):
|
||||
self.info("unmounting '%s'" % target)
|
||||
self.info("unmounting %s" % target)
|
||||
|
||||
def newveth(self, ifindex = None, ifname = None, net = None):
|
||||
def newveth(self, ifindex=None, ifname=None, net=None):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if ifindex is None:
|
||||
|
@ -191,15 +198,17 @@ class SimpleJailNode(PyCoreNode):
|
|||
ifname = "eth%d" % ifindex
|
||||
sessionid = self.session.shortsessionid()
|
||||
name = "n%s_%s_%s" % (self.objid, ifindex, sessionid)
|
||||
localname = name
|
||||
localname = name
|
||||
ifclass = VEth
|
||||
veth = ifclass(node = self, name = name, localname = localname,
|
||||
mtu = 1500, net = net, start = self.up)
|
||||
veth = ifclass(node=self, name=name, localname=localname,
|
||||
mtu=1500, net=net, start=self.up)
|
||||
if self.up:
|
||||
# install into jail
|
||||
check_call([IFCONFIG_BIN, veth.name, "vnet", self.name])
|
||||
utils.check_call([constants.IFCONFIG_BIN, veth.name, "vnet", self.name])
|
||||
|
||||
# rename from "ngeth0" to "eth0"
|
||||
self.cmd([IFCONFIG_BIN, veth.name, "name", ifname])
|
||||
self.cmd([constants.IFCONFIG_BIN, veth.name, "name", ifname])
|
||||
|
||||
veth.name = ifname
|
||||
try:
|
||||
self.addnetif(veth, ifindex)
|
||||
|
@ -214,17 +223,17 @@ class SimpleJailNode(PyCoreNode):
|
|||
def sethwaddr(self, ifindex, addr):
|
||||
self._netif[ifindex].sethwaddr(addr)
|
||||
if self.up:
|
||||
self.cmd([IFCONFIG_BIN, self.ifname(ifindex), "link",
|
||||
str(addr)])
|
||||
self.cmd([constants.IFCONFIG_BIN, self.ifname(ifindex), "link",
|
||||
str(addr)])
|
||||
|
||||
def addaddr(self, ifindex, addr):
|
||||
if self.up:
|
||||
if ':' in addr:
|
||||
if ":" in addr:
|
||||
family = "inet6"
|
||||
else:
|
||||
family = "inet"
|
||||
self.cmd([IFCONFIG_BIN, self.ifname(ifindex), family, "alias",
|
||||
str(addr)])
|
||||
self.cmd([constants.IFCONFIG_BIN, self.ifname(ifindex), family, "alias",
|
||||
str(addr)])
|
||||
self._netif[ifindex].addaddr(addr)
|
||||
|
||||
def deladdr(self, ifindex, addr):
|
||||
|
@ -233,40 +242,40 @@ class SimpleJailNode(PyCoreNode):
|
|||
except ValueError:
|
||||
self.warn("trying to delete unknown address: %s" % addr)
|
||||
if self.up:
|
||||
if ':' in addr:
|
||||
if ":" in addr:
|
||||
family = "inet6"
|
||||
else:
|
||||
family = "inet"
|
||||
self.cmd([IFCONFIG_BIN, self.ifname(ifindex), family, "-alias",
|
||||
str(addr)])
|
||||
self.cmd([constants.IFCONFIG_BIN, self.ifname(ifindex), family, "-alias",
|
||||
str(addr)])
|
||||
|
||||
valid_deladdrtype = ("inet", "inet6", "inet6link")
|
||||
def delalladdr(self, ifindex, addrtypes = valid_deladdrtype):
|
||||
addr = self.getaddr(self.ifname(ifindex), rescan = True)
|
||||
|
||||
def delalladdr(self, ifindex, addrtypes=valid_deladdrtype):
|
||||
addr = self.getaddr(self.ifname(ifindex), rescan=True)
|
||||
for t in addrtypes:
|
||||
if t not in self.valid_deladdrtype:
|
||||
raise ValueError, "addr type must be in: " + \
|
||||
" ".join(self.valid_deladdrtype)
|
||||
" ".join(self.valid_deladdrtype)
|
||||
for a in addr[t]:
|
||||
self.deladdr(ifindex, a)
|
||||
# update cached information
|
||||
self.getaddr(self.ifname(ifindex), rescan = True)
|
||||
self.getaddr(self.ifname(ifindex), rescan=True)
|
||||
|
||||
def ifup(self, ifindex):
|
||||
if self.up:
|
||||
self.cmd([IFCONFIG_BIN, self.ifname(ifindex), "up"])
|
||||
self.cmd([constants.IFCONFIG_BIN, self.ifname(ifindex), "up"])
|
||||
|
||||
def newnetif(self, net = None, addrlist = [], hwaddr = None,
|
||||
ifindex = None, ifname = None):
|
||||
def newnetif(self, net=None, addrlist=[], hwaddr=None,
|
||||
ifindex=None, ifname=None):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
ifindex = self.newveth(ifindex = ifindex, ifname = ifname,
|
||||
net = net)
|
||||
ifindex = self.newveth(ifindex=ifindex, ifname=ifname, net=net)
|
||||
if net is not None:
|
||||
self.attachnet(ifindex, net)
|
||||
if hwaddr:
|
||||
self.sethwaddr(ifindex, hwaddr)
|
||||
for addr in maketuple(addrlist):
|
||||
for addr in utils.maketuple(addrlist):
|
||||
self.addaddr(ifindex, addr)
|
||||
self.ifup(ifindex)
|
||||
return ifindex
|
||||
|
@ -280,18 +289,17 @@ class SimpleJailNode(PyCoreNode):
|
|||
self._netif[ifindex].detachnet()
|
||||
|
||||
def addfile(self, srcname, filename):
|
||||
shcmd = "mkdir -p $(dirname '%s') && mv '%s' '%s' && sync" % \
|
||||
(filename, srcname, filename)
|
||||
shcmd = 'mkdir -p $(dirname "%s") && mv "%s" "%s" && sync' % (filename, srcname, filename)
|
||||
self.shcmd(shcmd)
|
||||
|
||||
def getaddr(self, ifname, rescan = False):
|
||||
def getaddr(self, ifname, rescan=False):
|
||||
return None
|
||||
#return self.vnodeclient.getaddr(ifname = ifname, rescan = rescan)
|
||||
# return self.vnodeclient.getaddr(ifname = ifname, rescan = rescan)
|
||||
|
||||
def addsymlink(self, path, file):
|
||||
''' Create a symbolic link from /path/name/file ->
|
||||
""" Create a symbolic link from /path/name/file ->
|
||||
/tmp/pycore.nnnnn/@.conf/path.name/file
|
||||
'''
|
||||
"""
|
||||
dirname = path
|
||||
if dirname and dirname[0] == "/":
|
||||
dirname = dirname[1:]
|
||||
|
@ -316,14 +324,14 @@ class SimpleJailNode(PyCoreNode):
|
|||
self.info("creating symlink %s -> %s" % (pathname, sym))
|
||||
os.symlink(sym, pathname)
|
||||
|
||||
class JailNode(SimpleJailNode):
|
||||
|
||||
def __init__(self, session, objid = None, name = None,
|
||||
nodedir = None, bootsh = "boot.sh", verbose = False,
|
||||
start = True):
|
||||
super(JailNode, self).__init__(session = session, objid = objid,
|
||||
name = name, nodedir = nodedir,
|
||||
verbose = verbose)
|
||||
class JailNode(SimpleJailNode):
|
||||
def __init__(self, session, objid=None, name=None,
|
||||
nodedir=None, bootsh="boot.sh", verbose=False,
|
||||
start=True):
|
||||
super(JailNode, self).__init__(session=session, objid=objid,
|
||||
name=name, nodedir=nodedir,
|
||||
verbose=verbose)
|
||||
self.bootsh = bootsh
|
||||
if not start:
|
||||
return
|
||||
|
@ -341,8 +349,8 @@ class JailNode(SimpleJailNode):
|
|||
self.lock.acquire()
|
||||
try:
|
||||
super(JailNode, self).startup()
|
||||
#self.privatedir("/var/run")
|
||||
#self.privatedir("/var/log")
|
||||
# self.privatedir("/var/run")
|
||||
# self.privatedir("/var/log")
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
|
@ -351,7 +359,7 @@ class JailNode(SimpleJailNode):
|
|||
return
|
||||
self.lock.acquire()
|
||||
# services are instead stopped when session enters datacollect state
|
||||
#self.session.services.stopnodeservices(self)
|
||||
# self.session.services.stopnodeservices(self)
|
||||
try:
|
||||
super(JailNode, self).shutdown()
|
||||
finally:
|
||||
|
@ -362,7 +370,7 @@ class JailNode(SimpleJailNode):
|
|||
if path[0] != "/":
|
||||
raise ValueError, "path not fully qualified: " + path
|
||||
hostpath = os.path.join(self.nodedir,
|
||||
os.path.normpath(path).strip('/').replace('/', '.'))
|
||||
os.path.normpath(path).strip("/").replace("/", "."))
|
||||
try:
|
||||
os.mkdir(hostpath)
|
||||
except OSError:
|
||||
|
@ -371,9 +379,9 @@ class JailNode(SimpleJailNode):
|
|||
raise Exception, e
|
||||
self.mount(hostpath, path)
|
||||
|
||||
def opennodefile(self, filename, mode = "w"):
|
||||
def opennodefile(self, filename, mode="w"):
|
||||
dirname, basename = os.path.split(filename)
|
||||
#self.addsymlink(path=dirname, file=basename)
|
||||
# self.addsymlink(path=dirname, file=basename)
|
||||
if not basename:
|
||||
raise ValueError, "no basename for filename: " + filename
|
||||
if dirname and dirname[0] == "/":
|
||||
|
@ -381,14 +389,13 @@ class JailNode(SimpleJailNode):
|
|||
dirname = dirname.replace("/", ".")
|
||||
dirname = os.path.join(self.nodedir, dirname)
|
||||
if not os.path.isdir(dirname):
|
||||
os.makedirs(dirname, mode = 0755)
|
||||
os.makedirs(dirname, mode=0755)
|
||||
hostfilename = os.path.join(dirname, basename)
|
||||
return open(hostfilename, mode)
|
||||
|
||||
def nodefile(self, filename, contents, mode = 0644):
|
||||
def nodefile(self, filename, contents, mode=0644):
|
||||
f = self.opennodefile(filename, "w")
|
||||
f.write(contents)
|
||||
os.chmod(f.name, mode)
|
||||
f.close()
|
||||
self.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode))
|
||||
|
||||
self.info("created nodefile: %s; mode: 0%o" % (f.name, mode))
|
||||
|
|
|
@ -1,81 +1,108 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
"""
|
||||
conf.py: common support for configurable objects
|
||||
'''
|
||||
"""
|
||||
|
||||
import string
|
||||
from core.api import coreapi
|
||||
|
||||
from core.data import ConfigData
|
||||
from core.enumerations import ConfigDataTypes
|
||||
from core.enumerations import ConfigFlags
|
||||
from core.misc import log
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
|
||||
class ConfigurableManager(object):
|
||||
''' A generic class for managing Configurables. This class can register
|
||||
with a session to receive Config Messages for setting some parameters
|
||||
for itself or for the Configurables that it manages.
|
||||
'''
|
||||
"""
|
||||
A generic class for managing Configurables. This class can register
|
||||
with a session to receive Config Messages for setting some parameters
|
||||
for itself or for the Configurables that it manages.
|
||||
"""
|
||||
# name corresponds to configuration object field
|
||||
_name = ""
|
||||
# type corresponds with register message types
|
||||
_type = None
|
||||
|
||||
def __init__(self, session=None):
|
||||
self.session = session
|
||||
self.session.addconfobj(self._name, self._type, self.configure)
|
||||
# Configurable key=values, indexed by node number
|
||||
name = ""
|
||||
|
||||
# type corresponds with register message types
|
||||
config_type = None
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
Creates a ConfigurableManager instance.
|
||||
|
||||
:param core.session.Session session: session this manager is tied to
|
||||
:return: nothing
|
||||
"""
|
||||
# configurable key=values, indexed by node number
|
||||
self.configs = {}
|
||||
|
||||
|
||||
def configure(self, session, msg):
|
||||
''' Handle configure messages. The configuration message sent to a
|
||||
ConfigurableManager usually is used to:
|
||||
1. Request a list of Configurables (request flag)
|
||||
2. Reset manager and clear configs (reset flag)
|
||||
3. Send values that configure the manager or one of its
|
||||
Configurables
|
||||
|
||||
Returns any reply messages.
|
||||
'''
|
||||
objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ)
|
||||
conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE)
|
||||
if conftype == coreapi.CONF_TYPE_FLAGS_REQUEST:
|
||||
return self.configure_request(msg)
|
||||
elif conftype == coreapi.CONF_TYPE_FLAGS_RESET:
|
||||
if objname == "all" or objname == self._name:
|
||||
return self.configure_reset(msg)
|
||||
def configure(self, session, config_data):
|
||||
"""
|
||||
Handle configure messages. The configuration message sent to a
|
||||
ConfigurableManager usually is used to:
|
||||
1. Request a list of Configurables (request flag)
|
||||
2. Reset manager and clear configs (reset flag)
|
||||
3. Send values that configure the manager or one of its
|
||||
Configurables
|
||||
|
||||
Returns any reply messages.
|
||||
|
||||
:param ConfigData config_data: configuration data for carrying out a configuration
|
||||
:return: response messages
|
||||
"""
|
||||
|
||||
if config_data.type == ConfigFlags.REQUEST.value:
|
||||
return self.configure_request(config_data)
|
||||
elif config_data.type == ConfigFlags.RESET.value:
|
||||
return self.configure_reset(config_data)
|
||||
else:
|
||||
return self.configure_values(msg,
|
||||
msg.gettlv(coreapi.CORE_TLV_CONF_VALUES))
|
||||
return self.configure_values(config_data)
|
||||
|
||||
def configure_request(self, msg):
|
||||
''' Request configuration data.
|
||||
'''
|
||||
def configure_request(self, config_data):
|
||||
"""
|
||||
Request configuration data.
|
||||
|
||||
:param ConfigData config_data: configuration data for carrying out a configuration
|
||||
:return: nothing
|
||||
"""
|
||||
return None
|
||||
|
||||
def configure_reset(self, msg):
|
||||
''' By default, resets this manager to clear configs.
|
||||
'''
|
||||
def configure_reset(self, config_data):
|
||||
"""
|
||||
By default, resets this manager to clear configs.
|
||||
|
||||
:param ConfigData config_data: configuration data for carrying out a configuration
|
||||
:return: reset response messages, or None
|
||||
"""
|
||||
return self.reset()
|
||||
|
||||
def configure_values(self, msg, values):
|
||||
''' Values have been sent to this manager.
|
||||
'''
|
||||
|
||||
def configure_values(self, config_data):
|
||||
"""
|
||||
Values have been sent to this manager.
|
||||
|
||||
:param ConfigData config_data: configuration data for carrying out a configuration
|
||||
:return: nothing
|
||||
"""
|
||||
return None
|
||||
|
||||
def configure_values_keyvalues(self, msg, values, target, keys):
|
||||
''' Helper that can be used for configure_values for parsing in
|
||||
'key=value' strings from a values field. The key name must be
|
||||
in the keys list, and target.key=value is set.
|
||||
'''
|
||||
|
||||
def configure_values_keyvalues(self, config_data, target, keys):
|
||||
"""
|
||||
Helper that can be used for configure_values for parsing in
|
||||
'key=value' strings from a values field. The key name must be
|
||||
in the keys list, and target.key=value is set.
|
||||
|
||||
:param core.conf.ConfigData config_data: configuration data for carrying out a configuration
|
||||
:param target: target to set attribute values on
|
||||
:param keys: list of keys to verify validity
|
||||
:return: None
|
||||
"""
|
||||
values = config_data.data_values
|
||||
|
||||
if values is None:
|
||||
return None
|
||||
|
||||
kvs = values.split('|')
|
||||
for kv in kvs:
|
||||
try:
|
||||
# key=value
|
||||
(key, value) = kv.split('=', 1)
|
||||
key, value = kv.split('=', 1)
|
||||
if value is not None and not value.strip():
|
||||
value = None
|
||||
except ValueError:
|
||||
|
@ -83,25 +110,28 @@ class ConfigurableManager(object):
|
|||
key = keys[kvs.index(kv)]
|
||||
value = kv
|
||||
if key not in keys:
|
||||
raise ValueError, "invalid key: %s" % key
|
||||
raise ValueError("invalid key: %s" % key)
|
||||
if value is not None:
|
||||
setattr(target, key, value)
|
||||
|
||||
return None
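# Illustrative sketch (assumption, not part of this commit): configure_values_keyvalues()
# splits a "key=value|key=value" string and sets matching attributes on a target object.
# The target class and values below are invented for illustration.
class _FakeTarget(object):
    pass

def apply_keyvalues_example(manager):
    data = ConfigData(data_values="delay=5000|bw=54000000")
    target = _FakeTarget()
    manager.configure_values_keyvalues(data, target, keys=("delay", "bw"))
    # attribute values stay strings: target.delay == "5000", target.bw == "54000000"
    return target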
|
||||
|
||||
def reset(self):
|
||||
return None
|
||||
|
||||
|
||||
def setconfig(self, nodenum, conftype, values):
|
||||
''' add configuration values for a node to a dictionary; values are
|
||||
usually received from a Configuration Message, and may refer to a
|
||||
node for which no object exists yet
|
||||
'''
|
||||
"""
|
||||
add configuration values for a node to a dictionary; values are
|
||||
usually received from a Configuration Message, and may refer to a
|
||||
node for which no object exists yet
|
||||
"""
|
||||
logger.info("setting config for node(%s): %s - %s", nodenum, conftype, values)
|
||||
conflist = []
|
||||
if nodenum in self.configs:
|
||||
oldlist = self.configs[nodenum]
|
||||
found = False
|
||||
for (t, v) in oldlist:
|
||||
if (t == conftype):
|
||||
for t, v in oldlist:
|
||||
if t == conftype:
|
||||
# replace existing config
|
||||
found = True
|
||||
conflist.append((conftype, values))
|
||||
|
@ -114,34 +144,39 @@ class ConfigurableManager(object):
|
|||
self.configs[nodenum] = conflist
|
||||
|
||||
def getconfig(self, nodenum, conftype, defaultvalues):
|
||||
''' get configuration values for a node; if the values don't exist in
|
||||
our dictionary then return the default values supplied
|
||||
'''
|
||||
"""
|
||||
get configuration values for a node; if the values don't exist in
|
||||
our dictionary then return the default values supplied
|
||||
"""
|
||||
logger.info("getting config for node(%s): %s - default(%s)",
|
||||
nodenum, conftype, defaultvalues)
|
||||
if nodenum in self.configs:
|
||||
# return configured values
|
||||
conflist = self.configs[nodenum]
|
||||
for (t, v) in conflist:
|
||||
if (conftype is None) or (t == conftype):
|
||||
return (t, v)
|
||||
for t, v in conflist:
|
||||
if conftype is None or t == conftype:
|
||||
return t, v
|
||||
# return default values provided (may be None)
|
||||
return (conftype, defaultvalues)
|
||||
|
||||
return conftype, defaultvalues
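# Illustrative sketch (assumption, not part of this commit): the per-node store maintained
# by setconfig()/getconfig(). The model name and values here are examples only.
def store_and_fetch_example(manager):
    manager.setconfig(nodenum=1, conftype="emane_bypass", values=("0",))
    conftype, values = manager.getconfig(1, "emane_bypass", defaultvalues=None)
    assert values == ("0",)
    # unknown nodes fall back to the supplied defaults
    assert manager.getconfig(99, "emane_bypass", defaultvalues=("1",)) == ("emane_bypass", ("1",))
    return conftype, values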
|
||||
|
||||
def getallconfigs(self, use_clsmap=True):
|
||||
''' Return (nodenum, conftype, values) tuples for all stored configs.
|
||||
"""
|
||||
Return (nodenum, conftype, values) tuples for all stored configs.
|
||||
Used when reconnecting to a session.
|
||||
'''
|
||||
"""
|
||||
r = []
|
||||
for nodenum in self.configs:
|
||||
for (t, v) in self.configs[nodenum]:
|
||||
for t, v in self.configs[nodenum]:
|
||||
if use_clsmap:
|
||||
t = self._modelclsmap[t]
|
||||
r.append( (nodenum, t, v) )
|
||||
r.append((nodenum, t, v))
|
||||
return r
|
||||
|
||||
def clearconfig(self, nodenum):
|
||||
''' remove configuration values for the specified node;
|
||||
when nodenum is None, remove all configuration values
|
||||
'''
|
||||
"""
|
||||
remove configuration values for the specified node;
|
||||
when nodenum is None, remove all configuration values
|
||||
"""
|
||||
if nodenum is None:
|
||||
self.configs = {}
|
||||
return
|
||||
|
@ -149,10 +184,11 @@ class ConfigurableManager(object):
|
|||
self.configs.pop(nodenum)
|
||||
|
||||
def setconfig_keyvalues(self, nodenum, conftype, keyvalues):
|
||||
''' keyvalues list of tuples
|
||||
'''
|
||||
"""
|
||||
keyvalues list of tuples
|
||||
"""
|
||||
if conftype not in self._modelclsmap:
|
||||
self.warn("Unknown model type '%s'" % (conftype))
|
||||
logger.warn("Unknown model type '%s'" % conftype)
|
||||
return
|
||||
model = self._modelclsmap[conftype]
|
||||
keys = model.getnames()
|
||||
|
@ -160,19 +196,20 @@ class ConfigurableManager(object):
|
|||
values = list(model.getdefaultvalues())
|
||||
for key, value in keyvalues:
|
||||
if key not in keys:
|
||||
self.warn("Skipping unknown configuration key for %s: '%s'" % \
|
||||
(conftype, key))
|
||||
logger.warn("Skipping unknown configuration key for %s: '%s'" % \
|
||||
(conftype, key))
|
||||
continue
|
||||
i = keys.index(key)
|
||||
values[i] = value
|
||||
self.setconfig(nodenum, conftype, values)
|
||||
|
||||
def getmodels(self, n):
|
||||
''' Return a list of model classes and values for a net if one has been
|
||||
"""
|
||||
Return a list of model classes and values for a net if one has been
|
||||
configured. This is invoked when exporting a session to XML.
|
||||
This assumes self.configs contains an iterable of (model-names, values)
|
||||
and a self._modelclsmapdict exists.
|
||||
'''
|
||||
"""
|
||||
r = []
|
||||
if n.objid in self.configs:
|
||||
v = self.configs[n.objid]
|
||||
|
@ -183,92 +220,91 @@ class ConfigurableManager(object):
|
|||
return r
|
||||
|
||||
|
||||
def info(self, msg):
|
||||
self.session.info(msg)
|
||||
|
||||
def warn(self, msg):
|
||||
self.session.warn(msg)
|
||||
|
||||
|
||||
class Configurable(object):
|
||||
''' A generic class for managing configuration parameters.
|
||||
Parameters are sent via Configuration Messages, which allow the GUI
|
||||
to build dynamic dialogs depending on what is being configured.
|
||||
'''
|
||||
_name = ""
|
||||
"""
|
||||
A generic class for managing configuration parameters.
|
||||
Parameters are sent via Configuration Messages, which allow the GUI
|
||||
to build dynamic dialogs depending on what is being configured.
|
||||
"""
|
||||
name = ""
|
||||
# Configuration items:
|
||||
# ('name', 'type', 'default', 'possible-value-list', 'caption')
|
||||
_confmatrix = []
|
||||
_confgroups = None
|
||||
_bitmap = None
|
||||
|
||||
def __init__(self, session=None, objid=None):
|
||||
config_matrix = []
|
||||
config_groups = None
|
||||
bitmap = None
|
||||
|
||||
def __init__(self, session=None, object_id=None):
|
||||
"""
|
||||
Creates a Configurable instance.
|
||||
|
||||
:param core.session.Session session: session for this configurable
|
||||
:param object_id:
|
||||
:return:
|
||||
"""
|
||||
self.session = session
|
||||
self.objid = objid
|
||||
|
||||
self.object_id = object_id
|
||||
|
||||
def reset(self):
|
||||
pass
|
||||
|
||||
|
||||
def register(self):
|
||||
pass
|
||||
|
||||
|
||||
@classmethod
|
||||
def getdefaultvalues(cls):
|
||||
return tuple( map(lambda x: x[2], cls._confmatrix) )
|
||||
|
||||
return tuple(map(lambda x: x[2], cls.config_matrix))
|
||||
|
||||
@classmethod
|
||||
def getnames(cls):
|
||||
return tuple( map( lambda x: x[0], cls._confmatrix) )
|
||||
return tuple(map(lambda x: x[0], cls.config_matrix))
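# Illustrative sketch (assumption, not part of this commit): a minimal Configurable
# subclass showing how config_matrix rows feed getnames() and getdefaultvalues(). The
# parameter names and defaults are invented; ConfigDataTypes comes from core.enumerations
# as used elsewhere in this commit.
class _ExampleModel(Configurable):
    name = "example_model"
    # ('name', 'type', 'default', 'possible-value-list', 'caption')
    config_matrix = [
        ("bandwidth", ConfigDataTypes.UINT32.value, "54000000", "", "bandwidth (bps)"),
        ("enabled", ConfigDataTypes.BOOL.value, "1", "On,Off", "enable model"),
    ]
    config_groups = "Example Parameters:1-2"

# _ExampleModel.getnames()         -> ("bandwidth", "enabled")
# _ExampleModel.getdefaultvalues() -> ("54000000", "1")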
|
||||
|
||||
@classmethod
|
||||
def configure(cls, mgr, msg):
|
||||
''' Handle configuration messages for this object.
|
||||
'''
|
||||
def configure(cls, manager, config_data):
|
||||
"""
|
||||
Handle configuration messages for this object.
|
||||
"""
|
||||
reply = None
|
||||
nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE)
|
||||
objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ)
|
||||
conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE)
|
||||
|
||||
ifacenum = msg.gettlv(coreapi.CORE_TLV_CONF_IFNUM)
|
||||
if ifacenum is not None:
|
||||
nodenum = nodenum*1000 + ifacenum
|
||||
node_id = config_data.node
|
||||
object_name = config_data.object
|
||||
config_type = config_data.type
|
||||
interface_id = config_data.interface_number
|
||||
values_str = config_data.data_values
|
||||
|
||||
if mgr.verbose:
|
||||
mgr.info("received configure message for %s nodenum:%s" % (cls._name, str(nodenum)))
|
||||
if conftype == coreapi.CONF_TYPE_FLAGS_REQUEST:
|
||||
if mgr.verbose:
|
||||
mgr.info("replying to configure request for %s model" %
|
||||
cls._name)
|
||||
if interface_id is not None:
|
||||
node_id = node_id * 1000 + interface_id
|
||||
|
||||
logger.info("received configure message for %s nodenum:%s", cls.name, str(node_id))
|
||||
if config_type == ConfigFlags.REQUEST.value:
|
||||
logger.info("replying to configure request for %s model", cls.name)
|
||||
# when object name is "all", the reply to this request may be None
|
||||
# if this node has not been configured for this model; otherwise we
|
||||
# reply with the defaults for this model
|
||||
if objname == "all":
|
||||
if object_name == "all":
|
||||
defaults = None
|
||||
typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE
|
||||
typeflags = ConfigFlags.UPDATE.value
|
||||
else:
|
||||
defaults = cls.getdefaultvalues()
|
||||
typeflags = coreapi.CONF_TYPE_FLAGS_NONE
|
||||
values = mgr.getconfig(nodenum, cls._name, defaults)[1]
|
||||
typeflags = ConfigFlags.NONE.value
|
||||
values = manager.getconfig(node_id, cls.name, defaults)[1]
|
||||
if values is None:
|
||||
# node has no active config for this model (don't send defaults)
|
||||
return None
|
||||
# reply with config options
|
||||
reply = cls.toconfmsg(0, nodenum, typeflags, values)
|
||||
elif conftype == coreapi.CONF_TYPE_FLAGS_RESET:
|
||||
if objname == "all":
|
||||
mgr.clearconfig(nodenum)
|
||||
#elif conftype == coreapi.CONF_TYPE_FLAGS_UPDATE:
|
||||
reply = cls.config_data(0, node_id, typeflags, values)
|
||||
elif config_type == ConfigFlags.RESET.value:
|
||||
if object_name == "all":
|
||||
manager.clearconfig(node_id)
|
||||
# elif conftype == coreapi.CONF_TYPE_FLAGS_UPDATE:
|
||||
else:
|
||||
# store the configuration values for later use, when the node
|
||||
# object has been created
|
||||
if objname is None:
|
||||
mgr.info("no configuration object for node %s" % nodenum)
|
||||
if object_name is None:
|
||||
logger.info("no configuration object for node %s", node_id)
|
||||
return None
|
||||
values_str = msg.gettlv(coreapi.CORE_TLV_CONF_VALUES)
|
||||
defaults = cls.getdefaultvalues()
|
||||
if values_str is None:
|
||||
# use default or preconfigured values
|
||||
values = mgr.getconfig(nodenum, cls._name, defaults)[1]
|
||||
values = manager.getconfig(node_id, cls.name, defaults)[1]
|
||||
else:
|
||||
# use new values supplied from the conf message
|
||||
values = values_str.split('|')
|
||||
|
@ -282,59 +318,49 @@ class Configurable(object):
|
|||
try:
|
||||
new_values[keys.index(key)] = value
|
||||
except ValueError:
|
||||
mgr.info("warning: ignoring invalid key '%s'" % key)
|
||||
logger.info("warning: ignoring invalid key '%s'" % key)
|
||||
values = new_values
|
||||
mgr.setconfig(nodenum, objname, values)
|
||||
manager.setconfig(node_id, object_name, values)
|
||||
|
||||
return reply
|
||||
|
||||
@classmethod
|
||||
def toconfmsg(cls, flags, nodenum, typeflags, values):
|
||||
''' Convert this class to a Config API message. Some TLVs are defined
|
||||
by the class, but node number, conf type flags, and values must
|
||||
be passed in.
|
||||
'''
|
||||
def config_data(cls, flags, node_id, type_flags, values):
|
||||
"""
|
||||
Convert this class to a Config API message. Some TLVs are defined
|
||||
by the class, but node number, conf type flags, and values must
|
||||
be passed in.
|
||||
"""
|
||||
keys = cls.getnames()
|
||||
keyvalues = map(lambda a,b: "%s=%s" % (a,b), keys, values)
|
||||
keyvalues = map(lambda a, b: "%s=%s" % (a, b), keys, values)
|
||||
values_str = string.join(keyvalues, '|')
|
||||
tlvdata = ""
|
||||
if nodenum is not None:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE,
|
||||
nodenum)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ,
|
||||
cls._name)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE,
|
||||
typeflags)
|
||||
datatypes = tuple( map(lambda x: x[1], cls._confmatrix) )
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_DATA_TYPES,
|
||||
datatypes)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES,
|
||||
values_str)
|
||||
captions = reduce( lambda a,b: a + '|' + b, \
|
||||
map(lambda x: x[4], cls._confmatrix))
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_CAPTIONS,
|
||||
captions)
|
||||
possiblevals = reduce( lambda a,b: a + '|' + b, \
|
||||
map(lambda x: x[3], cls._confmatrix))
|
||||
tlvdata += coreapi.CoreConfTlv.pack(
|
||||
coreapi.CORE_TLV_CONF_POSSIBLE_VALUES, possiblevals)
|
||||
if cls._bitmap is not None:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_BITMAP,
|
||||
cls._bitmap)
|
||||
if cls._confgroups is not None:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_GROUPS,
|
||||
cls._confgroups)
|
||||
msg = coreapi.CoreConfMessage.pack(flags, tlvdata)
|
||||
return msg
|
||||
datatypes = tuple(map(lambda x: x[1], cls.config_matrix))
|
||||
captions = reduce(lambda a, b: a + '|' + b, map(lambda x: x[4], cls.config_matrix))
|
||||
possible_values = reduce(lambda a, b: a + '|' + b, map(lambda x: x[3], cls.config_matrix))
|
||||
|
||||
return ConfigData(
|
||||
message_type=flags,
|
||||
node=node_id,
|
||||
object=cls.name,
|
||||
type=type_flags,
|
||||
data_types=datatypes,
|
||||
data_values=values_str,
|
||||
captions=captions,
|
||||
possible_values=possible_values,
|
||||
bitmap=cls.bitmap,
|
||||
groups=cls.config_groups
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def booltooffon(value):
|
||||
''' Convenience helper turns bool into on (True) or off (False) string.
|
||||
'''
|
||||
"""
|
||||
Convenience helper turns bool into on (True) or off (False) string.
|
||||
"""
|
||||
if value == "1" or value == "true" or value == "on":
|
||||
return "on"
|
||||
else:
|
||||
return "off"
|
||||
|
||||
|
||||
@staticmethod
|
||||
def offontobool(value):
|
||||
if type(value) == str:
|
||||
|
@ -345,36 +371,37 @@ class Configurable(object):
|
|||
return value
|
||||
|
||||
@classmethod
|
||||
def valueof(cls, name, values):
|
||||
''' Helper to return a value by the name defined in confmatrix.
|
||||
Checks if it is boolean'''
|
||||
def valueof(cls, name, values):
|
||||
"""
|
||||
Helper to return a value by the name defined in confmatrix.
|
||||
Checks if it is boolean
|
||||
"""
|
||||
i = cls.getnames().index(name)
|
||||
if cls._confmatrix[i][1] == coreapi.CONF_DATA_TYPE_BOOL and \
|
||||
values[i] != "":
|
||||
if cls.config_matrix[i][1] == ConfigDataTypes.BOOL.value and values[i] != "":
|
||||
return cls.booltooffon(values[i])
|
||||
else:
|
||||
return values[i]
|
||||
|
||||
@staticmethod
|
||||
def haskeyvalues(values):
|
||||
''' Helper to check for list of key=value pairs versus a plain old
|
||||
list of values. Returns True if all elements are "key=value".
|
||||
'''
|
||||
"""
|
||||
Helper to check for list of key=value pairs versus a plain old
|
||||
list of values. Returns True if all elements are "key=value".
|
||||
"""
|
||||
if len(values) == 0:
|
||||
return False
|
||||
for v in values:
|
||||
if "=" not in v:
|
||||
return False
|
||||
return True
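# Illustrative sketch (not from this commit): haskeyvalues() decides whether a values list
# uses the "key=value" form, which determines how incoming config values are parsed.
def _haskeyvalues_examples():
    assert Configurable.haskeyvalues(["delay=5", "bw=10"]) is True
    assert Configurable.haskeyvalues(["5", "10"]) is False
    assert Configurable.haskeyvalues([]) is False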
|
||||
|
||||
|
||||
def getkeyvaluelist(self):
|
||||
''' Helper to return a list of (key, value) tuples. Keys come from
|
||||
"""
|
||||
Helper to return a list of (key, value) tuples. Keys come from
|
||||
self._confmatrix and values are instance attributes.
|
||||
'''
|
||||
"""
|
||||
r = []
|
||||
for k in self.getnames():
|
||||
if hasattr(self, k):
|
||||
r.append((k, getattr(self, k)))
|
||||
return r
|
||||
|
||||
|
||||
|
|
daemon/core/corehandlers.py (new file, 1692 lines): diff suppressed because it is too large
|
@ -1,32 +1,37 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
coreobj.py: defines the basic objects for emulation: the PyCoreObj base class,
|
||||
"""
|
||||
coreobj.py: defines the basic objects for emulation: the PyCoreObj base class,
|
||||
along with PyCoreNode, PyCoreNet, and PyCoreNetIf
|
||||
'''
|
||||
import sys, threading, os, shutil
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import socket
|
||||
import threading
|
||||
from socket import AF_INET
|
||||
from socket import AF_INET6
|
||||
|
||||
from core.api import coreapi
|
||||
from core.misc.ipaddr import *
|
||||
from core.data import NodeData, LinkData
|
||||
from core.enumerations import LinkTlvs
|
||||
from core.enumerations import LinkTypes
|
||||
from core.misc import ipaddress
|
||||
|
||||
|
||||
class Position(object):
|
||||
''' Helper class for Cartesian coordinate position
|
||||
'''
|
||||
def __init__(self, x = None, y = None, z = None):
|
||||
"""
|
||||
Helper class for Cartesian coordinate position
|
||||
"""
|
||||
|
||||
def __init__(self, x=None, y=None, z=None):
|
||||
self.x = None
|
||||
self.y = None
|
||||
self.z = None
|
||||
self.set(x, y, z)
|
||||
|
||||
def set(self, x = None, y = None, z = None):
|
||||
''' Returns True if the position has actually changed.
|
||||
'''
|
||||
def set(self, x=None, y=None, z=None):
|
||||
"""
|
||||
Returns True if the position has actually changed.
|
||||
"""
|
||||
if self.x == x and self.y == y and self.z == z:
|
||||
return False
|
||||
self.x = x
|
||||
|
@ -35,20 +40,22 @@ class Position(object):
|
|||
return True
|
||||
|
||||
def get(self):
|
||||
''' Fetch the (x,y,z) position tuple.
|
||||
'''
|
||||
return (self.x, self.y, self.z)
|
||||
"""
|
||||
Fetch the (x,y,z) position tuple.
|
||||
"""
|
||||
return self.x, self.y, self.z
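# Illustrative sketch (not from this commit): Position.set() reports whether the
# coordinates actually changed, which lets callers skip redundant interface updates.
def _position_example():
    p = Position(x=10, y=20)
    assert p.get() == (10, 20, None)
    assert p.set(10, 20) is False  # unchanged position
    assert p.set(15, 20) is True   # moved, so dependent state should be refreshed
    return p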
|
||||
|
||||
|
||||
class PyCoreObj(object):
|
||||
''' Base class for pycore objects (nodes and nets)
|
||||
'''
|
||||
"""
|
||||
Base class for pycore objects (nodes and nets)
|
||||
"""
|
||||
apitype = None
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True):
|
||||
def __init__(self, session, objid=None, name=None, start=True):
|
||||
self.session = session
|
||||
if objid is None:
|
||||
objid = session.getobjid()
|
||||
objid = session.get_object_id()
|
||||
self.objid = objid
|
||||
if name is None:
|
||||
name = "o%s" % self.objid
|
||||
|
@ -59,45 +66,50 @@ class PyCoreObj(object):
|
|||
self.canvas = None
|
||||
self.icon = None
|
||||
self.opaque = None
|
||||
self.verbose = verbose
|
||||
self.position = Position()
|
||||
|
||||
def startup(self):
|
||||
''' Each object implements its own startup method.
|
||||
'''
|
||||
"""
|
||||
Each object implements its own startup method.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def shutdown(self):
|
||||
''' Each object implements its own shutdown method.
|
||||
'''
|
||||
"""
|
||||
Each object implements its own shutdown method.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def setposition(self, x = None, y = None, z = None):
|
||||
''' Set the (x,y,z) position of the object.
|
||||
'''
|
||||
return self.position.set(x = x, y = y, z = z)
|
||||
def setposition(self, x=None, y=None, z=None):
|
||||
"""
|
||||
Set the (x,y,z) position of the object.
|
||||
"""
|
||||
return self.position.set(x=x, y=y, z=z)
|
||||
|
||||
def getposition(self):
|
||||
''' Return an (x,y,z) tuple representing this object's position.
|
||||
'''
|
||||
"""
|
||||
Return an (x,y,z) tuple representing this object's position.
|
||||
"""
|
||||
return self.position.get()
|
||||
|
||||
def ifname(self, ifindex):
|
||||
return self.netif(ifindex).name
|
||||
|
||||
def netifs(self, sort=False):
|
||||
''' Iterate over attached network interfaces.
|
||||
'''
|
||||
"""
|
||||
Iterate over attached network interfaces.
|
||||
"""
|
||||
if sort:
|
||||
return map(lambda k: self._netif[k], sorted(self._netif.keys()))
|
||||
else:
|
||||
return self._netif.itervalues()
|
||||
|
||||
def numnetif(self):
|
||||
''' Return the attached interface count.
|
||||
'''
|
||||
"""
|
||||
Return the attached interface count.
|
||||
"""
|
||||
return len(self._netif)
|
||||
|
||||
|
||||
def getifindex(self, netif):
|
||||
for ifindex in self._netif:
|
||||
if self._netif[ifindex] is netif:
|
||||
|
@ -111,95 +123,71 @@ class PyCoreObj(object):
|
|||
self.ifindex += 1
|
||||
return ifindex
|
||||
|
||||
def tonodemsg(self, flags):
|
||||
''' Build a CORE API Node Message for this object. Both nodes and
|
||||
networks can be represented by a Node Message.
|
||||
'''
|
||||
def data(self, message_type):
|
||||
"""
|
||||
Build a data object for this node.
|
||||
|
||||
:param message_type: purpose for the data object we are creating
|
||||
:return: node data object
|
||||
:rtype: core.data.NodeData
|
||||
"""
|
||||
if self.apitype is None:
|
||||
return None
|
||||
tlvdata = ""
|
||||
(x, y, z) = self.getposition()
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_NUMBER,
|
||||
self.objid)
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_TYPE,
|
||||
self.apitype)
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_NAME,
|
||||
self.name)
|
||||
if hasattr(self, "type") and self.type is not None:
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_MODEL,
|
||||
self.type)
|
||||
if hasattr(self, "server") and self.server is not None:
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_EMUSRV,
|
||||
self.server)
|
||||
|
||||
x, y, z = self.getposition()
|
||||
|
||||
model = None
|
||||
if hasattr(self, "type"):
|
||||
model = self.type
|
||||
|
||||
emulation_server = None
|
||||
if hasattr(self, "server"):
|
||||
emulation_server = self.server
|
||||
|
||||
services = None
|
||||
if hasattr(self, "services") and len(self.services) != 0:
|
||||
nodeservices = []
|
||||
for s in self.services:
|
||||
nodeservices.append(s._name)
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_SERVICES,
|
||||
"|".join(nodeservices))
|
||||
nodeservices.append(s._name)
|
||||
services = "|".join(nodeservices)
|
||||
|
||||
node_data = NodeData(
|
||||
message_type=message_type,
|
||||
id=self.objid,
|
||||
node_type=self.apitype,
|
||||
name=self.name,
|
||||
emulation_id=self.objid,
|
||||
canvas=self.canvas,
|
||||
icon=self.icon,
|
||||
opaque=self.opaque,
|
||||
x_position=x,
|
||||
y_position=y,
|
||||
model=model,
|
||||
emulation_server=emulation_server,
|
||||
services=services
|
||||
)
|
||||
|
||||
if x is not None:
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_XPOS, x)
|
||||
if y is not None:
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_YPOS, y)
|
||||
if self.canvas is not None:
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_CANVAS,
|
||||
self.canvas)
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_EMUID,
|
||||
self.objid)
|
||||
if self.icon is not None:
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_ICON,
|
||||
self.icon)
|
||||
if self.opaque is not None:
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_OPAQUE,
|
||||
self.opaque)
|
||||
msg = coreapi.CoreNodeMessage.pack(flags, tlvdata)
|
||||
return msg
|
||||
return node_data
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
''' Build CORE API Link Messages for this object. There is no default
|
||||
method for PyCoreObjs as PyCoreNodes do not implement this but
|
||||
PyCoreNets do.
|
||||
'''
|
||||
def all_link_data(self, flags):
|
||||
"""
|
||||
Build CORE API Link Messages for this object. There is no default
|
||||
method for PyCoreObjs as PyCoreNodes do not implement this but
|
||||
PyCoreNets do.
|
||||
"""
|
||||
return []
|
||||
|
||||
def info(self, msg):
|
||||
''' Utility method for printing informational messages when verbose
|
||||
is turned on.
|
||||
'''
|
||||
if self.verbose:
|
||||
print "%s: %s" % (self.name, msg)
|
||||
sys.stdout.flush()
|
||||
|
||||
def warn(self, msg):
|
||||
''' Utility method for printing warning/error messages
|
||||
'''
|
||||
print >> sys.stderr, "%s: %s" % (self.name, msg)
|
||||
sys.stderr.flush()
|
||||
|
||||
def exception(self, level, source, text):
|
||||
''' Generate an Exception Message for this session, providing this
|
||||
object number.
|
||||
'''
|
||||
if self.session:
|
||||
id = None
|
||||
if isinstance(self.objid, int):
|
||||
id = self.objid
|
||||
elif isinstance(self.objid, str) and self.objid.isdigit():
|
||||
id = int(self.objid)
|
||||
self.session.exception(level, source, id, text)
|
||||
|
||||
|
||||
class PyCoreNode(PyCoreObj):
|
||||
''' Base class for nodes
|
||||
'''
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True):
|
||||
''' Initialization for node objects.
|
||||
'''
|
||||
PyCoreObj.__init__(self, session, objid, name, verbose=verbose,
|
||||
start=start)
|
||||
"""
|
||||
Base class for nodes
|
||||
"""
|
||||
|
||||
def __init__(self, session, objid=None, name=None, start=True):
|
||||
"""
|
||||
Initialization for node objects.
|
||||
"""
|
||||
PyCoreObj.__init__(self, session, objid, name, start=start)
|
||||
self.services = []
|
||||
if not hasattr(self, "type"):
|
||||
self.type = None
|
||||
|
@ -207,26 +195,26 @@ class PyCoreNode(PyCoreObj):
|
|||
|
||||
def nodeid(self):
|
||||
return self.objid
|
||||
|
||||
def addservice(self, service):
|
||||
|
||||
def addservice(self, service):
|
||||
if service is not None:
|
||||
self.services.append(service)
|
||||
|
||||
def makenodedir(self):
|
||||
if self.nodedir is None:
|
||||
self.nodedir = \
|
||||
os.path.join(self.session.sessiondir, self.name + ".conf")
|
||||
os.path.join(self.session.session_dir, self.name + ".conf")
|
||||
os.makedirs(self.nodedir)
|
||||
self.tmpnodedir = True
|
||||
else:
|
||||
self.tmpnodedir = False
|
||||
|
||||
|
||||
def rmnodedir(self):
|
||||
if hasattr(self.session.options, 'preservedir'):
|
||||
if self.session.options.preservedir == '1':
|
||||
return
|
||||
if self.tmpnodedir:
|
||||
shutil.rmtree(self.nodedir, ignore_errors = True)
|
||||
shutil.rmtree(self.nodedir, ignore_errors=True)
|
||||
|
||||
def addnetif(self, netif, ifindex):
|
||||
if ifindex in self._netif:
|
||||
|
@ -241,12 +229,12 @@ class PyCoreNode(PyCoreObj):
|
|||
netif.shutdown()
|
||||
del netif
|
||||
|
||||
def netif(self, ifindex, net = None):
|
||||
def netif(self, ifindex, net=None):
|
||||
if ifindex in self._netif:
|
||||
return self._netif[ifindex]
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def attachnet(self, ifindex, net):
|
||||
if ifindex not in self._netif:
|
||||
raise ValueError, "ifindex %s does not exist" % ifindex
|
||||
|
@ -257,8 +245,8 @@ class PyCoreNode(PyCoreObj):
|
|||
raise ValueError, "ifindex %s does not exist" % ifindex
|
||||
self._netif[ifindex].detachnet()
|
||||
|
||||
def setposition(self, x = None, y = None, z = None):
|
||||
changed = PyCoreObj.setposition(self, x = x, y = y, z = z)
|
||||
def setposition(self, x=None, y=None, z=None):
|
||||
changed = PyCoreObj.setposition(self, x=x, y=y, z=z)
|
||||
if not changed:
|
||||
# save extra interface range calculations
|
||||
return
|
||||
|
@ -266,31 +254,32 @@ class PyCoreNode(PyCoreObj):
|
|||
netif.setposition(x, y, z)
|
||||
|
||||
def commonnets(self, obj, want_ctrl=False):
|
||||
''' Given another node or net object, return common networks between
|
||||
this node and that object. A list of tuples is returned, with each tuple
|
||||
consisting of (network, interface1, interface2).
|
||||
'''
|
||||
"""
|
||||
Given another node or net object, return common networks between
|
||||
this node and that object. A list of tuples is returned, with each tuple
|
||||
consisting of (network, interface1, interface2).
|
||||
"""
|
||||
r = []
|
||||
for netif1 in self.netifs():
|
||||
if not want_ctrl and hasattr(netif1, 'control'):
|
||||
continue
|
||||
for netif2 in obj.netifs():
|
||||
if netif1.net == netif2.net:
|
||||
r += (netif1.net, netif1, netif2),
|
||||
r += (netif1.net, netif1, netif2),
|
||||
return r
|
||||
|
||||
|
||||
|
||||
class PyCoreNet(PyCoreObj):
|
||||
''' Base class for networks
|
||||
'''
|
||||
linktype = coreapi.CORE_LINK_WIRED
|
||||
"""
|
||||
Base class for networks
|
||||
"""
|
||||
linktype = LinkTypes.WIRED.value
|
||||
|
||||
def __init__(self, session, objid, name, verbose = False, start = True):
|
||||
''' Initialization for network objects.
|
||||
'''
|
||||
PyCoreObj.__init__(self, session, objid, name, verbose=verbose,
|
||||
start=start)
|
||||
def __init__(self, session, objid, name, start=True):
|
||||
"""
|
||||
Initialization for network objects.
|
||||
"""
|
||||
PyCoreObj.__init__(self, session, objid, name, start=start)
|
||||
self._linked = {}
|
||||
self._linked_lock = threading.Lock()
|
||||
|
||||
|
@ -300,7 +289,7 @@ class PyCoreNet(PyCoreObj):
|
|||
netif.netifi = i
|
||||
with self._linked_lock:
|
||||
self._linked[netif] = {}
|
||||
|
||||
|
||||
def detach(self, netif):
|
||||
del self._netif[netif.netifi]
|
||||
netif.netifi = None
|
||||
|
@ -308,37 +297,37 @@ class PyCoreNet(PyCoreObj):
|
|||
del self._linked[netif]
|
||||
|
||||
def netifparamstolink(self, netif):
|
||||
''' Helper for tolinkmsgs() to build TLVs having link parameters
|
||||
from interface parameters.
|
||||
'''
|
||||
tlvdata = ""
|
||||
"""
|
||||
Helper for tolinkmsgs() to build TLVs having link parameters
|
||||
from interface parameters.
|
||||
"""
|
||||
|
||||
delay = netif.getparam('delay')
|
||||
bw = netif.getparam('bw')
|
||||
loss = netif.getparam('loss')
|
||||
duplicate = netif.getparam('duplicate')
|
||||
jitter = netif.getparam('jitter')
|
||||
|
||||
tlvdata = ""
|
||||
if delay is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_DELAY,
|
||||
delay)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.DELAY.value, delay)
|
||||
if bw is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_BW, bw)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.BANDWIDTH.value, bw)
|
||||
if loss is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_PER,
|
||||
str(loss))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.PER.value, str(loss))
|
||||
if duplicate is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_DUP,
|
||||
str(duplicate))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.DUP.value, str(duplicate))
|
||||
if jitter is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_JITTER,
|
||||
jitter)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.JITTER.value, jitter)
|
||||
return tlvdata
|
||||
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
''' Build CORE API Link Messages for this network. Each link message
|
||||
describes a link between this network and a node.
|
||||
'''
|
||||
msgs = []
|
||||
def all_link_data(self, flags):
|
||||
"""
|
||||
Build CORE API Link Messages for this network. Each link message
|
||||
describes a link between this network and a node.
|
||||
"""
|
||||
all_links = []
|
||||
|
||||
# build a link message from this network node to each node having a
|
||||
# connected interface
|
||||
for netif in self.netifs(sort=True):
|
||||
|
@ -358,61 +347,77 @@ class PyCoreNet(PyCoreObj):
|
|||
netif.swapparams('_params_up')
|
||||
if netif.getparams() != upstream_params:
|
||||
uni = True
|
||||
|
||||
tlvdata = ""
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER,
|
||||
self.objid)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER,
|
||||
otherobj.objid)
|
||||
tlvdata += self.netifparamstolink(netif)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_TYPE,
|
||||
self.linktype)
|
||||
|
||||
unidirectional = 0
|
||||
if uni:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_UNI,
|
||||
1)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2NUM,
|
||||
otherobj.getifindex(netif))
|
||||
if netif.hwaddr:
|
||||
tlvdata += \
|
||||
coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2MAC,
|
||||
netif.hwaddr)
|
||||
for addr in netif.addrlist:
|
||||
(ip, sep, mask) = addr.partition('/')
|
||||
unidirectional = 1
|
||||
|
||||
interface2_ip4 = None
|
||||
interface2_ip4_mask = None
|
||||
interface2_ip6 = None
|
||||
interface2_ip6_mask = None
|
||||
for address in netif.addrlist:
|
||||
ip, sep, mask = address.partition('/')
|
||||
mask = int(mask)
|
||||
if isIPv4Address(ip):
|
||||
if ipaddress.is_ipv4_address(ip):
|
||||
family = AF_INET
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP4
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP4MASK
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
interface2_ip4 = ipaddress.IpAddress(af=family, address=ipl)
|
||||
interface2_ip4_mask = mask
|
||||
else:
|
||||
family = AF_INET6
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP6
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP6MASK
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip, \
|
||||
IPAddr(af=family, addr=ipl))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask)
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
interface2_ip6 = ipaddress.IpAddress(af=family, address=ipl)
|
||||
interface2_ip6_mask = mask
|
||||
|
||||
# TODO: not currently used
|
||||
# loss = netif.getparam('loss')
|
||||
link_data = LinkData(
|
||||
message_type=flags,
|
||||
node1_id=self.objid,
|
||||
node2_id=otherobj.objid,
|
||||
link_type=self.linktype,
|
||||
unidirectional=unidirectional,
|
||||
interface2_id=otherobj.getifindex(netif),
|
||||
interface2_mac=netif.hwaddr,
|
||||
interface2_ip4=interface2_ip4,
|
||||
interface2_ip4_mask=interface2_ip4_mask,
|
||||
interface2_ip6=interface2_ip6,
|
||||
interface2_ip6_mask=interface2_ip6_mask,
|
||||
delay=netif.getparam("delay"),
|
||||
bandwidth=netif.getparam("bw"),
|
||||
dup=netif.getparam("duplicate"),
|
||||
jitter=netif.getparam("jitter")
|
||||
)
|
||||
|
||||
all_links.append(link_data)
|
||||
|
||||
msg = coreapi.CoreLinkMessage.pack(flags, tlvdata)
|
||||
msgs.append(msg)
|
||||
if not uni:
|
||||
continue
|
||||
# build a 2nd link message for any upstream link parameters
|
||||
tlvdata = ""
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER,
|
||||
otherobj.objid)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER,
|
||||
self.objid)
|
||||
|
||||
netif.swapparams('_params_up')
|
||||
tlvdata += self.netifparamstolink(netif)
|
||||
link_data = LinkData(
|
||||
message_type=0,
|
||||
node1_id=otherobj.objid,
|
||||
node2_id=self.objid,
|
||||
unidirectional=1,
|
||||
delay=netif.getparam("delay"),
|
||||
bandwidth=netif.getparam("bw"),
|
||||
dup=netif.getparam("duplicate"),
|
||||
jitter=netif.getparam("jitter")
|
||||
)
|
||||
netif.swapparams('_params_up')
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_UNI, 1)
|
||||
msg = coreapi.CoreLinkMessage.pack(0, tlvdata)
|
||||
msgs.append(msg)
|
||||
return msgs
|
||||
|
||||
all_links.append(link_data)
|
||||
|
||||
return all_links
|
||||
|
||||
|
||||
class PyCoreNetIf(object):
|
||||
''' Base class for interfaces.
|
||||
'''
|
||||
"""
|
||||
Base class for interfaces.
|
||||
"""
|
||||
|
||||
def __init__(self, node, name, mtu):
|
||||
self.node = node
|
||||
self.name = name
|
||||
|
@ -428,13 +433,15 @@ class PyCoreNetIf(object):
|
|||
self.transport_type = None
|
||||
# interface index on the network
|
||||
self.netindex = None
|
||||
# index used to find flow data
|
||||
self.flow_id = None
|
||||
|
||||
def startup(self):
|
||||
pass
|
||||
|
||||
def shutdown(self):
|
||||
pass
|
||||
|
||||
|
||||
def attachnet(self, net):
|
||||
if self.net:
|
||||
self.detachnet()
|
||||
|
@ -456,25 +463,28 @@ class PyCoreNetIf(object):
|
|||
self.hwaddr = addr
|
||||
|
||||
def getparam(self, key):
|
||||
''' Retrieve a parameter from the _params dict,
|
||||
or None if the parameter does not exist.
|
||||
'''
|
||||
"""
|
||||
Retrieve a parameter from the _params dict,
|
||||
or None if the parameter does not exist.
|
||||
"""
|
||||
if key not in self._params:
|
||||
return None
|
||||
return self._params[key]
|
||||
|
||||
|
||||
def getparams(self):
|
||||
''' Return (key, value) pairs from the _params dict.
|
||||
'''
|
||||
"""
|
||||
Return (key, value) pairs from the _params dict.
|
||||
"""
|
||||
r = []
|
||||
for k in sorted(self._params.keys()):
|
||||
r.append((k, self._params[k]))
|
||||
return r
|
||||
|
||||
|
||||
def setparam(self, key, value):
|
||||
''' Set a parameter in the _params dict.
|
||||
Returns True if the parameter has changed.
|
||||
'''
|
||||
"""
|
||||
Set a parameter in the _params dict.
|
||||
Returns True if the parameter has changed.
|
||||
"""
|
||||
if key in self._params:
|
||||
if self._params[key] == value:
|
||||
return False
|
||||
|
@ -483,12 +493,13 @@ class PyCoreNetIf(object):
|
|||
return False
|
||||
self._params[key] = value
|
||||
return True
|
||||
|
||||
|
||||
def swapparams(self, name):
|
||||
''' Swap out the _params dict for name. If name does not exist,
|
||||
"""
|
||||
Swap out the _params dict for name. If name does not exist,
|
||||
initialize it. This is for supporting separate upstream/downstream
|
||||
parameters when two layer-2 nodes are linked together.
|
||||
'''
|
||||
"""
|
||||
tmp = self._params
|
||||
if not hasattr(self, name):
|
||||
setattr(self, name, {})
|
||||
|
@ -496,8 +507,8 @@ class PyCoreNetIf(object):
|
|||
setattr(self, name, tmp)
|
||||
|
||||
def setposition(self, x, y, z):
|
||||
''' Dispatch to any position hook (self.poshook) handler.
|
||||
'''
|
||||
"""
|
||||
Dispatch to any position hook (self.poshook) handler.
|
||||
"""
|
||||
if self.poshook is not None:
|
||||
self.poshook(self, x, y, z)
|
||||
|
||||
|
|
File diff suppressed because it is too large
daemon/core/data.py (new file, 120 lines)
|
@ -0,0 +1,120 @@
|
|||
"""
|
||||
CORE data objects.
|
||||
"""
|
||||
|
||||
import collections
|
||||
|
||||
ConfigData = collections.namedtuple("ConfigData", [
|
||||
"message_type",
|
||||
"node",
|
||||
"object",
|
||||
"type",
|
||||
"data_types",
|
||||
"data_values",
|
||||
"captions",
|
||||
"bitmap",
|
||||
"possible_values",
|
||||
"groups",
|
||||
"session",
|
||||
"interface_number",
|
||||
"network_id",
|
||||
"opaque"
|
||||
])
|
||||
ConfigData.__new__.__defaults__ = (None,) * len(ConfigData._fields)
|
||||
|
||||
EventData = collections.namedtuple("EventData", [
|
||||
"node",
|
||||
"event_type",
|
||||
"name",
|
||||
"data",
|
||||
"time",
|
||||
"session"
|
||||
])
|
||||
EventData.__new__.__defaults__ = (None,) * len(EventData._fields)
|
||||
|
||||
ExceptionData = collections.namedtuple("ExceptionData", [
|
||||
"node",
|
||||
"session",
|
||||
"level",
|
||||
"source",
|
||||
"date",
|
||||
"text",
|
||||
"opaque"
|
||||
])
|
||||
ExceptionData.__new__.__defaults__ = (None,) * len(ExceptionData._fields)
|
||||
|
||||
FileData = collections.namedtuple("FileData", [
|
||||
"message_type",
|
||||
"node",
|
||||
"name",
|
||||
"mode",
|
||||
"number",
|
||||
"type",
|
||||
"source",
|
||||
"session",
|
||||
"data",
|
||||
"compressed_data"
|
||||
])
|
||||
FileData.__new__.__defaults__ = (None,) * len(FileData._fields)
|
||||
|
||||
NodeData = collections.namedtuple("NodeData", [
|
||||
"message_type",
|
||||
"id",
|
||||
"node_type",
|
||||
"name",
|
||||
"ip_address",
|
||||
"mac_address",
|
||||
"ip6_address",
|
||||
"model",
|
||||
"emulation_id",
|
||||
"emulation_server",
|
||||
"session",
|
||||
"x_position",
|
||||
"y_position",
|
||||
"canvas",
|
||||
"network_id",
|
||||
"services",
|
||||
"latitude",
|
||||
"longitude",
|
||||
"altitude",
|
||||
"icon",
|
||||
"opaque"
|
||||
])
|
||||
NodeData.__new__.__defaults__ = (None,) * len(NodeData._fields)
|
||||
|
||||
LinkData = collections.namedtuple("LinkData", [
|
||||
"message_type",
|
||||
"node1_id",
|
||||
"node2_id",
|
||||
"delay",
|
||||
"bandwidth",
|
||||
"per",
|
||||
"dup",
|
||||
"jitter",
|
||||
"mer",
|
||||
"burst",
|
||||
"session",
|
||||
"mburst",
|
||||
"link_type",
|
||||
"gui_attributes",
|
||||
"unidirectional",
|
||||
"emulation_id",
|
||||
"network_id",
|
||||
"key",
|
||||
"interface1_id",
|
||||
"interface1_name",
|
||||
"interface1_ip4",
|
||||
"interface1_ip4_mask",
|
||||
"interface1_mac",
|
||||
"interface1_ip6",
|
||||
"interface1_ip6_mask",
|
||||
"interface2_id",
|
||||
"interface2_name",
|
||||
"interface2_ip4",
|
||||
"interface2_ip4_mask",
|
||||
"interface2_mac",
|
||||
"interface2_ip6",
|
||||
"interface2_ip6_mask",
|
||||
"opaque"
|
||||
])
|
||||
LinkData.__new__.__defaults__ = (None,) * len(LinkData._fields)
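# Illustrative sketch (not from this commit): every field defaults to None, so callers can
# construct these data objects with only the fields they know about.
if __name__ == "__main__":
    node = NodeData(message_type=1, id=3, name="n3", x_position=100, y_position=200)
    link = LinkData(node1_id=3, node2_id=4, delay=5000, bandwidth=54000000)
    print node.name, node.model            # "n3 None" - unset fields stay None
    print link.delay, link.unidirectional  # "5000 None"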
|
|
@ -0,0 +1,54 @@
|
|||
import subprocess
|
||||
|
||||
from core.misc import log
|
||||
from core.misc import utils
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
EMANEUNK = 0
|
||||
EMANE074 = 7
|
||||
EMANE081 = 8
|
||||
EMANE091 = 91
|
||||
EMANE092 = 92
|
||||
EMANE093 = 93
|
||||
EMANE101 = 101
|
||||
|
||||
VERSION = None
|
||||
VERSIONSTR = None
|
||||
|
||||
|
||||
def emane_version():
|
||||
"""
|
||||
Return the locally installed EMANE version identifier and string.
|
||||
"""
|
||||
global VERSION
|
||||
global VERSIONSTR
|
||||
cmd = ("emane", "--version")
|
||||
|
||||
try:
|
||||
status, result = utils.cmdresult(cmd)
|
||||
except (OSError, subprocess.CalledProcessError):
|
||||
logger.exception("error checking emane version")
|
||||
status = -1
|
||||
result = ""
|
||||
|
||||
VERSION = EMANEUNK
|
||||
if status == 0:
|
||||
if result.startswith("0.7.4"):
|
||||
VERSION = EMANE074
|
||||
elif result.startswith("0.8.1"):
|
||||
VERSION = EMANE081
|
||||
elif result.startswith("0.9.1"):
|
||||
VERSION = EMANE091
|
||||
elif result.startswith("0.9.2"):
|
||||
VERSION = EMANE092
|
||||
elif result.startswith("0.9.3"):
|
||||
VERSION = EMANE093
|
||||
elif result.startswith("1.0.1"):
|
||||
VERSION = EMANE101
|
||||
|
||||
VERSIONSTR = result.strip()
|
||||
|
||||
|
||||
# set version variables for the Emane class
|
||||
emane_version()
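# Illustrative sketch (not from this commit): other modules in this commit compare the
# module-level VERSION against these constants (see commeffect.py and emanemodel.py).
def supports_new_event_api():
    """Return True when the installed EMANE provides the 0.9.1+ event API."""
    return VERSION >= EMANE091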
|
|
@ -1,48 +1,38 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
"""
|
||||
bypass.py: EMANE Bypass model for CORE
|
||||
'''
|
||||
"""
|
||||
|
||||
import sys
|
||||
import string
|
||||
from core.api import coreapi
|
||||
from core.emane.emanemodel import EmaneModel
|
||||
from core.enumerations import ConfigDataTypes
|
||||
|
||||
from core.constants import *
|
||||
from emane import EmaneModel
|
||||
|
||||
class EmaneBypassModel(EmaneModel):
|
||||
def __init__(self, session, objid = None, verbose = False):
|
||||
EmaneModel.__init__(self, session, objid, verbose)
|
||||
def __init__(self, session, object_id=None):
|
||||
EmaneModel.__init__(self, session, object_id)
|
||||
|
||||
_name = "emane_bypass"
|
||||
_confmatrix = [
|
||||
("none",coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'True,False','There are no parameters for the bypass model.'),
|
||||
name = "emane_bypass"
|
||||
config_matrix = [
|
||||
("none", ConfigDataTypes.BOOL.value, '0',
|
||||
'True,False', 'There are no parameters for the bypass model.'),
|
||||
]
|
||||
|
||||
# value groupings
|
||||
_confgroups = "Bypass Parameters:1-1"
|
||||
config_groups = "Bypass Parameters:1-1"
|
||||
|
||||
def buildnemxmlfiles(self, e, ifc):
|
||||
''' Build the necessary nem, mac, and phy XMLs in the given path.
|
||||
If an individual NEM has a nonstandard config, we need to build
|
||||
that file also. Otherwise the WLAN-wide nXXemane_bypassnem.xml,
|
||||
nXXemane_bypassmac.xml, nXXemane_bypassphy.xml are used.
|
||||
'''
|
||||
values = e.getifcconfig(self.objid, self._name,
|
||||
self.getdefaultvalues(), ifc)
|
||||
"""
|
||||
Build the necessary nem, mac, and phy XMLs in the given path.
|
||||
If an individual NEM has a nonstandard config, we need to build
|
||||
that file also. Otherwise the WLAN-wide nXXemane_bypassnem.xml,
|
||||
nXXemane_bypassmac.xml, nXXemane_bypassphy.xml are used.
|
||||
"""
|
||||
values = e.getifcconfig(self.object_id, self.name, self.getdefaultvalues(), ifc)
|
||||
if values is None:
|
||||
return
|
||||
nemdoc = e.xmldoc("nem")
|
||||
nem = nemdoc.getElementsByTagName("nem").pop()
|
||||
nem.setAttribute("name", "BYPASS NEM")
|
||||
e.appendtransporttonem(nemdoc, nem, self.objid, ifc)
|
||||
e.appendtransporttonem(nemdoc, nem, self.object_id, ifc)
|
||||
mactag = nemdoc.createElement("mac")
|
||||
mactag.setAttribute("definition", self.macxmlname(ifc))
|
||||
nem.appendChild(mactag)
|
||||
|
@ -62,5 +52,3 @@ class EmaneBypassModel(EmaneModel):
|
|||
phy.setAttribute("name", "BYPASS PHY")
|
||||
phy.setAttribute("library", "bypassphylayer")
|
||||
e.xmlwrite(phydoc, self.phyxmlname(ifc))
|
||||
|
||||
|
||||
|
|
|
@ -1,76 +1,70 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2014 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
# Randy Charland <rcharland@ll.mit.edu>
|
||||
#
|
||||
'''
|
||||
"""
|
||||
commeffect.py: EMANE CommEffect model for CORE
|
||||
'''
|
||||
"""
|
||||
|
||||
from core import emane
|
||||
from core.emane.emanemodel import EmaneModel
|
||||
from core.enumerations import ConfigDataTypes
|
||||
from core.misc import log
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
import sys
|
||||
import string
|
||||
try:
|
||||
from emanesh.events import EventService
|
||||
except:
|
||||
pass
|
||||
from core.api import coreapi
|
||||
from core.constants import *
|
||||
from emane import Emane, EmaneModel
|
||||
except ImportError:
|
||||
logger.error("error importing emanesh")
|
||||
|
||||
try:
|
||||
import emaneeventservice
|
||||
import emaneeventcommeffect
|
||||
except Exception, e:
|
||||
pass
|
||||
except ImportError:
|
||||
logger.error("error importing emaneeventservice and emaneeventcommeffect")
|
||||
|
||||
|
||||
class EmaneCommEffectModel(EmaneModel):
|
||||
def __init__(self, session, objid = None, verbose = False):
|
||||
EmaneModel.__init__(self, session, objid, verbose)
|
||||
def __init__(self, session, object_id=None):
|
||||
EmaneModel.__init__(self, session, object_id)
|
||||
|
||||
# model name
|
||||
_name = "emane_commeffect"
|
||||
name = "emane_commeffect"
|
||||
# CommEffect parameters
|
||||
_confmatrix_shim_base = [
|
||||
("filterfile", coreapi.CONF_DATA_TYPE_STRING, '',
|
||||
("filterfile", ConfigDataTypes.STRING.value, '',
|
||||
'', 'filter file'),
|
||||
("groupid", coreapi.CONF_DATA_TYPE_UINT32, '0',
|
||||
("groupid", ConfigDataTypes.UINT32.value, '0',
|
||||
'', 'NEM Group ID'),
|
||||
("enablepromiscuousmode", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
("enablepromiscuousmode", ConfigDataTypes.BOOL.value, '0',
|
||||
'On,Off', 'enable promiscuous mode'),
|
||||
("receivebufferperiod", coreapi.CONF_DATA_TYPE_FLOAT, '1.0',
|
||||
("receivebufferperiod", ConfigDataTypes.FLOAT.value, '1.0',
|
||||
'', 'receivebufferperiod'),
|
||||
]
|
||||
_confmatrix_shim_081 = [
|
||||
("defaultconnectivity", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
("defaultconnectivity", ConfigDataTypes.BOOL.value, '0',
|
||||
'On,Off', 'defaultconnectivity'),
|
||||
("enabletighttimingmode", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
("enabletighttimingmode", ConfigDataTypes.BOOL.value, '0',
|
||||
'On,Off', 'enable tight timing mode'),
|
||||
]
|
||||
_confmatrix_shim_091 = [
|
||||
("defaultconnectivitymode", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
("defaultconnectivitymode", ConfigDataTypes.BOOL.value, '0',
|
||||
'On,Off', 'defaultconnectivity'),
|
||||
]
|
||||
if Emane.version >= Emane.EMANE091:
|
||||
if emane.VERSION >= emane.EMANE091:
|
||||
_confmatrix_shim = _confmatrix_shim_base + _confmatrix_shim_091
|
||||
else:
|
||||
_confmatrix_shim = _confmatrix_shim_base + _confmatrix_shim_081
|
||||
|
||||
_confmatrix = _confmatrix_shim
|
||||
config_matrix = _confmatrix_shim
|
||||
# value groupings
|
||||
_confgroups = "CommEffect SHIM Parameters:1-%d" \
|
||||
% len(_confmatrix_shim)
|
||||
config_groups = "CommEffect SHIM Parameters:1-%d" % len(_confmatrix_shim)
|
||||
|
||||
def buildnemxmlfiles(self, e, ifc):
|
||||
''' Build the necessary nem and commeffect XMLs in the given path.
|
||||
If an individual NEM has a nonstandard config, we need to build
|
||||
that file also. Otherwise the WLAN-wide
|
||||
nXXemane_commeffectnem.xml, nXXemane_commeffectshim.xml are used.
|
||||
'''
|
||||
values = e.getifcconfig(self.objid, self._name,
|
||||
self.getdefaultvalues(), ifc)
|
||||
"""
|
||||
Build the necessary nem and commeffect XMLs in the given path.
|
||||
If an individual NEM has a nonstandard config, we need to build
|
||||
that file also. Otherwise the WLAN-wide
|
||||
nXXemane_commeffectnem.xml, nXXemane_commeffectshim.xml are used.
|
||||
"""
|
||||
values = e.getifcconfig(self.object_id, self.name, self.getdefaultvalues(), ifc)
|
||||
if values is None:
|
||||
return
|
||||
shimdoc = e.xmldoc("shim")
|
||||
|
@ -83,8 +77,7 @@ class EmaneCommEffectModel(EmaneModel):
|
|||
shimnames.remove("filterfile")
|
||||
|
||||
# append all shim options (except filterfile) to shimdoc
|
||||
map( lambda n: shim.appendChild(e.xmlparam(shimdoc, n, \
|
||||
self.valueof(n, values))), shimnames)
|
||||
map(lambda n: shim.appendChild(e.xmlparam(shimdoc, n, self.valueof(n, values))), shimnames)
|
||||
# empty filterfile is not allowed
|
||||
ff = self.valueof("filterfile", values)
|
||||
if ff.strip() != '':
|
||||
|
@ -95,20 +88,24 @@ class EmaneCommEffectModel(EmaneModel):
|
|||
nem = nemdoc.getElementsByTagName("nem").pop()
|
||||
nem.setAttribute("name", "commeffect NEM")
|
||||
nem.setAttribute("type", "unstructured")
|
||||
e.appendtransporttonem(nemdoc, nem, self.objid, ifc)
|
||||
e.appendtransporttonem(nemdoc, nem, self.object_id, ifc)
|
||||
nem.appendChild(e.xmlshimdefinition(nemdoc, self.shimxmlname(ifc)))
|
||||
e.xmlwrite(nemdoc, self.nemxmlname(ifc))
|
||||
|
||||
def linkconfig(self, netif, bw = None, delay = None,
|
||||
loss = None, duplicate = None, jitter = None, netif2 = None):
|
||||
''' Generate CommEffect events when a Link Message is received having
|
||||
def linkconfig(self, netif, bw=None, delay=None,
|
||||
loss=None, duplicate=None, jitter=None, netif2=None):
|
||||
"""
|
||||
Generate CommEffect events when a Link Message is received having
|
||||
link parameters.
|
||||
'''
|
||||
if self.session.emane.version >= self.session.emane.EMANE091:
|
||||
"""
|
||||
if emane.VERSION >= emane.EMANE091:
|
||||
raise NotImplementedError, \
|
||||
"CommEffect linkconfig() not implemented for EMANE 0.9.1+"
|
||||
"CommEffect linkconfig() not implemented for EMANE 0.9.1+"
|
||||
|
||||
def z(x):
|
||||
''' Helper to use 0 for None values. '''
|
||||
"""
|
||||
Helper to use 0 for None values.
|
||||
"""
|
||||
if type(x) is str:
|
||||
x = float(x)
|
||||
if x is None:
|
||||
|
@ -118,17 +115,16 @@ class EmaneCommEffectModel(EmaneModel):
|
|||
|
||||
service = self.session.emane.service
|
||||
if service is None:
|
||||
self.session.warn("%s: EMANE event service unavailable" % \
|
||||
self._name)
|
||||
logger.warn("%s: EMANE event service unavailable" % self.name)
|
||||
return
|
||||
if netif is None or netif2 is None:
|
||||
self.session.warn("%s: missing NEM information" % self._name)
|
||||
logger.warn("%s: missing NEM information" % self.name)
|
||||
return
|
||||
# TODO: batch these into multiple events per transmission
|
||||
# TODO: may want to split out seconds portion of delay and jitter
|
||||
event = emaneeventcommeffect.EventCommEffect(1)
|
||||
index = 0
|
||||
e = self.session.obj(self.objid)
|
||||
e = self.session.get_object(self.object_id)
|
||||
nemid = e.getnemid(netif)
|
||||
nemid2 = e.getnemid(netif2)
|
||||
mbw = bw
|
||||
|
@ -139,6 +135,3 @@ class EmaneCommEffectModel(EmaneModel):
|
|||
emaneeventservice.PLATFORMID_ANY,
|
||||
nemid2, emaneeventservice.COMPONENTID_ANY,
|
||||
event.export())
|
||||
|
||||
|
||||
|
||||
|
|
File diff suppressed because it is too large
daemon/core/emane/emanemanager.py (new file, 1206 lines): diff suppressed because it is too large
daemon/core/emane/emanemodel.py (new file, 203 lines)
|
@ -0,0 +1,203 @@
|
|||
"""
|
||||
Defines Emane Models used within CORE.
|
||||
"""
|
||||
|
||||
from core import emane
|
||||
from core.misc import log
|
||||
from core.misc import utils
|
||||
from core.mobility import WirelessModel
|
||||
from core.xml import xmlutils
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
|
||||
class EmaneModel(WirelessModel):
|
||||
"""
|
||||
EMANE models inherit from this parent class, which takes care of
|
||||
handling configuration messages based on the _confmatrix list of
|
||||
configurable parameters. Helper functions also live here.
|
||||
"""
|
||||
_prefix = {'y': 1e-24, # yocto
|
||||
'z': 1e-21, # zepto
|
||||
'a': 1e-18, # atto
|
||||
'f': 1e-15, # femto
|
||||
'p': 1e-12, # pico
|
||||
'n': 1e-9, # nano
|
||||
'u': 1e-6, # micro
|
||||
'm': 1e-3,  # milli
|
||||
'c': 1e-2, # centi
|
||||
'd': 1e-1, # deci
|
||||
'k': 1e3, # kilo
|
||||
'M': 1e6, # mega
|
||||
'G': 1e9, # giga
|
||||
'T': 1e12, # tera
|
||||
'P': 1e15, # peta
|
||||
'E': 1e18, # exa
|
||||
'Z': 1e21, # zetta
|
||||
'Y': 1e24, # yotta
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def configure_emane(cls, session, config_data):
|
||||
"""
|
||||
Handle configuration messages for setting up a model.
|
||||
Pass the Emane object as the manager object.
|
||||
|
||||
:param core.session.Session session: session to configure emane
|
||||
:param core.conf.ConfigData config_data: configuration data for carrying out a configuration
|
||||
"""
|
||||
return cls.configure(session.emane, config_data)
|
||||
|
||||
@classmethod
|
||||
def emane074_fixup(cls, value, div=1.0):
|
||||
"""
|
||||
Helper for converting 0.8.1 and newer values to EMANE 0.7.4
|
||||
compatible values.
|
||||
NOTE: This should be removed when support for 0.7.4 has been
|
||||
deprecated.
|
||||
"""
|
||||
if div == 0:
|
||||
return "0"
|
||||
if type(value) is not str:
|
||||
return str(value / div)
|
||||
if value.endswith(tuple(cls._prefix.keys())):
|
||||
suffix = value[-1]
|
||||
value = float(value[:-1]) * cls._prefix[suffix]
|
||||
return str(int(value / div))
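# Illustrative sketch (not from this commit): emane074_fixup() expands SI-prefixed strings
# using the _prefix table above, then scales by div and truncates to an integer string.
def _fixup_examples():
    assert EmaneModel.emane074_fixup("54M", div=1e3) == "54000"    # 54e6 / 1e3
    assert EmaneModel.emane074_fixup(20000000, div=1e6) == "20.0"  # non-strings just divide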
|
||||
|
||||
def buildnemxmlfiles(self, e, ifc):
|
||||
"""
|
||||
Build the necessary nem, mac, and phy XMLs in the given path.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def buildplatformxmlnementry(self, doc, n, ifc):
|
||||
"""
|
||||
Build the NEM definition that goes into the platform.xml file.
|
||||
This returns an XML element that will be added to the <platform/> element.
|
||||
This default method supports per-interface config
|
||||
(e.g. <nem definition="n2_0_63emane_rfpipe.xml" id="1">) or per-EmaneNode
|
||||
config (e.g. <nem definition="n1emane_rfpipe.xml" id="1">).
|
||||
This can be overridden by a model for NEM flexibility; n is the EmaneNode.
|
||||
"""
|
||||
nem = doc.createElement("nem")
|
||||
nem.setAttribute("name", ifc.localname)
|
||||
# if this netif contains a non-standard (per-interface) config,
|
||||
# then we need to use a more specific xml file here
|
||||
nem.setAttribute("definition", self.nemxmlname(ifc))
|
||||
return nem
|
||||
|
||||
def buildplatformxmltransportentry(self, doc, n, ifc):
|
||||
"""
|
||||
Build the transport definition that goes into the platform.xml file.
|
||||
This returns an XML element that will be added to the nem definition.
|
||||
This default method supports raw and virtual transport types, but may be
|
||||
overridden by a model to support, e.g., the pluggable virtual transport.
|
||||
n is the EmaneNode.
|
||||
"""
|
||||
ttype = ifc.transport_type
|
||||
if not ttype:
|
||||
logger.info("warning: %s interface type unsupported!" % ifc.name)
|
||||
ttype = "raw"
|
||||
trans = doc.createElement("transport")
|
||||
trans.setAttribute("definition", n.transportxmlname(ttype))
|
||||
if emane.VERSION < emane.EMANE092:
|
||||
trans.setAttribute("group", "1")
|
||||
param = doc.createElement("param")
|
||||
param.setAttribute("name", "device")
|
||||
if ttype == "raw":
|
||||
# raw RJ45 name e.g. 'eth0'
|
||||
param.setAttribute("value", ifc.name)
|
||||
else:
|
||||
# virtual TAP name e.g. 'n3.0.17'
|
||||
param.setAttribute("value", ifc.localname)
|
||||
if emane.VERSION > emane.EMANE091:
|
||||
param.setAttribute("value", ifc.name)
|
||||
|
||||
trans.appendChild(param)
|
||||
return trans
|
||||
|
||||
def basename(self, interface=None):
|
||||
"""
|
||||
Return the string that other names are based on.
|
||||
If a specific config is stored for a node's interface, a unique
|
||||
filename is needed; otherwise the name of the EmaneNode is used.
|
||||
"""
|
||||
emane = self.session.emane
|
||||
name = "n%s" % self.object_id
|
||||
if interface is not None:
|
||||
nodenum = interface.node.objid
|
||||
# Adamson change - use getifcconfig() to get proper result
|
||||
# if emane.getconfig(nodenum, self._name, None)[1] is not None:
|
||||
if emane.getifcconfig(nodenum, self.name, None, interface) is not None:
|
||||
name = interface.localname.replace('.', '_')
|
||||
return "%s%s" % (name, self.name)
|
||||
|
||||
def nemxmlname(self, interface=None):
|
||||
"""
|
||||
Return the string name for the NEM XML file, e.g. 'n3rfpipenem.xml'
|
||||
"""
|
||||
append = ""
|
||||
if emane.VERSION > emane.EMANE091:
|
||||
if interface and interface.transport_type == "raw":
|
||||
append = "_raw"
|
||||
return "%snem%s.xml" % (self.basename(interface), append)
|
||||
|
||||
def shimxmlname(self, ifc=None):
|
||||
"""
|
||||
Return the string name for the SHIM XML file, e.g. 'commeffectshim.xml'
|
||||
"""
|
||||
return "%sshim.xml" % self.basename(ifc)
|
||||
|
||||
def macxmlname(self, ifc=None):
|
||||
"""
|
||||
Return the string name for the MAC XML file, e.g. 'n3rfpipemac.xml'
|
||||
"""
|
||||
return "%smac.xml" % self.basename(ifc)
|
||||
|
||||
def phyxmlname(self, ifc=None):
|
||||
"""
|
||||
Return the string name for the PHY XML file, e.g. 'n3rfpipephy.xml'
|
||||
"""
|
||||
return "%sphy.xml" % self.basename(ifc)
|
||||
|
||||
def update(self, moved, moved_netifs):
|
||||
"""
|
||||
Invoked from MobilityModel when nodes are moved; this causes
|
||||
EMANE location events to be generated for the nodes in the moved
|
||||
list, making EmaneModels compatible with Ns2ScriptedMobility
|
||||
"""
|
||||
try:
|
||||
wlan = self.session.get_object(self.object_id)
|
||||
wlan.setnempositions(moved_netifs)
|
||||
except KeyError:
|
||||
logger.exception("error during update")
|
||||
|
||||
def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, jitter=None, netif2=None):
|
||||
"""
|
||||
Invoked when a Link Message is received. Default is unimplemented.
|
||||
"""
|
||||
warntxt = "EMANE model %s does not support link " % self.name
|
||||
warntxt += "configuration, dropping Link Message"
|
||||
logger.warn(warntxt)
|
||||
|
||||
@staticmethod
|
||||
def valuestrtoparamlist(dom, name, value):
|
||||
"""
|
||||
Helper to convert a parameter to a paramlist.
|
||||
Returns an XML paramlist, or None if the value does not expand to
|
||||
multiple values.
|
||||
"""
|
||||
try:
|
||||
values = utils.maketuplefromstr(value, str)
|
||||
except SyntaxError:
|
||||
logger.exception("error in value string to param list")
|
||||
return None
|
||||
|
||||
if not hasattr(values, '__iter__'):
|
||||
return None
|
||||
|
||||
if len(values) < 2:
|
||||
return None
|
||||
|
||||
return xmlutils.add_param_list_to_parent(dom, parent=None, name=name, values=values)
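# --- illustrative sketch (not part of this commit) ---
# valuestrtoparamlist() only expands strings that split into two or more
# values; the <paramlist>/<item> shape below is an assumption about what
# xmlutils.add_param_list_to_parent() produces.
from xml.dom.minidom import Document

def _paramlist_sketch(doc, name, value):
    values = [v for v in value.split(",") if v]
    if len(values) < 2:
        return None  # a single value stays a plain <param> elsewhere
    plist = doc.createElement("paramlist")
    plist.setAttribute("name", name)
    for v in values:
        item = doc.createElement("item")
        item.setAttribute("value", v)
        plist.appendChild(item)
    return plist

doc = Document()
print(_paramlist_sketch(doc, "frequencyofinterest", "2.347G,2.412G").toxml())
print(_paramlist_sketch(doc, "frequencyofinterest", "2.347G"))  # None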
|
|
@ -1,117 +1,113 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2014 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
"""
|
||||
ieee80211abg.py: EMANE IEEE 802.11abg model for CORE
|
||||
'''
|
||||
"""
|
||||
|
||||
from core import emane
|
||||
from core.emane.emanemodel import EmaneModel
|
||||
from core.emane.universal import EmaneUniversalModel
|
||||
from core.enumerations import ConfigDataTypes
|
||||
from core.misc import log
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
import sys
|
||||
import string
|
||||
try:
|
||||
from emanesh.events import EventService
|
||||
except:
|
||||
pass
|
||||
from core.api import coreapi
|
||||
from core.constants import *
|
||||
from emane import Emane, EmaneModel
|
||||
from universal import EmaneUniversalModel
|
||||
logger.error("error importing emanesh")
|
||||
|
||||
|
||||
class EmaneIeee80211abgModel(EmaneModel):
|
||||
def __init__(self, session, objid = None, verbose = False):
|
||||
EmaneModel.__init__(self, session, objid, verbose)
|
||||
def __init__(self, session, object_id=None):
|
||||
EmaneModel.__init__(self, session, object_id)
|
||||
|
||||
# model name
|
||||
_name = "emane_ieee80211abg"
|
||||
name = "emane_ieee80211abg"
|
||||
_80211rates = '1 1 Mbps,2 2 Mbps,3 5.5 Mbps,4 11 Mbps,5 6 Mbps,' + \
|
||||
'6 9 Mbps,7 12 Mbps,8 18 Mbps,9 24 Mbps,10 36 Mbps,11 48 Mbps,' + \
|
||||
'12 54 Mbps'
|
||||
if Emane.version >= Emane.EMANE091:
|
||||
'6 9 Mbps,7 12 Mbps,8 18 Mbps,9 24 Mbps,10 36 Mbps,11 48 Mbps,' + \
|
||||
'12 54 Mbps'
|
||||
if emane.VERSION >= emane.EMANE091:
|
||||
xml_path = '/usr/share/emane/xml/models/mac/ieee80211abg'
|
||||
else:
|
||||
xml_path = "/usr/share/emane/models/ieee80211abg/xml"
|
||||
|
||||
# MAC parameters
|
||||
_confmatrix_mac_base = [
|
||||
("mode", coreapi.CONF_DATA_TYPE_UINT8, '0',
|
||||
("mode", ConfigDataTypes.UINT8.value, '0',
|
||||
'0 802.11b (DSSS only),1 802.11b (DSSS only),' + \
|
||||
'2 802.11a or g (OFDM),3 802.11b/g (DSSS and OFDM)', 'mode'),
|
||||
("enablepromiscuousmode", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
("enablepromiscuousmode", ConfigDataTypes.BOOL.value, '0',
|
||||
'On,Off', 'enable promiscuous mode'),
|
||||
("distance", coreapi.CONF_DATA_TYPE_UINT32, '1000',
|
||||
("distance", ConfigDataTypes.UINT32.value, '1000',
|
||||
'', 'max distance (m)'),
|
||||
("unicastrate", coreapi.CONF_DATA_TYPE_UINT8, '4', _80211rates,
|
||||
("unicastrate", ConfigDataTypes.UINT8.value, '4', _80211rates,
|
||||
'unicast rate (Mbps)'),
|
||||
("multicastrate", coreapi.CONF_DATA_TYPE_UINT8, '1', _80211rates,
|
||||
("multicastrate", ConfigDataTypes.UINT8.value, '1', _80211rates,
|
||||
'multicast rate (Mbps)'),
|
||||
("rtsthreshold", coreapi.CONF_DATA_TYPE_UINT16, '0',
|
||||
("rtsthreshold", ConfigDataTypes.UINT16.value, '0',
|
||||
'', 'RTS threshold (bytes)'),
|
||||
("pcrcurveuri", coreapi.CONF_DATA_TYPE_STRING,
|
||||
("pcrcurveuri", ConfigDataTypes.STRING.value,
|
||||
'%s/ieee80211pcr.xml' % xml_path,
|
||||
'', 'SINR/PCR curve file'),
|
||||
("flowcontrolenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
("flowcontrolenable", ConfigDataTypes.BOOL.value, '0',
|
||||
'On,Off', 'enable traffic flow control'),
|
||||
("flowcontroltokens", coreapi.CONF_DATA_TYPE_UINT16, '10',
|
||||
("flowcontroltokens", ConfigDataTypes.UINT16.value, '10',
|
||||
'', 'number of flow control tokens'),
|
||||
]
|
||||
# mac parameters introduced in EMANE 0.8.1
|
||||
# mac parameters introduced in EMANE 0.8.1
|
||||
# Note: The entry format for category queue parameters (queuesize, aifs, etc.) was changed in
|
||||
# EMANE 9.x, but are being preserved for the time being due to space constraints in the
|
||||
# CORE GUI. A conversion function (get9xmacparamequivalent) has been defined to support this.
|
||||
# CORE GUI. A conversion function (get9xmacparamequivalent) has been defined to support this.
|
||||
_confmatrix_mac_extended = [
|
||||
("wmmenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
("wmmenable", ConfigDataTypes.BOOL.value, '0',
|
||||
'On,Off', 'WiFi Multimedia (WMM)'),
|
||||
("queuesize", coreapi.CONF_DATA_TYPE_STRING, '0:255 1:255 2:255 3:255',
|
||||
("queuesize", ConfigDataTypes.STRING.value, '0:255 1:255 2:255 3:255',
|
||||
'', 'queue size (0-4:size)'),
|
||||
("cwmin", coreapi.CONF_DATA_TYPE_STRING, '0:32 1:32 2:16 3:8',
|
||||
("cwmin", ConfigDataTypes.STRING.value, '0:32 1:32 2:16 3:8',
|
||||
'', 'min contention window (0-4:minw)'),
|
||||
("cwmax", coreapi.CONF_DATA_TYPE_STRING, '0:1024 1:1024 2:64 3:16',
|
||||
("cwmax", ConfigDataTypes.STRING.value, '0:1024 1:1024 2:64 3:16',
|
||||
'', 'max contention window (0-4:maxw)'),
|
||||
("aifs", coreapi.CONF_DATA_TYPE_STRING, '0:2 1:2 2:2 3:1',
|
||||
("aifs", ConfigDataTypes.STRING.value, '0:2 1:2 2:2 3:1',
|
||||
'', 'arbitration inter frame space (0-4:aifs)'),
|
||||
("txop", coreapi.CONF_DATA_TYPE_STRING, '0:0 1:0 2:0 3:0',
|
||||
("txop", ConfigDataTypes.STRING.value, '0:0 1:0 2:0 3:0',
|
||||
'', 'txop (0-4:usec)'),
|
||||
("retrylimit", coreapi.CONF_DATA_TYPE_STRING, '0:3 1:3 2:3 3:3',
|
||||
("retrylimit", ConfigDataTypes.STRING.value, '0:3 1:3 2:3 3:3',
|
||||
'', 'retry limit (0-4:numretries)'),
|
||||
]
|
||||
_confmatrix_mac_091 = [
|
||||
('radiometricenable', coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
('radiometricenable', ConfigDataTypes.BOOL.value, '0',
|
||||
'On,Off', 'report radio metrics via R2RI'),
|
||||
('radiometricreportinterval', coreapi.CONF_DATA_TYPE_FLOAT, '1.0',
|
||||
('radiometricreportinterval', ConfigDataTypes.FLOAT.value, '1.0',
|
||||
'', 'R2RI radio metric report interval (sec)'),
|
||||
('neighbormetricdeletetime', coreapi.CONF_DATA_TYPE_FLOAT, '60.0',
|
||||
('neighbormetricdeletetime', ConfigDataTypes.FLOAT.value, '60.0',
|
||||
'', 'R2RI neighbor table inactivity time (sec)'),
|
||||
]
|
||||
_confmatrix_mac = _confmatrix_mac_base + _confmatrix_mac_extended
|
||||
if Emane.version >= Emane.EMANE091:
|
||||
if emane.VERSION >= emane.EMANE091:
|
||||
_confmatrix_mac += _confmatrix_mac_091
|
||||
|
||||
# PHY parameters from Universal PHY
|
||||
_confmatrix_phy = EmaneUniversalModel._confmatrix
|
||||
_confmatrix_phy = EmaneUniversalModel.config_matrix
|
||||
|
||||
_confmatrix = _confmatrix_mac + _confmatrix_phy
|
||||
config_matrix = _confmatrix_mac + _confmatrix_phy
|
||||
# value groupings
|
||||
_confgroups = "802.11 MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" \
|
||||
% (len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(_confmatrix))
|
||||
config_groups = "802.11 MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" % (
|
||||
len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(config_matrix))
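# --- illustrative sketch (not part of this commit) ---
# With 19 MAC entries and 11 PHY entries (the counts under EMANE 0.9.1+, as an
# assumption derived from the matrices above) the group string becomes:
_n_mac, _n_all = 19, 30
print("802.11 MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" % (_n_mac, _n_mac + 1, _n_all))
# 802.11 MAC Parameters:1-19|Universal PHY Parameters:20-30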
|
||||
|
||||
def buildnemxmlfiles(self, e, ifc):
|
||||
''' Build the necessary nem, mac, and phy XMLs in the given path.
|
||||
If an individual NEM has a nonstandard config, we need to build
|
||||
that file also. Otherwise the WLAN-wide
|
||||
nXXemane_ieee80211abgnem.xml, nXXemane_ieee80211abgemac.xml,
|
||||
nXXemane_ieee80211abgphy.xml are used.
|
||||
'''
|
||||
values = e.getifcconfig(self.objid, self._name,
|
||||
self.getdefaultvalues(), ifc)
|
||||
"""
|
||||
Build the necessary nem, mac, and phy XMLs in the given path.
|
||||
If an individual NEM has a nonstandard config, we need to build
|
||||
that file also. Otherwise the WLAN-wide
|
||||
nXXemane_ieee80211abgnem.xml, nXXemane_ieee80211abgemac.xml,
|
||||
nXXemane_ieee80211abgphy.xml are used.
|
||||
"""
|
||||
values = e.getifcconfig(self.object_id, self.name, self.getdefaultvalues(), ifc)
|
||||
if values is None:
|
||||
return
|
||||
nemdoc = e.xmldoc("nem")
|
||||
nem = nemdoc.getElementsByTagName("nem").pop()
|
||||
nem.setAttribute("name", "ieee80211abg NEM")
|
||||
e.appendtransporttonem(nemdoc, nem, self.objid, ifc)
|
||||
e.appendtransporttonem(nemdoc, nem, self.object_id, ifc)
|
||||
mactag = nemdoc.createElement("mac")
|
||||
mactag.setAttribute("definition", self.macxmlname(ifc))
|
||||
nem.appendChild(mactag)
|
||||
|
@ -130,15 +126,14 @@ class EmaneIeee80211abgModel(EmaneModel):
|
|||
phynames = names[len(self._confmatrix_mac):]
|
||||
|
||||
# append all MAC options to macdoc
|
||||
if Emane.version >= Emane.EMANE091:
|
||||
if emane.VERSION >= emane.EMANE091:
|
||||
for macname in macnames:
|
||||
mac9xnvpairlist = self.get9xmacparamequivalent(macname, values)
|
||||
for nvpair in mac9xnvpairlist:
|
||||
mac.appendChild(e.xmlparam(macdoc, nvpair[0], nvpair[1]))
|
||||
else:
|
||||
map( lambda n: mac.appendChild(e.xmlparam(macdoc, n, \
|
||||
self.valueof(n, values))), macnames)
|
||||
|
||||
map(lambda n: mac.appendChild(e.xmlparam(macdoc, n, self.valueof(n, values))), macnames)
|
||||
|
||||
e.xmlwrite(macdoc, self.macxmlname(ifc))
|
||||
|
||||
phydoc = EmaneUniversalModel.getphydoc(e, self, values, phynames)
|
||||
|
@ -149,25 +144,25 @@ class EmaneIeee80211abgModel(EmaneModel):
|
|||
# This allows CORE to preserve the entry layout for the mac 'category' parameters
|
||||
# and work with EMANE 9.x onwards.
|
||||
#
|
||||
def get9xmacparamequivalent(self, macname, values):
|
||||
''' Generate a list of 80211abg mac parameters in 0.9.x layout for a given mac parameter
|
||||
in 8.x layout.For mac category parameters, the list returned will contain the four
|
||||
def get9xmacparamequivalent(self, macname, values):
|
||||
"""
|
||||
Generate a list of 80211abg mac parameters in 0.9.x layout for a given mac parameter
|
||||
in 8.x layout. For mac category parameters, the list returned will contain the four
|
||||
equivalent 9.x parameter and value pairs. Otherwise, the list returned will only
|
||||
contain a single name and value pair.
|
||||
'''
|
||||
"""
|
||||
nvpairlist = []
|
||||
macparmval = self.valueof(macname, values)
|
||||
if macname in ["queuesize","aifs","cwmin","cwmax","txop","retrylimit"]:
|
||||
if macname in ["queuesize", "aifs", "cwmin", "cwmax", "txop", "retrylimit"]:
|
||||
for catval in macparmval.split():
|
||||
idx_and_val = catval.split(":")
|
||||
idx = int(idx_and_val[0])
|
||||
val = idx_and_val[1]
|
||||
# aifs and txop are in microseconds. Convert to seconds.
|
||||
if macname in ["aifs","txop"]:
|
||||
val = "%f" % (float(val)*(1e-6))
|
||||
if macname in ["aifs", "txop"]:
|
||||
val = "%f" % (float(val) * 1e-6)
|
||||
name9x = "%s%d" % (macname, idx)
|
||||
nvpairlist.append([name9x, val])
|
||||
else:
|
||||
nvpairlist.append([macname, macparmval])
|
||||
return nvpairlist
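# --- illustrative sketch (not part of this commit) ---
# Example of the 8.x -> 9.x expansion performed above: a category parameter
# keeps its compact "idx:value" layout in the GUI and is split into per-queue
# names for EMANE 9.x, with aifs/txop additionally converted from usec to sec.
def _expand_sketch(macname, macparmval):
    pairs = []
    for catval in macparmval.split():
        idx, val = catval.split(":")
        if macname in ("aifs", "txop"):
            val = "%f" % (float(val) * 1e-6)
        pairs.append(["%s%s" % (macname, idx), val])
    return pairs

print(_expand_sketch("cwmin", "0:32 1:32 2:16 3:8"))
# [['cwmin0', '32'], ['cwmin1', '32'], ['cwmin2', '16'], ['cwmin3', '8']]
print(_expand_sketch("aifs", "0:2 1:2 2:2 3:1"))
# aifs entries come back in seconds, e.g. ['aifs0', '0.000002']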
|
||||
|
||||
|
|
|
@ -1,69 +1,72 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2014 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
"""
|
||||
nodes.py: definition of an EmaneNode class for implementing configuration
|
||||
control of an EMANE emulation. An EmaneNode has several attached NEMs that
|
||||
share the same MAC+PHY model.
|
||||
'''
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os.path
|
||||
from os import path
|
||||
|
||||
from core.api import coreapi
|
||||
from core import emane
|
||||
from core.coreobj import PyCoreNet
|
||||
from core.enumerations import LinkTypes
|
||||
from core.enumerations import NodeTypes
|
||||
from core.enumerations import RegisterTlvs
|
||||
from core.misc import log
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
try:
|
||||
from emanesh.events import EventService
|
||||
from emanesh.events import LocationEvent
|
||||
except Exception, e:
|
||||
pass
|
||||
except ImportError:
|
||||
logger.error("error loading emanesh")
|
||||
|
||||
try:
|
||||
import emaneeventservice
|
||||
import emaneeventlocation
|
||||
except Exception, e:
|
||||
''' Don't require all CORE users to have EMANE libeventservice and its
|
||||
Python bindings installed.
|
||||
'''
|
||||
pass
|
||||
except ImportError:
|
||||
"""
|
||||
Don't require all CORE users to have EMANE libeventservice and its
|
||||
Python bindings installed.
|
||||
"""
|
||||
logger.error("error loading emaneeventservice and emaneeventlocation")
|
||||
|
||||
|
||||
class EmaneNet(PyCoreNet):
|
||||
''' EMANE network base class.
|
||||
'''
|
||||
apitype = coreapi.CORE_NODE_EMANE
|
||||
linktype = coreapi.CORE_LINK_WIRELESS
|
||||
type = "wlan" # icon used
|
||||
"""
|
||||
EMANE network base class.
|
||||
"""
|
||||
apitype = NodeTypes.EMANE.value
|
||||
linktype = LinkTypes.WIRELESS.value
|
||||
# icon used
|
||||
type = "wlan"
|
||||
|
||||
|
||||
class EmaneNode(EmaneNet):
|
||||
''' EMANE node contains NEM configuration and causes connected nodes
|
||||
to have TAP interfaces (instead of VEth). These are managed by the
|
||||
Emane controller object that exists in a session.
|
||||
'''
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True):
|
||||
PyCoreNet.__init__(self, session, objid, name, verbose, start)
|
||||
self.verbose = verbose
|
||||
"""
|
||||
EMANE node contains NEM configuration and causes connected nodes
|
||||
to have TAP interfaces (instead of VEth). These are managed by the
|
||||
Emane controller object that exists in a session.
|
||||
"""
|
||||
|
||||
def __init__(self, session, objid=None, name=None, start=True):
|
||||
PyCoreNet.__init__(self, session, objid, name, start)
|
||||
self.conf = ""
|
||||
self.up = False
|
||||
self.nemidmap = {}
|
||||
self.model = None
|
||||
self.mobility = None
|
||||
|
||||
def linkconfig(self, netif, bw = None, delay = None,
|
||||
loss = None, duplicate = None, jitter = None, netif2 = None):
|
||||
''' The CommEffect model supports link configuration.
|
||||
'''
|
||||
def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, jitter=None, netif2=None):
|
||||
"""
|
||||
The CommEffect model supports link configuration.
|
||||
"""
|
||||
if not self.model:
|
||||
return
|
||||
return self.model.linkconfig(netif=netif, bw=bw, delay=delay, loss=loss,
|
||||
duplicate=duplicate, jitter=jitter, netif2=netif2)
|
||||
duplicate=duplicate, jitter=jitter, netif2=netif2)
|
||||
|
||||
def config(self, conf):
|
||||
#print "emane", self.name, "got config:", conf
|
||||
self.conf = conf
|
||||
|
||||
def shutdown(self):
|
||||
|
@ -76,55 +79,57 @@ class EmaneNode(EmaneNet):
|
|||
pass
|
||||
|
||||
def setmodel(self, model, config):
|
||||
''' set the EmaneModel associated with this node
|
||||
'''
|
||||
if (self.verbose):
|
||||
self.info("adding model %s" % model._name)
|
||||
if model._type == coreapi.CORE_TLV_REG_WIRELESS:
|
||||
"""
|
||||
set the EmaneModel associated with this node
|
||||
"""
|
||||
logger.info("adding model: %s", model.name)
|
||||
if model.config_type == RegisterTlvs.WIRELESS.value:
|
||||
# EmaneModel really uses values from ConfigurableManager
|
||||
# when buildnemxml() is called, not during init()
|
||||
self.model = model(session=self.session, objid=self.objid,
|
||||
verbose=self.verbose)
|
||||
elif model._type == coreapi.CORE_TLV_REG_MOBILITY:
|
||||
self.mobility = model(session=self.session, objid=self.objid,
|
||||
verbose=self.verbose, values=config)
|
||||
self.model = model(session=self.session, object_id=self.objid)
|
||||
elif model.config_type == RegisterTlvs.MOBILITY.value:
|
||||
self.mobility = model(session=self.session, object_id=self.objid, values=config)
|
||||
|
||||
def setnemid(self, netif, nemid):
|
||||
''' Record an interface to numerical ID mapping. The Emane controller
|
||||
object manages and assigns these IDs for all NEMs.
|
||||
'''
|
||||
"""
|
||||
Record an interface to numerical ID mapping. The Emane controller
|
||||
object manages and assigns these IDs for all NEMs.
|
||||
"""
|
||||
self.nemidmap[netif] = nemid
|
||||
|
||||
def getnemid(self, netif):
|
||||
''' Given an interface, return its numerical ID.
|
||||
'''
|
||||
"""
|
||||
Given an interface, return its numerical ID.
|
||||
"""
|
||||
if netif not in self.nemidmap:
|
||||
return None
|
||||
else:
|
||||
return self.nemidmap[netif]
|
||||
|
||||
def getnemnetif(self, nemid):
|
||||
''' Given a numerical NEM ID, return its interface. This returns the
|
||||
first interface that matches the given NEM ID.
|
||||
'''
|
||||
"""
|
||||
Given a numerical NEM ID, return its interface. This returns the
|
||||
first interface that matches the given NEM ID.
|
||||
"""
|
||||
for netif in self.nemidmap:
|
||||
if self.nemidmap[netif] == nemid:
|
||||
return netif
|
||||
return None
|
||||
|
||||
def netifs(self, sort=True):
|
||||
''' Retrieve list of linked interfaces sorted by node number.
|
||||
'''
|
||||
"""
|
||||
Retrieve list of linked interfaces sorted by node number.
|
||||
"""
|
||||
return sorted(self._netif.values(), key=lambda ifc: ifc.node.objid)
|
||||
|
||||
def buildplatformxmlentry(self, doc):
|
||||
''' Return a dictionary of XML elements describing the NEMs
|
||||
connected to this EmaneNode for inclusion in the platform.xml file.
|
||||
'''
|
||||
"""
|
||||
Return a dictionary of XML elements describing the NEMs
|
||||
connected to this EmaneNode for inclusion in the platform.xml file.
|
||||
"""
|
||||
ret = {}
|
||||
if self.model is None:
|
||||
self.info("warning: EmaneNode %s has no associated model" % \
|
||||
self.name)
|
||||
logger.info("warning: EmaneNode %s has no associated model" % self.name)
|
||||
return ret
|
||||
for netif in self.netifs():
|
||||
# <nem name="NODE-001" definition="rfpipenem.xml">
|
||||
|
@ -139,9 +144,9 @@ class EmaneNode(EmaneNet):
|
|||
return ret
|
||||
|
||||
def buildnemxmlfiles(self, emane):
|
||||
''' Let the configured model build the necessary nem, mac, and phy
|
||||
XMLs.
|
||||
'''
|
||||
"""
|
||||
Let the configured model build the necessary nem, mac, and phy XMLs.
|
||||
"""
|
||||
if self.model is None:
|
||||
return
|
||||
# build XML for overall network (EmaneNode) configs
|
||||
|
@ -166,8 +171,9 @@ class EmaneNode(EmaneNet):
|
|||
self.buildtransportxml(emane, rtype)
|
||||
|
||||
def buildtransportxml(self, emane, type):
|
||||
''' Write a transport XML file for the Virtual or Raw Transport.
|
||||
'''
|
||||
"""
|
||||
Write a transport XML file for the Virtual or Raw Transport.
|
||||
"""
|
||||
transdoc = emane.xmldoc("transport")
|
||||
trans = transdoc.getElementsByTagName("transport").pop()
|
||||
trans.setAttribute("name", "%s Transport" % type.capitalize())
|
||||
|
@ -176,7 +182,7 @@ class EmaneNode(EmaneNet):
|
|||
|
||||
flowcontrol = False
|
||||
names = self.model.getnames()
|
||||
values = emane.getconfig(self.objid, self.model._name,
|
||||
values = emane.getconfig(self.objid, self.model.name,
|
||||
self.model.getdefaultvalues())[1]
|
||||
if "flowcontrolenable" in names and values:
|
||||
i = names.index("flowcontrolenable")
|
||||
|
@ -184,35 +190,30 @@ class EmaneNode(EmaneNet):
|
|||
flowcontrol = True
|
||||
|
||||
if "virtual" in type.lower():
|
||||
if os.path.exists("/dev/net/tun_flowctl"):
|
||||
trans.appendChild(emane.xmlparam(transdoc, "devicepath",
|
||||
"/dev/net/tun_flowctl"))
|
||||
if path.exists("/dev/net/tun_flowctl"):
|
||||
trans.appendChild(emane.xmlparam(transdoc, "devicepath", "/dev/net/tun_flowctl"))
|
||||
else:
|
||||
trans.appendChild(emane.xmlparam(transdoc, "devicepath",
|
||||
"/dev/net/tun"))
|
||||
trans.appendChild(emane.xmlparam(transdoc, "devicepath", "/dev/net/tun"))
|
||||
if flowcontrol:
|
||||
trans.appendChild(emane.xmlparam(transdoc, "flowcontrolenable",
|
||||
"on"))
|
||||
trans.appendChild(emane.xmlparam(transdoc, "flowcontrolenable", "on"))
|
||||
emane.xmlwrite(transdoc, self.transportxmlname(type.lower()))
|
||||
|
||||
def transportxmlname(self, type):
|
||||
''' Return the string name for the Transport XML file,
|
||||
e.g. 'n3transvirtual.xml'
|
||||
'''
|
||||
"""
|
||||
Return the string name for the Transport XML file, e.g. 'n3transvirtual.xml'
|
||||
"""
|
||||
return "n%strans%s.xml" % (self.objid, type)
|
||||
|
||||
|
||||
def installnetifs(self, do_netns=True):
|
||||
''' Install TAP devices into their namespaces. This is done after
|
||||
EMANE daemons have been started, because that is their only chance
|
||||
to bind to the TAPs.
|
||||
'''
|
||||
if self.session.emane.genlocationevents() and \
|
||||
self.session.emane.service is None:
|
||||
"""
|
||||
Install TAP devices into their namespaces. This is done after
|
||||
EMANE daemons have been started, because that is their only chance
|
||||
to bind to the TAPs.
|
||||
"""
|
||||
if self.session.emane.genlocationevents() and self.session.emane.service is None:
|
||||
warntxt = "unable to publish EMANE events because the eventservice "
|
||||
warntxt += "Python bindings failed to load"
|
||||
self.session.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.name,
|
||||
self.objid, warntxt)
|
||||
logger.error(warntxt)
|
||||
|
||||
for netif in self.netifs():
|
||||
if do_netns and "virtual" in netif.transport_type.lower():
|
||||
|
@ -224,98 +225,99 @@ class EmaneNode(EmaneNet):
|
|||
# at this point we register location handlers for generating
|
||||
# EMANE location events
|
||||
netif.poshook = self.setnemposition
|
||||
(x,y,z) = netif.node.position.get()
|
||||
(x, y, z) = netif.node.position.get()
|
||||
self.setnemposition(netif, x, y, z)
|
||||
|
||||
def deinstallnetifs(self):
|
||||
''' Uninstall TAP devices. This invokes their shutdown method for
|
||||
any required cleanup; the device may be actually removed when
|
||||
emanetransportd terminates.
|
||||
'''
|
||||
"""
|
||||
Uninstall TAP devices. This invokes their shutdown method for
|
||||
any required cleanup; the device may be actually removed when
|
||||
emanetransportd terminates.
|
||||
"""
|
||||
for netif in self.netifs():
|
||||
if "virtual" in netif.transport_type.lower():
|
||||
netif.shutdown()
|
||||
netif.poshook = None
|
||||
|
||||
def setnemposition(self, netif, x, y, z):
|
||||
''' Publish a NEM location change event using the EMANE event service.
|
||||
'''
|
||||
"""
|
||||
Publish a NEM location change event using the EMANE event service.
|
||||
"""
|
||||
if self.session.emane.service is None:
|
||||
if self.verbose:
|
||||
self.info("position service not available")
|
||||
logger.info("position service not available")
|
||||
return
|
||||
nemid = self.getnemid(netif)
|
||||
nemid = self.getnemid(netif)
|
||||
ifname = netif.localname
|
||||
if nemid is None:
|
||||
self.info("nemid for %s is unknown" % ifname)
|
||||
logger.info("nemid for %s is unknown" % ifname)
|
||||
return
|
||||
(lat, long, alt) = self.session.location.getgeo(x, y, z)
|
||||
if self.verbose:
|
||||
self.info("setnemposition %s (%s) x,y,z=(%d,%d,%s)"
|
||||
"(%.6f,%.6f,%.6f)" % \
|
||||
(ifname, nemid, x, y, z, lat, long, alt))
|
||||
if self.session.emane.version >= self.session.emane.EMANE091:
|
||||
logger.info("setnemposition %s (%s) x,y,z=(%d,%d,%s)"
|
||||
"(%.6f,%.6f,%.6f)" % \
|
||||
(ifname, nemid, x, y, z, lat, long, alt))
|
||||
if emane.VERSION >= emane.EMANE091:
|
||||
event = LocationEvent()
|
||||
else:
|
||||
event = emaneeventlocation.EventLocation(1)
|
||||
# altitude must be an integer or warning is printed
|
||||
# unused: yaw, pitch, roll, azimuth, elevation, velocity
|
||||
alt = int(round(alt))
|
||||
if self.session.emane.version >= self.session.emane.EMANE091:
|
||||
if emane.VERSION >= emane.EMANE091:
|
||||
event.append(nemid, latitude=lat, longitude=long, altitude=alt)
|
||||
self.session.emane.service.publish(0, event)
|
||||
else:
|
||||
event.set(0, nemid, lat, long, alt)
|
||||
self.session.emane.service.publish(emaneeventlocation.EVENT_ID,
|
||||
emaneeventservice.PLATFORMID_ANY,
|
||||
emaneeventservice.NEMID_ANY,
|
||||
emaneeventservice.COMPONENTID_ANY,
|
||||
event.export())
|
||||
self.session.emane.service.publish(
|
||||
emaneeventlocation.EVENT_ID,
|
||||
emaneeventservice.PLATFORMID_ANY,
|
||||
emaneeventservice.NEMID_ANY,
|
||||
emaneeventservice.COMPONENTID_ANY,
|
||||
event.export()
|
||||
)
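# --- illustrative sketch (not part of this commit) ---
# The 0.9.1+ publish path above in isolation: an emanesh EventService bound to
# an event channel publishes a LocationEvent keyed by NEM id.  The multicast
# group, port and device below are example values only, and the emanesh
# bindings must be installed for this to run.
from emanesh.events import EventService, LocationEvent

service = EventService(("224.1.2.8", 45703, "emanenode0"))  # example channel
event = LocationEvent()
event.append(1, latitude=40.025, longitude=-74.315, altitude=3)
service.publish(0, event)  # 0 matches the usage above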
|
||||
|
||||
def setnempositions(self, moved_netifs):
|
||||
''' Several NEMs have moved, from e.g. a WaypointMobilityModel
|
||||
calculation. Generate an EMANE Location Event having several
|
||||
entries for each netif that has moved.
|
||||
'''
|
||||
"""
|
||||
Several NEMs have moved, from e.g. a WaypointMobilityModel
|
||||
calculation. Generate an EMANE Location Event having several
|
||||
entries for each netif that has moved.
|
||||
"""
|
||||
if len(moved_netifs) == 0:
|
||||
return
|
||||
if self.session.emane.service is None:
|
||||
if self.verbose:
|
||||
self.info("position service not available")
|
||||
logger.info("position service not available")
|
||||
return
|
||||
|
||||
if self.session.emane.version >= self.session.emane.EMANE091:
|
||||
if emane.VERSION >= emane.EMANE091:
|
||||
event = LocationEvent()
|
||||
else:
|
||||
event = emaneeventlocation.EventLocation(len(moved_netifs))
|
||||
i = 0
|
||||
for netif in moved_netifs:
|
||||
nemid = self.getnemid(netif)
|
||||
nemid = self.getnemid(netif)
|
||||
ifname = netif.localname
|
||||
if nemid is None:
|
||||
self.info("nemid for %s is unknown" % ifname)
|
||||
logger.info("nemid for %s is unknown" % ifname)
|
||||
continue
|
||||
(x, y, z) = netif.node.getposition()
|
||||
(lat, long, alt) = self.session.location.getgeo(x, y, z)
|
||||
if self.verbose:
|
||||
self.info("setnempositions %d %s (%s) x,y,z=(%d,%d,%s)"
|
||||
"(%.6f,%.6f,%.6f)" % \
|
||||
(i, ifname, nemid, x, y, z, lat, long, alt))
|
||||
logger.info("setnempositions %d %s (%s) x,y,z=(%d,%d,%s)"
|
||||
"(%.6f,%.6f,%.6f)" %
|
||||
(i, ifname, nemid, x, y, z, lat, long, alt))
|
||||
# altitude must be an integer or warning is printed
|
||||
alt = int(round(alt))
|
||||
if self.session.emane.version >= self.session.emane.EMANE091:
|
||||
if emane.VERSION >= emane.EMANE091:
|
||||
event.append(nemid, latitude=lat, longitude=long, altitude=alt)
|
||||
else:
|
||||
event.set(i, nemid, lat, long, alt)
|
||||
i += 1
|
||||
|
||||
if self.session.emane.version >= self.session.emane.EMANE091:
|
||||
if emane.VERSION >= emane.EMANE091:
|
||||
self.session.emane.service.publish(0, event)
|
||||
else:
|
||||
self.session.emane.service.publish(emaneeventlocation.EVENT_ID,
|
||||
emaneeventservice.PLATFORMID_ANY,
|
||||
emaneeventservice.NEMID_ANY,
|
||||
emaneeventservice.COMPONENTID_ANY,
|
||||
event.export())
|
||||
|
||||
|
||||
self.session.emane.service.publish(
|
||||
emaneeventlocation.EVENT_ID,
|
||||
emaneeventservice.PLATFORMID_ANY,
|
||||
emaneeventservice.NEMID_ANY,
|
||||
emaneeventservice.COMPONENTID_ANY,
|
||||
event.export()
|
||||
)
|
||||
|
|
|
@ -1,33 +1,28 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2014 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
# Harry Bullen <hbullen@i-a-i.com>
|
||||
#
|
||||
'''
|
||||
"""
|
||||
rfpipe.py: EMANE RF-PIPE model for CORE
|
||||
'''
|
||||
"""
|
||||
|
||||
from core import emane
|
||||
from core.emane.emanemodel import EmaneModel
|
||||
from core.emane.universal import EmaneUniversalModel
|
||||
from core.enumerations import ConfigDataTypes
|
||||
from core.misc import log
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
import sys
|
||||
import string
|
||||
try:
|
||||
from emanesh.events import EventService
|
||||
except:
|
||||
pass
|
||||
from core.api import coreapi
|
||||
from core.constants import *
|
||||
from emane import Emane, EmaneModel
|
||||
from universal import EmaneUniversalModel
|
||||
except ImportError:
|
||||
logger.error("error importing emanesh")
|
||||
|
||||
|
||||
class EmaneRfPipeModel(EmaneModel):
|
||||
def __init__(self, session, objid = None, verbose = False):
|
||||
EmaneModel.__init__(self, session, objid, verbose)
|
||||
def __init__(self, session, object_id=None):
|
||||
EmaneModel.__init__(self, session, object_id)
|
||||
|
||||
# model name
|
||||
_name = "emane_rfpipe"
|
||||
if Emane.version >= Emane.EMANE091:
|
||||
name = "emane_rfpipe"
|
||||
if emane.VERSION >= emane.EMANE091:
|
||||
xml_path = '/usr/share/emane/xml/models/mac/rfpipe'
|
||||
else:
|
||||
xml_path = "/usr/share/emane/models/rfpipe/xml"
|
||||
|
@ -36,68 +31,69 @@ class EmaneRfPipeModel(EmaneModel):
|
|||
# ( 'name', 'type', 'default', 'possible-value-list', 'caption')
|
||||
# MAC parameters
|
||||
_confmatrix_mac_base = [
|
||||
("enablepromiscuousmode", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
("enablepromiscuousmode", ConfigDataTypes.BOOL.value, '0',
|
||||
'True,False', 'enable promiscuous mode'),
|
||||
("datarate", coreapi.CONF_DATA_TYPE_UINT32, '1M',
|
||||
("datarate", ConfigDataTypes.UINT32.value, '1M',
|
||||
'', 'data rate (bps)'),
|
||||
("flowcontrolenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
("flowcontrolenable", ConfigDataTypes.BOOL.value, '0',
|
||||
'On,Off', 'enable traffic flow control'),
|
||||
("flowcontroltokens", coreapi.CONF_DATA_TYPE_UINT16, '10',
|
||||
("flowcontroltokens", ConfigDataTypes.UINT16.value, '10',
|
||||
'', 'number of flow control tokens'),
|
||||
("pcrcurveuri", coreapi.CONF_DATA_TYPE_STRING,
|
||||
("pcrcurveuri", ConfigDataTypes.STRING.value,
|
||||
'%s/rfpipepcr.xml' % xml_path,
|
||||
'', 'SINR/PCR curve file'),
|
||||
]
|
||||
_confmatrix_mac_081 = [
|
||||
("jitter", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
("jitter", ConfigDataTypes.FLOAT.value, '0.0',
|
||||
'', 'transmission jitter (usec)'),
|
||||
("delay", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
("delay", ConfigDataTypes.FLOAT.value, '0.0',
|
||||
'', 'transmission delay (usec)'),
|
||||
("transmissioncontrolmap", coreapi.CONF_DATA_TYPE_STRING, '',
|
||||
("transmissioncontrolmap", ConfigDataTypes.STRING.value, '',
|
||||
'', 'tx control map (nem:rate:freq:tx_dBm)'),
|
||||
("enabletighttiming", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
("enabletighttiming", ConfigDataTypes.BOOL.value, '0',
|
||||
'On,Off', 'enable tight timing for pkt delay'),
|
||||
]
|
||||
_confmatrix_mac_091 = [
|
||||
("jitter", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
("jitter", ConfigDataTypes.FLOAT.value, '0.0',
|
||||
'', 'transmission jitter (sec)'),
|
||||
("delay", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
("delay", ConfigDataTypes.FLOAT.value, '0.0',
|
||||
'', 'transmission delay (sec)'),
|
||||
('radiometricenable', coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
('radiometricenable', ConfigDataTypes.BOOL.value, '0',
|
||||
'On,Off', 'report radio metrics via R2RI'),
|
||||
('radiometricreportinterval', coreapi.CONF_DATA_TYPE_FLOAT, '1.0',
|
||||
('radiometricreportinterval', ConfigDataTypes.FLOAT.value, '1.0',
|
||||
'', 'R2RI radio metric report interval (sec)'),
|
||||
('neighbormetricdeletetime', coreapi.CONF_DATA_TYPE_FLOAT, '60.0',
|
||||
('neighbormetricdeletetime', ConfigDataTypes.FLOAT.value, '60.0',
|
||||
'', 'R2RI neighbor table inactivity time (sec)'),
|
||||
]
|
||||
if Emane.version >= Emane.EMANE091:
|
||||
if emane.VERSION >= emane.EMANE091:
|
||||
_confmatrix_mac = _confmatrix_mac_base + _confmatrix_mac_091
|
||||
else:
|
||||
_confmatrix_mac = _confmatrix_mac_base + _confmatrix_mac_081
|
||||
|
||||
# PHY parameters from Universal PHY
|
||||
_confmatrix_phy = EmaneUniversalModel._confmatrix
|
||||
_confmatrix_phy = EmaneUniversalModel.config_matrix
|
||||
|
||||
_confmatrix = _confmatrix_mac + _confmatrix_phy
|
||||
config_matrix = _confmatrix_mac + _confmatrix_phy
|
||||
|
||||
# value groupings
|
||||
_confgroups = "RF-PIPE MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" \
|
||||
% ( len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(_confmatrix))
|
||||
config_groups = "RF-PIPE MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" % (
|
||||
len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(config_matrix))
|
||||
|
||||
def buildnemxmlfiles(self, e, ifc):
|
||||
''' Build the necessary nem, mac, and phy XMLs in the given path.
|
||||
If an individual NEM has a nonstandard config, we need to build
|
||||
that file also. Otherwise the WLAN-wide nXXemane_rfpipenem.xml,
|
||||
nXXemane_rfpipemac.xml, nXXemane_rfpipephy.xml are used.
|
||||
'''
|
||||
values = e.getifcconfig(self.objid, self._name,
|
||||
"""
|
||||
Build the necessary nem, mac, and phy XMLs in the given path.
|
||||
If an individual NEM has a nonstandard config, we need to build
|
||||
that file also. Otherwise the WLAN-wide nXXemane_rfpipenem.xml,
|
||||
nXXemane_rfpipemac.xml, nXXemane_rfpipephy.xml are used.
|
||||
"""
|
||||
values = e.getifcconfig(self.object_id, self.name,
|
||||
self.getdefaultvalues(), ifc)
|
||||
if values is None:
|
||||
return
|
||||
nemdoc = e.xmldoc("nem")
|
||||
nem = nemdoc.getElementsByTagName("nem").pop()
|
||||
nem.setAttribute("name", "RF-PIPE NEM")
|
||||
e.appendtransporttonem(nemdoc, nem, self.objid, ifc)
|
||||
e.appendtransporttonem(nemdoc, nem, self.object_id, ifc)
|
||||
mactag = nemdoc.createElement("mac")
|
||||
mactag.setAttribute("definition", self.macxmlname(ifc))
|
||||
nem.appendChild(mactag)
|
||||
|
@ -115,7 +111,7 @@ class EmaneRfPipeModel(EmaneModel):
|
|||
mac.setAttribute("name", "RF-PIPE MAC")
|
||||
mac.setAttribute("library", "rfpipemaclayer")
|
||||
if e.version < e.EMANE091 and \
|
||||
self.valueof("transmissioncontrolmap", values) is "":
|
||||
self.valueof("transmissioncontrolmap", values) is "":
|
||||
macnames.remove("transmissioncontrolmap")
|
||||
# EMANE 0.7.4 support
|
||||
if e.version == e.EMANE074:
|
||||
|
@ -124,10 +120,8 @@ class EmaneRfPipeModel(EmaneModel):
|
|||
values = list(values)
|
||||
values[i] = self.emane074_fixup(values[i], 1000)
|
||||
# append MAC options to macdoc
|
||||
map(lambda n: mac.appendChild(e.xmlparam(macdoc, n, \
|
||||
self.valueof(n, values))), macnames)
|
||||
map(lambda n: mac.appendChild(e.xmlparam(macdoc, n, self.valueof(n, values))), macnames)
|
||||
e.xmlwrite(macdoc, self.macxmlname(ifc))
|
||||
|
||||
phydoc = EmaneUniversalModel.getphydoc(e, self, values, phynames)
|
||||
e.xmlwrite(phydoc, self.phyxmlname(ifc))
|
||||
|
||||
|
|
|
@ -1,91 +1,85 @@
|
|||
|
||||
#
|
||||
# CORE
|
||||
# Copyright (c)2013 Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Name <email@company.com>
|
||||
#
|
||||
'''
|
||||
"""
|
||||
tdma.py: EMANE TDMA model bindings for CORE
|
||||
'''
|
||||
"""
|
||||
|
||||
from core import emane
|
||||
from core.emane.emanemodel import EmaneModel
|
||||
from core.emane.universal import EmaneUniversalModel
|
||||
from core.enumerations import ConfigDataTypes
|
||||
from core.misc import log
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
import sys
|
||||
import string
|
||||
try:
|
||||
from emanesh.events import EventService
|
||||
except:
|
||||
pass
|
||||
from core.api import coreapi
|
||||
from core.constants import *
|
||||
from emane import Emane, EmaneModel
|
||||
from universal import EmaneUniversalModel
|
||||
logger.error("error importing emanesh")
|
||||
|
||||
|
||||
class EmaneTdmaModel(EmaneModel):
|
||||
def __init__(self, session, objid = None, verbose = False):
|
||||
EmaneModel.__init__(self, session, objid, verbose)
|
||||
def __init__(self, session, object_id=None):
|
||||
EmaneModel.__init__(self, session, object_id)
|
||||
|
||||
# model name
|
||||
_name = "emane_tdma"
|
||||
if Emane.version >= Emane.EMANE101:
|
||||
name = "emane_tdma"
|
||||
if emane.VERSION >= emane.EMANE101:
|
||||
xml_path = '/usr/share/emane/xml/models/mac/tdmaeventscheduler'
|
||||
else:
|
||||
raise Exception("EMANE TDMA requires EMANE 1.0.1 or greater")
|
||||
|
||||
|
||||
|
||||
# MAC parameters
|
||||
_confmatrix_mac = [
|
||||
("enablepromiscuousmode", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
("enablepromiscuousmode", ConfigDataTypes.BOOL.value, '0',
|
||||
'True,False', 'enable promiscuous mode'),
|
||||
("flowcontrolenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
("flowcontrolenable", ConfigDataTypes.BOOL.value, '0',
|
||||
'On,Off', 'enable traffic flow control'),
|
||||
("flowcontroltokens", coreapi.CONF_DATA_TYPE_UINT16, '10',
|
||||
("flowcontroltokens", ConfigDataTypes.UINT16.value, '10',
|
||||
'', 'number of flow control tokens'),
|
||||
("fragmentcheckthreshold", coreapi.CONF_DATA_TYPE_UINT16, '2',
|
||||
("fragmentcheckthreshold", ConfigDataTypes.UINT16.value, '2',
|
||||
'', 'rate in seconds at which to check if fragment reassembly efforts should be abandoned'),
|
||||
("fragmenttimeoutthreshold", coreapi.CONF_DATA_TYPE_UINT16, '5',
|
||||
("fragmenttimeoutthreshold", ConfigDataTypes.UINT16.value, '5',
|
||||
'', 'threshold in seconds to wait for another packet fragment for reassembly'),
|
||||
('neighbormetricdeletetime', coreapi.CONF_DATA_TYPE_FLOAT, '60.0',
|
||||
('neighbormetricdeletetime', ConfigDataTypes.FLOAT.value, '60.0',
|
||||
'', 'neighbor RF reception timeout for removal from neighbor table (sec)'),
|
||||
('neighbormetricupdateinterval', coreapi.CONF_DATA_TYPE_FLOAT, '1.0',
|
||||
('neighbormetricupdateinterval', ConfigDataTypes.FLOAT.value, '1.0',
|
||||
'', 'neighbor table update interval (sec)'),
|
||||
("pcrcurveuri", coreapi.CONF_DATA_TYPE_STRING, '%s/tdmabasemodelpcr.xml' % xml_path,
|
||||
("pcrcurveuri", ConfigDataTypes.STRING.value, '%s/tdmabasemodelpcr.xml' % xml_path,
|
||||
'', 'SINR/PCR curve file'),
|
||||
("queue.aggregationenable", coreapi.CONF_DATA_TYPE_BOOL, '1',
|
||||
("queue.aggregationenable", ConfigDataTypes.BOOL.value, '1',
|
||||
'On,Off', 'enable transmit packet aggregation'),
|
||||
('queue.aggregationslotthreshold', coreapi.CONF_DATA_TYPE_FLOAT, '90.0',
|
||||
('queue.aggregationslotthreshold', ConfigDataTypes.FLOAT.value, '90.0',
|
||||
'', 'percentage of a slot that must be filled in order to conclude aggregation'),
|
||||
("queue.depth", coreapi.CONF_DATA_TYPE_UINT16, '256',
|
||||
("queue.depth", ConfigDataTypes.UINT16.value, '256',
|
||||
'', 'size of the per service class downstream packet queues (packets)'),
|
||||
("queue.fragmentationenable", coreapi.CONF_DATA_TYPE_BOOL, '1',
|
||||
("queue.fragmentationenable", ConfigDataTypes.BOOL.value, '1',
|
||||
'On,Off', 'enable packet fragmentation (over multiple slots)'),
|
||||
("queue.strictdequeueenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
("queue.strictdequeueenable", ConfigDataTypes.BOOL.value, '0',
|
||||
'On,Off', 'enable strict dequeueing to specified queues only'),
|
||||
]
|
||||
|
||||
# PHY parameters from Universal PHY
|
||||
_confmatrix_phy = EmaneUniversalModel._confmatrix
|
||||
_confmatrix_phy = EmaneUniversalModel.config_matrix
|
||||
|
||||
_confmatrix = _confmatrix_mac + _confmatrix_phy
|
||||
config_matrix = _confmatrix_mac + _confmatrix_phy
|
||||
|
||||
# value groupings
|
||||
_confgroups = "TDMA MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" % \
|
||||
(len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(_confmatrix))
|
||||
config_groups = "TDMA MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" % (
|
||||
len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(config_matrix))
|
||||
|
||||
def buildnemxmlfiles(self, e, ifc):
|
||||
''' Build the necessary nem, mac, and phy XMLs in the given path.
|
||||
If an individual NEM has a nonstandard config, we need to build
|
||||
that file also. Otherwise the WLAN-wide nXXemane_tdmanem.xml,
|
||||
nXXemane_tdmamac.xml, nXXemane_tdmaphy.xml are used.
|
||||
'''
|
||||
values = e.getifcconfig(self.objid, self._name,
|
||||
self.getdefaultvalues(), ifc)
|
||||
"""
|
||||
Build the necessary nem, mac, and phy XMLs in the given path.
|
||||
If an individual NEM has a nonstandard config, we need to build
|
||||
that file also. Otherwise the WLAN-wide nXXemane_tdmanem.xml,
|
||||
nXXemane_tdmamac.xml, nXXemane_tdmaphy.xml are used.
|
||||
"""
|
||||
values = e.getifcconfig(self.object_id, self.name, self.getdefaultvalues(), ifc)
|
||||
if values is None:
|
||||
return
|
||||
nemdoc = e.xmldoc("nem")
|
||||
nem = nemdoc.getElementsByTagName("nem").pop()
|
||||
nem.setAttribute("name", "TDMA NEM")
|
||||
e.appendtransporttonem(nemdoc, nem, self.objid, ifc)
|
||||
e.appendtransporttonem(nemdoc, nem, self.object_id, ifc)
|
||||
mactag = nemdoc.createElement("mac")
|
||||
mactag.setAttribute("definition", self.macxmlname(ifc))
|
||||
nem.appendChild(mactag)
|
||||
|
@ -105,10 +99,8 @@ class EmaneTdmaModel(EmaneModel):
|
|||
mac.setAttribute("name", "TDMA MAC")
|
||||
mac.setAttribute("library", "tdmaeventschedulerradiomodel")
|
||||
# append MAC options to macdoc
|
||||
map(lambda n: mac.appendChild(e.xmlparam(macdoc, n, \
|
||||
self.valueof(n, values))), macnames)
|
||||
map(lambda n: mac.appendChild(e.xmlparam(macdoc, n, self.valueof(n, values))), macnames)
|
||||
e.xmlwrite(macdoc, self.macxmlname(ifc))
|
||||
|
||||
phydoc = EmaneUniversalModel.getphydoc(e, self, values, phynames)
|
||||
e.xmlwrite(phydoc, self.phyxmlname(ifc))
|
||||
|
||||
|
|
|
@ -1,99 +1,97 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2014 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
"""
|
||||
universal.py: EMANE Universal PHY model for CORE. Enumerates configuration items
|
||||
used for the Universal PHY.
|
||||
'''
|
||||
"""
|
||||
|
||||
from core import emane
|
||||
from core.emane.emanemodel import EmaneModel
|
||||
from core.enumerations import ConfigDataTypes
|
||||
from core.misc import log
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
import sys
|
||||
import string
|
||||
try:
|
||||
from emanesh.events import EventService
|
||||
except:
|
||||
pass
|
||||
from core.api import coreapi
|
||||
from core.constants import *
|
||||
from emane import Emane, EmaneModel
|
||||
except ImportError:
|
||||
logger.error("error importing emanesh")
|
||||
|
||||
|
||||
class EmaneUniversalModel(EmaneModel):
|
||||
''' This Univeral PHY model is meant to be imported by other models,
|
||||
not instantiated.
|
||||
'''
|
||||
def __init__(self, session, objid = None, verbose = False):
|
||||
raise SyntaxError
|
||||
"""
|
||||
This Universal PHY model is meant to be imported by other models,
|
||||
not instantiated.
|
||||
"""
|
||||
|
||||
_name = "emane_universal"
|
||||
def __init__(self, session, object_id=None):
|
||||
raise NotImplementedError("Cannot use this class directly")
|
||||
|
||||
name = "emane_universal"
|
||||
_xmlname = "universalphy"
|
||||
_xmllibrary = "universalphylayer"
|
||||
|
||||
# universal PHY parameters
|
||||
_confmatrix_base = [
|
||||
("bandwidth", coreapi.CONF_DATA_TYPE_UINT64, '1M',
|
||||
'', 'rf bandwidth (hz)'),
|
||||
("frequency", coreapi.CONF_DATA_TYPE_UINT64, '2.347G',
|
||||
'','frequency (Hz)'),
|
||||
("frequencyofinterest", coreapi.CONF_DATA_TYPE_UINT64, '2.347G',
|
||||
'','frequency of interest (Hz)'),
|
||||
("subid", coreapi.CONF_DATA_TYPE_UINT16, '1',
|
||||
'','subid'),
|
||||
("systemnoisefigure", coreapi.CONF_DATA_TYPE_FLOAT, '4.0',
|
||||
'','system noise figure (dB)'),
|
||||
("txpower", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
'','transmit power (dBm)'),
|
||||
("bandwidth", ConfigDataTypes.UINT64.value, '1M',
|
||||
'', 'RF bandwidth (Hz)'),
|
||||
("frequency", ConfigDataTypes.UINT64.value, '2.347G',
|
||||
'', 'frequency (Hz)'),
|
||||
("frequencyofinterest", ConfigDataTypes.UINT64.value, '2.347G',
|
||||
'', 'frequency of interest (Hz)'),
|
||||
("subid", ConfigDataTypes.UINT16.value, '1',
|
||||
'', 'subid'),
|
||||
("systemnoisefigure", ConfigDataTypes.FLOAT.value, '4.0',
|
||||
'', 'system noise figure (dB)'),
|
||||
("txpower", ConfigDataTypes.FLOAT.value, '0.0',
|
||||
'', 'transmit power (dBm)'),
|
||||
]
|
||||
_confmatrix_081 = [
|
||||
("antennagain", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
'','antenna gain (dBi)'),
|
||||
("antennaazimuth", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
'','antenna azimuth (deg)'),
|
||||
("antennaelevation", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
'','antenna elevation (deg)'),
|
||||
("antennaprofileid", coreapi.CONF_DATA_TYPE_STRING, '1',
|
||||
'','antenna profile ID'),
|
||||
("antennaprofilemanifesturi", coreapi.CONF_DATA_TYPE_STRING, '',
|
||||
'','antenna profile manifest URI'),
|
||||
("antennaprofileenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'On,Off','antenna profile mode'),
|
||||
("defaultconnectivitymode", coreapi.CONF_DATA_TYPE_BOOL, '1',
|
||||
'On,Off','default connectivity'),
|
||||
("frequencyofinterestfilterenable", coreapi.CONF_DATA_TYPE_BOOL, '1',
|
||||
'On,Off','frequency of interest filter enable'),
|
||||
("noiseprocessingmode", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'On,Off','enable noise processing'),
|
||||
("pathlossmode", coreapi.CONF_DATA_TYPE_STRING, '2ray',
|
||||
'pathloss,2ray,freespace','path loss mode'),
|
||||
("antennagain", ConfigDataTypes.FLOAT.value, '0.0',
|
||||
'', 'antenna gain (dBi)'),
|
||||
("antennaazimuth", ConfigDataTypes.FLOAT.value, '0.0',
|
||||
'', 'antenna azimuth (deg)'),
|
||||
("antennaelevation", ConfigDataTypes.FLOAT.value, '0.0',
|
||||
'', 'antenna elevation (deg)'),
|
||||
("antennaprofileid", ConfigDataTypes.STRING.value, '1',
|
||||
'', 'antenna profile ID'),
|
||||
("antennaprofilemanifesturi", ConfigDataTypes.STRING.value, '',
|
||||
'', 'antenna profile manifest URI'),
|
||||
("antennaprofileenable", ConfigDataTypes.BOOL.value, '0',
|
||||
'On,Off', 'antenna profile mode'),
|
||||
("defaultconnectivitymode", ConfigDataTypes.BOOL.value, '1',
|
||||
'On,Off', 'default connectivity'),
|
||||
("frequencyofinterestfilterenable", ConfigDataTypes.BOOL.value, '1',
|
||||
'On,Off', 'frequency of interest filter enable'),
|
||||
("noiseprocessingmode", ConfigDataTypes.BOOL.value, '0',
|
||||
'On,Off', 'enable noise processing'),
|
||||
("pathlossmode", ConfigDataTypes.STRING.value, '2ray',
|
||||
'pathloss,2ray,freespace', 'path loss mode'),
|
||||
]
|
||||
_confmatrix_091 = [
|
||||
("fixedantennagain", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
'','antenna gain (dBi)'),
|
||||
("fixedantennagainenable", coreapi.CONF_DATA_TYPE_BOOL, '1',
|
||||
'On,Off','enable fixed antenna gain'),
|
||||
("noisemode", coreapi.CONF_DATA_TYPE_STRING, 'none',
|
||||
'none,all,outofband','noise processing mode'),
|
||||
("noisebinsize", coreapi.CONF_DATA_TYPE_UINT64, '20',
|
||||
'','noise bin size in microseconds'),
|
||||
("propagationmodel", coreapi.CONF_DATA_TYPE_STRING, '2ray',
|
||||
'precomputed,2ray,freespace','path loss mode'),
|
||||
("fixedantennagain", ConfigDataTypes.FLOAT.value, '0.0',
|
||||
'', 'antenna gain (dBi)'),
|
||||
("fixedantennagainenable", ConfigDataTypes.BOOL.value, '1',
|
||||
'On,Off', 'enable fixed antenna gain'),
|
||||
("noisemode", ConfigDataTypes.STRING.value, 'none',
|
||||
'none,all,outofband', 'noise processing mode'),
|
||||
("noisebinsize", ConfigDataTypes.UINT64.value, '20',
|
||||
'', 'noise bin size in microseconds'),
|
||||
("propagationmodel", ConfigDataTypes.STRING.value, '2ray',
|
||||
'precomputed,2ray,freespace', 'path loss mode'),
|
||||
]
|
||||
if Emane.version >= Emane.EMANE091:
|
||||
_confmatrix = _confmatrix_base + _confmatrix_091
|
||||
if emane.VERSION >= emane.EMANE091:
|
||||
config_matrix = _confmatrix_base + _confmatrix_091
|
||||
else:
|
||||
_confmatrix = _confmatrix_base + _confmatrix_081
|
||||
config_matrix = _confmatrix_base + _confmatrix_081
|
||||
|
||||
# old parameters
|
||||
_confmatrix_ver074 = [
|
||||
("antennaazimuthbeamwidth", coreapi.CONF_DATA_TYPE_FLOAT, '360.0',
|
||||
'','azimith beam width (deg)'),
|
||||
("antennaelevationbeamwidth", coreapi.CONF_DATA_TYPE_FLOAT, '180.0',
|
||||
'','elevation beam width (deg)'),
|
||||
("antennatype", coreapi.CONF_DATA_TYPE_STRING, 'omnidirectional',
|
||||
'omnidirectional,unidirectional','antenna type'),
|
||||
]
|
||||
("antennaazimuthbeamwidth", ConfigDataTypes.FLOAT.value, '360.0',
|
||||
'', 'azimuth beam width (deg)'),
|
||||
("antennaelevationbeamwidth", ConfigDataTypes.FLOAT.value, '180.0',
|
||||
'', 'elevation beam width (deg)'),
|
||||
("antennatype", ConfigDataTypes.STRING.value, 'omnidirectional',
|
||||
'omnidirectional,unidirectional', 'antenna type'),
|
||||
]
|
||||
|
||||
# parameters that require unit conversion for 0.7.4
|
||||
_update_ver074 = ("bandwidth", "frequency", "frequencyofinterest")
|
||||
|
@ -102,16 +100,15 @@ class EmaneUniversalModel(EmaneModel):
|
|||
"antennaprofilemanifesturi",
|
||||
"frequencyofinterestfilterenable")
|
||||
|
||||
|
||||
@classmethod
|
||||
def getphydoc(cls, e, mac, values, phynames):
|
||||
phydoc = e.xmldoc("phy")
|
||||
phy = phydoc.getElementsByTagName("phy").pop()
|
||||
phy.setAttribute("name", cls._xmlname)
|
||||
if e.version < e.EMANE091:
|
||||
if emane.VERSION < emane.EMANE091:
|
||||
phy.setAttribute("library", cls._xmllibrary)
|
||||
# EMANE 0.7.4 support - to be removed when 0.7.4 support is deprecated
|
||||
if e.version == e.EMANE074:
|
||||
if emane.VERSION == emane.EMANE074:
|
||||
names = mac.getnames()
|
||||
values = list(values)
|
||||
phynames = list(phynames)
|
||||
|
@ -128,7 +125,7 @@ class EmaneUniversalModel(EmaneModel):
|
|||
phy.appendChild(e.xmlparam(phydoc, old[0], old[2]))
|
||||
|
||||
frequencies = None
|
||||
if e.version >= e.EMANE091:
|
||||
if emane.VERSION >= emane.EMANE091:
|
||||
name = "frequencyofinterest"
|
||||
value = mac.valueof(name, values)
|
||||
frequencies = cls.valuestrtoparamlist(phydoc, name, value)
|
||||
|
@ -137,10 +134,7 @@ class EmaneUniversalModel(EmaneModel):
|
|||
phynames.remove("frequencyofinterest")
|
||||
|
||||
# append all PHY options to phydoc
|
||||
map( lambda n: phy.appendChild(e.xmlparam(phydoc, n, \
|
||||
mac.valueof(n, values))), phynames)
|
||||
map(lambda n: phy.appendChild(e.xmlparam(phydoc, n, mac.valueof(n, values))), phynames)
|
||||
if frequencies:
|
||||
phy.appendChild(frequencies)
|
||||
return phydoc
|
||||
|
||||
|
||||
|
|
315
daemon/core/enumerations.py
Normal file
|
@ -0,0 +1,315 @@
|
|||
"""
|
||||
Contains all legacy enumerations for interacting with legacy CORE code.
|
||||
"""
|
||||
|
||||
from enum import Enum
|
||||
|
||||
CORE_API_VERSION = "1.23"
|
||||
CORE_API_PORT = 4038
|
||||
|
||||
|
||||
class MessageTypes(Enum):
|
||||
"""
|
||||
CORE message types.
|
||||
"""
|
||||
NODE = 0x01
|
||||
LINK = 0x02
|
||||
EXECUTE = 0x03
|
||||
REGISTER = 0x04
|
||||
CONFIG = 0x05
|
||||
FILE = 0x06
|
||||
INTERFACE = 0x07
|
||||
EVENT = 0x08
|
||||
SESSION = 0x09
|
||||
EXCEPTION = 0x0A
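# --- illustrative sketch (not part of this commit, runs in the context of this module) ---
# The members round-trip against the legacy numeric codes, so new code can
# compare named members while the wire format keeps exchanging raw integers.
assert MessageTypes.NODE.value == 0x01
assert MessageTypes(0x02) is MessageTypes.LINK
assert MessageTypes["EXECUTE"] is MessageTypes.EXECUTE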
|
||||
|
||||
|
||||
class MessageFlags(Enum):
|
||||
"""
|
||||
CORE message flags.
|
||||
"""
|
||||
ADD = 0x01
|
||||
DELETE = 0x02
|
||||
CRI = 0x04
|
||||
LOCAL = 0x08
|
||||
STRING = 0x10
|
||||
TEXT = 0x20
|
||||
TTY = 0x40
|
||||
|
||||
|
||||
class NodeTlvs(Enum):
|
||||
"""
|
||||
Node type, length, value enumerations.
|
||||
"""
|
||||
NUMBER = 0x01
|
||||
TYPE = 0x02
|
||||
NAME = 0x03
|
||||
IP_ADDRESS = 0x04
|
||||
MAC_ADDRESS = 0x05
|
||||
IP6_ADDRESS = 0x06
|
||||
MODEL = 0x07
|
||||
EMULATION_SERVER = 0x08
|
||||
SESSION = 0x0A
|
||||
X_POSITION = 0x20
|
||||
Y_POSITION = 0x21
|
||||
CANVAS = 0x22
|
||||
EMULATION_ID = 0x23
|
||||
NETWORK_ID = 0x24
|
||||
SERVICES = 0x25
|
||||
LATITUDE = 0x30
|
||||
LONGITUDE = 0x31
|
||||
ALTITUDE = 0x32
|
||||
ICON = 0x42
|
||||
OPAQUE = 0x50
|
||||
|
||||
|
||||
class NodeTypes(Enum):
|
||||
"""
|
||||
Node types.
|
||||
"""
|
||||
DEFAULT = 0
|
||||
PHYSICAL = 1
|
||||
XEN = 2
|
||||
TBD = 3
|
||||
SWITCH = 4
|
||||
HUB = 5
|
||||
WIRELESS_LAN = 6
|
||||
RJ45 = 7
|
||||
TUNNEL = 8
|
||||
KTUNNEL = 9
|
||||
EMANE = 10
|
||||
TAP_BRIDGE = 11
|
||||
PEER_TO_PEER = 12
|
||||
CONTROL_NET = 13
|
||||
EMANE_NET = 14
|
||||
|
||||
|
||||
class Rj45Models(Enum):
|
||||
"""
|
||||
RJ45 model types.
|
||||
"""
|
||||
LINKED = 0
|
||||
WIRELESS = 1
|
||||
INSTALLED = 2
|
||||
|
||||
|
||||
# Link Message TLV Types
|
||||
class LinkTlvs(Enum):
|
||||
"""
|
||||
Link type, length, value enumerations.
|
||||
"""
|
||||
N1_NUMBER = 0x01
|
||||
N2_NUMBER = 0x02
|
||||
DELAY = 0x03
|
||||
BANDWIDTH = 0x04
|
||||
PER = 0x05
|
||||
DUP = 0x06
|
||||
JITTER = 0x07
|
||||
MER = 0x08
|
||||
BURST = 0x09
|
||||
SESSION = 0x0A
|
||||
MBURST = 0x10
|
||||
TYPE = 0x20
|
||||
GUI_ATTRIBUTES = 0x21
|
||||
UNIDIRECTIONAL = 0x22
|
||||
EMULATION_ID = 0x23
|
||||
NETWORK_ID = 0x24
|
||||
KEY = 0x25
|
||||
INTERFACE1_NUMBER = 0x30
|
||||
INTERFACE1_IP4 = 0x31
|
||||
INTERFACE1_IP4_MASK = 0x32
|
||||
INTERFACE1_MAC = 0x33
|
||||
INTERFACE1_IP6 = 0x34
|
||||
INTERFACE1_IP6_MASK = 0x35
|
||||
INTERFACE2_NUMBER = 0x36
|
||||
INTERFACE2_IP4 = 0x37
|
||||
INTERFACE2_IP4_MASK = 0x38
|
||||
INTERFACE2_MAC = 0x39
|
||||
INTERFACE2_IP6 = 0x40
|
||||
INTERFACE2_IP6_MASK = 0x41
|
||||
INTERFACE1_NAME = 0x42
|
||||
INTERFACE2_NAME = 0x43
|
||||
OPAQUE = 0x50
|
||||
|
||||
|
||||
class LinkTypes(Enum):
|
||||
"""
|
||||
Link types.
|
||||
"""
|
||||
WIRELESS = 0
|
||||
WIRED = 1
|
||||
|
||||
|
||||
class ExecuteTlvs(Enum):
|
||||
"""
|
||||
Execute type, length, value enumerations.
|
||||
"""
|
||||
NODE = 0x01
|
||||
NUMBER = 0x02
|
||||
TIME = 0x03
|
||||
COMMAND = 0x04
|
||||
RESULT = 0x05
|
||||
STATUS = 0x06
|
||||
SESSION = 0x0A
|
||||
|
||||
|
||||
class RegisterTlvs(Enum):
|
||||
"""
|
||||
Register type, length, value enumerations.
|
||||
"""
|
||||
WIRELESS = 0x01
|
||||
MOBILITY = 0x02
|
||||
UTILITY = 0x03
|
||||
EXECUTE_SERVER = 0x04
|
||||
GUI = 0x05
|
||||
EMULATION_SERVER = 0x06
|
||||
SESSION = 0x0A
|
||||
|
||||
|
||||
class ConfigTlvs(Enum):
|
||||
"""
|
||||
Configuration type, length, value enumerations.
|
||||
"""
|
||||
NODE = 0x01
|
||||
OBJECT = 0x02
|
||||
TYPE = 0x03
|
||||
DATA_TYPES = 0x04
|
||||
VALUES = 0x05
|
||||
CAPTIONS = 0x06
|
||||
BITMAP = 0x07
|
||||
POSSIBLE_VALUES = 0x08
|
||||
GROUPS = 0x09
|
||||
SESSION = 0x0A
|
||||
INTERFACE_NUMBER = 0x0B
|
||||
NETWORK_ID = 0x24
|
||||
OPAQUE = 0x50
|
||||
|
||||
|
||||
class ConfigFlags(Enum):
|
||||
"""
|
||||
Configuration flags.
|
||||
"""
|
||||
NONE = 0x00
|
||||
REQUEST = 0x01
|
||||
UPDATE = 0x02
|
||||
RESET = 0x03
|
||||
|
||||
|
||||
class ConfigDataTypes(Enum):
|
||||
"""
|
||||
Configuration data types.
|
||||
"""
|
||||
UINT8 = 0x01
|
||||
UINT16 = 0x02
|
||||
UINT32 = 0x03
|
||||
UINT64 = 0x04
|
||||
INT8 = 0x05
|
||||
INT16 = 0x06
|
||||
INT32 = 0x07
|
||||
INT64 = 0x08
|
||||
FLOAT = 0x09
|
||||
STRING = 0x0A
|
||||
BOOL = 0x0B
|
||||
|
||||
|
||||
class FileTlvs(Enum):
|
||||
"""
|
||||
File type, length, value enumerations.
|
||||
"""
|
||||
NODE = 0x01
|
||||
NAME = 0x02
|
||||
MODE = 0x03
|
||||
NUMBER = 0x04
|
||||
TYPE = 0x05
|
||||
SOURCE_NAME = 0x06
|
||||
SESSION = 0x0A
|
||||
DATA = 0x10
|
||||
COMPRESSED_DATA = 0x11
|
||||
|
||||
|
||||
class InterfaceTlvs(Enum):
|
||||
"""
|
||||
Interface type, length, value enumerations.
|
||||
"""
|
||||
NODE = 0x01
|
||||
NUMBER = 0x02
|
||||
NAME = 0x03
|
||||
IP_ADDRESS = 0x04
|
||||
MASK = 0x05
|
||||
MAC_ADDRESS = 0x06
|
||||
IP6_ADDRESS = 0x07
|
||||
IP6_MASK = 0x08
|
||||
TYPE = 0x09
|
||||
SESSION = 0x0A
|
||||
STATE = 0x0B
|
||||
EMULATION_ID = 0x23
|
||||
NETWORK_ID = 0x24
|
||||
|
||||
|
||||
class EventTlvs(Enum):
|
||||
"""
|
||||
Event type, length, value enumerations.
|
||||
"""
|
||||
NODE = 0x01
|
||||
TYPE = 0x02
|
||||
NAME = 0x03
|
||||
DATA = 0x04
|
||||
TIME = 0x05
|
||||
SESSION = 0x0A
|
||||
|
||||
|
||||
class EventTypes(Enum):
|
||||
"""
|
||||
Event types.
|
||||
"""
|
||||
NONE = 0
|
||||
DEFINITION_STATE = 1
|
||||
CONFIGURATION_STATE = 2
|
||||
INSTANTIATION_STATE = 3
|
||||
RUNTIME_STATE = 4
|
||||
DATACOLLECT_STATE = 5
|
||||
SHUTDOWN_STATE = 6
|
||||
START = 7
|
||||
STOP = 8
|
||||
PAUSE = 9
|
||||
RESTART = 10
|
||||
FILE_OPEN = 11
|
||||
FILE_SAVE = 12
|
||||
SCHEDULED = 13
|
||||
RECONFIGURE = 14
|
||||
|
||||
|
||||
# Session Message TLV Types
|
||||
class SessionTlvs(Enum):
    """
    Session type, length, value enumerations.
    """
|
||||
NUMBER = 0x01
|
||||
NAME = 0x02
|
||||
FILE = 0x03
|
||||
NODE_COUNT = 0x04
|
||||
DATE = 0x05
|
||||
THUMB = 0x06
|
||||
USER = 0x07
|
||||
OPAQUE = 0x0A
|
||||
|
||||
|
||||
class ExceptionTlvs(Enum):
|
||||
"""
|
||||
Exception type, length, value enumerations.
|
||||
"""
|
||||
NODE = 0x01
|
||||
SESSION = 0x02
|
||||
LEVEL = 0x03
|
||||
SOURCE = 0x04
|
||||
DATE = 0x05
|
||||
TEXT = 0x06
|
||||
OPAQUE = 0x0A
|
||||
|
||||
|
||||
class ExceptionLevels(Enum):
|
||||
"""
|
||||
Exception levels.
|
||||
"""
|
||||
NONE = 0
|
||||
FATAL = 1
|
||||
ERROR = 2
|
||||
WARNING = 3
|
||||
NOTICE = 4
|
|
@ -1,42 +1,47 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
"""
|
||||
location.py: definition of CoreLocation class that is a member of the
|
||||
Session object. Provides conversions between Cartesian and geographic coordinate
|
||||
systems. Depends on utm contributed module, from
|
||||
https://pypi.python.org/pypi/utm (version 0.3.0).
|
||||
'''
|
||||
"""
|
||||
|
||||
from core.conf import ConfigurableManager
|
||||
from core.api import coreapi
|
||||
from core.enumerations import RegisterTlvs
|
||||
from core.misc import log
|
||||
from core.misc import utm
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
|
||||
class CoreLocation(ConfigurableManager):
|
||||
''' Member of session class for handling global location data. This keeps
|
||||
track of a latitude/longitude/altitude reference point and scale in
|
||||
order to convert between X,Y and geo coordinates.
|
||||
|
||||
TODO: this could be updated to use more generic
|
||||
Configurable/ConfigurableManager code like other Session objects
|
||||
'''
|
||||
_name = "location"
|
||||
_type = coreapi.CORE_TLV_REG_UTILITY
|
||||
|
||||
def __init__(self, session):
|
||||
ConfigurableManager.__init__(self, session)
|
||||
"""
|
||||
Member of session class for handling global location data. This keeps
|
||||
track of a latitude/longitude/altitude reference point and scale in
|
||||
order to convert between X,Y and geo coordinates.
|
||||
|
||||
TODO: this could be updated to use more generic
|
||||
Configurable/ConfigurableManager code like other Session objects
|
||||
"""
|
||||
name = "location"
|
||||
config_type = RegisterTlvs.UTILITY.value
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
Creates a CoreLocation instance.
|
||||
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
ConfigurableManager.__init__(self)
|
||||
self.reset()
|
||||
self.zonemap = {}
|
||||
for n, l in utm.ZONE_LETTERS:
|
||||
self.zonemap[l] = n
|
||||
|
||||
def reset(self):
|
||||
''' Reset to initial state.
|
||||
'''
|
||||
"""
|
||||
Reset to initial state.
|
||||
"""
|
||||
# (x, y, z) coordinates of the point given by self.refgeo
|
||||
self.refxyz = (0.0, 0.0, 0.0)
|
||||
# decimal latitude, longitude, and altitude at the point (x, y, z)
|
||||
|
@ -46,66 +51,76 @@ class CoreLocation(ConfigurableManager):
|
|||
# cached distance to refpt in other zones
|
||||
self.zoneshifts = {}
|
||||
|
||||
def configure_values(self, msg, values):
|
||||
''' Receive configuration message for setting the reference point
|
||||
and scale.
|
||||
'''
|
||||
def configure_values(self, config_data):
|
||||
"""
|
||||
Receive configuration message for setting the reference point
|
||||
and scale.
|
||||
|
||||
:param core.conf.ConfigData config_data: configuration data for carrying out a configuration
|
||||
:return: nothing
|
||||
"""
|
||||
values = config_data.data_values
|
||||
|
||||
if values is None:
|
||||
self.session.info("location data missing")
|
||||
logger.info("location data missing")
|
||||
return None
|
||||
values = values.split('|')
|
||||
|
||||
# Cartesian coordinate reference point
|
||||
refx,refy = map(lambda x: float(x), values[0:2])
|
||||
refx, refy = map(lambda x: float(x), values[0:2])
|
||||
refz = 0.0
|
||||
self.refxyz = (refx, refy, refz)
|
||||
# Geographic reference point
|
||||
lat,long,alt = map(lambda x: float(x), values[2:5])
|
||||
self.setrefgeo(lat, long, alt)
|
||||
lat, lon, alt = map(lambda x: float(x), values[2:5])
|
||||
self.setrefgeo(lat, lon, alt)
|
||||
self.refscale = float(values[5])
|
||||
self.session.info("location configured: (%.2f,%.2f,%.2f) = "
|
||||
"(%.5f,%.5f,%.5f) scale=%.2f" %
|
||||
(self.refxyz[0], self.refxyz[1], self.refxyz[2], self.refgeo[0],
|
||||
self.refgeo[1], self.refgeo[2], self.refscale))
|
||||
self.session.info("location configured: UTM(%.5f,%.5f,%.5f)" %
|
||||
(self.refutm[1], self.refutm[2], self.refutm[3]))
|
||||
logger.info("location configured: (%.2f,%.2f,%.2f) = (%.5f,%.5f,%.5f) scale=%.2f" %
|
||||
(self.refxyz[0], self.refxyz[1], self.refxyz[2], self.refgeo[0],
|
||||
self.refgeo[1], self.refgeo[2], self.refscale))
|
||||
logger.info("location configured: UTM(%.5f,%.5f,%.5f)" %
|
||||
(self.refutm[1], self.refutm[2], self.refutm[3]))
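# Illustrative note (format inferred from the parsing above, not stated in the
# commit): the configuration value string is pipe-delimited as
# "refx|refy|lat|lon|alt|scale", e.g.
#
#     config_data.data_values = "0.0|0.0|47.5|-122.1|2.0|150.0"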
|
||||
|
||||
def px2m(self, val):
|
||||
''' Convert the specified value in pixels to meters using the
|
||||
configured scale. The scale is given as s, where
|
||||
100 pixels = s meters.
|
||||
'''
|
||||
"""
|
||||
Convert the specified value in pixels to meters using the
|
||||
configured scale. The scale is given as s, where
|
||||
100 pixels = s meters.
|
||||
"""
|
||||
return (val / 100.0) * self.refscale
|
||||
|
||||
def m2px(self, val):
|
||||
''' Convert the specified value in meters to pixels using the
|
||||
configured scale. The scale is given as s, where
|
||||
100 pixels = s meters.
|
||||
'''
|
||||
"""
|
||||
Convert the specified value in meters to pixels using the
|
||||
configured scale. The scale is given as s, where
|
||||
100 pixels = s meters.
|
||||
"""
|
||||
if self.refscale == 0.0:
|
||||
return 0.0
|
||||
return 100.0 * (val / self.refscale)
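# Worked example (illustrative): with refscale = 150.0,
#     px2m(100)   == (100 / 100.0) * 150.0 == 150.0 meters
#     m2px(150.0) == 100.0 * (150.0 / 150.0) == 100.0 pixels
# so the two conversions are inverses whenever refscale is nonzero.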
|
||||
|
||||
def setrefgeo(self, lat, lon, alt):
|
||||
''' Record the geographical reference point decimal (lat, lon, alt)
|
||||
and convert and store its UTM equivalent for later use.
|
||||
'''
|
||||
"""
|
||||
Record the geographical reference point decimal (lat, lon, alt)
|
||||
and convert and store its UTM equivalent for later use.
|
||||
"""
|
||||
self.refgeo = (lat, lon, alt)
|
||||
# easting, northing, zone
|
||||
(e, n, zonen, zonel) = utm.from_latlon(lat, lon)
|
||||
self.refutm = ( (zonen, zonel), e, n, alt)
|
||||
self.refutm = ((zonen, zonel), e, n, alt)
|
||||
|
||||
def getgeo(self, x, y, z):
|
||||
''' Given (x, y, z) Cartesian coordinates, convert them to latitude,
|
||||
longitude, and altitude based on the configured reference point
|
||||
and scale.
|
||||
'''
|
||||
"""
|
||||
Given (x, y, z) Cartesian coordinates, convert them to latitude,
|
||||
longitude, and altitude based on the configured reference point
|
||||
and scale.
|
||||
"""
|
||||
# shift (x,y,z) over to reference point (x,y,z)
|
||||
x = x - self.refxyz[0]
|
||||
y = -(y - self.refxyz[1])
|
||||
if z is None:
|
||||
z = self.refxyz[2]
|
||||
else:
|
||||
z = z - self.refxyz[2]
|
||||
z -= self.refxyz[2]
|
||||
# use UTM coordinates since unit is meters
|
||||
zone = self.refutm[0]
|
||||
if zone == "":
|
||||
|
@ -117,23 +132,23 @@ class CoreLocation(ConfigurableManager):
|
|||
try:
|
||||
lat, lon = utm.to_latlon(e, n, zone[0], zone[1])
|
||||
except utm.OutOfRangeError:
|
||||
self.info("UTM out of range error for e=%s n=%s zone=%s" \
|
||||
"xyz=(%s,%s,%s)" % (e, n, zone, x, y, z))
|
||||
logger.exception("UTM out of range error for n=%s zone=%s xyz=(%s,%s,%s)", n, zone, x, y, z)
|
||||
(lat, lon) = self.refgeo[:2]
|
||||
#self.info("getgeo(%s,%s,%s) e=%s n=%s zone=%s lat,lon,alt=" \
|
||||
# self.info("getgeo(%s,%s,%s) e=%s n=%s zone=%s lat,lon,alt=" \
|
||||
# "%.3f,%.3f,%.3f" % (x, y, z, e, n, zone, lat, lon, alt))
|
||||
return (lat, lon, alt)
|
||||
return lat, lon, alt
|
||||
|
||||
def getxyz(self, lat, lon, alt):
|
||||
''' Given latitude, longitude, and altitude location data, convert them
|
||||
to (x, y, z) Cartesian coordinates based on the configured
|
||||
reference point and scale. Lat/lon is converted to UTM meter
|
||||
coordinates, UTM zones are accounted for, and the scale turns
|
||||
meters to pixels.
|
||||
'''
|
||||
"""
|
||||
Given latitude, longitude, and altitude location data, convert them
|
||||
to (x, y, z) Cartesian coordinates based on the configured
|
||||
reference point and scale. Lat/lon is converted to UTM meter
|
||||
coordinates, UTM zones are accounted for, and the scale turns
|
||||
meters to pixels.
|
||||
"""
|
||||
# convert lat/lon to UTM coordinates in meters
|
||||
(e, n, zonen, zonel) = utm.from_latlon(lat, lon)
|
||||
(rlat, rlon, ralt) = self.refgeo
|
||||
e, n, zonen, zonel = utm.from_latlon(lat, lon)
|
||||
rlat, rlon, ralt = self.refgeo
|
||||
xshift = self.geteastingshift(zonen, zonel)
|
||||
if xshift is None:
|
||||
xm = e - self.refutm[1]
|
||||
|
@ -145,31 +160,32 @@ class CoreLocation(ConfigurableManager):
|
|||
else:
|
||||
ym = n + yshift
|
||||
zm = alt - ralt
|
||||
|
||||
|
||||
# shift (x,y,z) over to reference point (x,y,z)
|
||||
x = self.m2px(xm) + self.refxyz[0]
|
||||
y = -(self.m2px(ym) + self.refxyz[1])
|
||||
z = self.m2px(zm) + self.refxyz[2]
|
||||
return (x, y, z)
|
||||
return x, y, z
|
||||
|
||||
def geteastingshift(self, zonen, zonel):
|
||||
''' If the lat, lon coordinates being converted are located in a
|
||||
"""
|
||||
If the lat, lon coordinates being converted are located in a
|
||||
different UTM zone than the canvas reference point, the UTM meters
|
||||
may need to be shifted.
|
||||
This picks a reference point in the same longitudinal band
|
||||
This picks a reference point in the same longitudinal band
|
||||
(UTM zone number) as the provided zone, to calculate the shift in
|
||||
meters for the x coordinate.
|
||||
'''
|
||||
"""
|
||||
rzonen = int(self.refutm[0][0])
|
||||
if zonen == rzonen:
|
||||
return None # same zone number, no x shift required
|
||||
return None # same zone number, no x shift required
|
||||
z = (zonen, zonel)
|
||||
if z in self.zoneshifts and self.zoneshifts[z][0] is not None:
|
||||
return self.zoneshifts[z][0] # x shift already calculated, cached
|
||||
|
||||
(rlat, rlon, ralt) = self.refgeo
|
||||
lon2 = rlon + 6*(zonen - rzonen) # ea. zone is 6deg band
|
||||
(e2, n2, zonen2, zonel2) = utm.from_latlon(rlat, lon2) # ignore northing
|
||||
return self.zoneshifts[z][0] # x shift already calculated, cached
|
||||
|
||||
rlat, rlon, ralt = self.refgeo
|
||||
lon2 = rlon + 6 * (zonen - rzonen) # ea. zone is 6deg band
|
||||
e2, n2, zonen2, zonel2 = utm.from_latlon(rlat, lon2) # ignore northing
|
||||
# NOTE: great circle distance used here, not reference ellipsoid!
|
||||
xshift = utm.haversine(rlon, rlat, lon2, rlat) - e2
|
||||
# cache the return value
|
||||
|
@ -178,27 +194,28 @@ class CoreLocation(ConfigurableManager):
|
|||
yshift = self.zoneshifts[z][1]
|
||||
self.zoneshifts[z] = (xshift, yshift)
|
||||
return xshift
|
||||
|
||||
|
||||
def getnorthingshift(self, zonen, zonel):
|
||||
''' If the lat, lon coordinates being converted are located in a
|
||||
"""
|
||||
If the lat, lon coordinates being converted are located in a
|
||||
different UTM zone than the canvas reference point, the UTM meters
|
||||
may need to be shifted.
|
||||
This picks a reference point in the same latitude band (UTM zone letter)
|
||||
as the provided zone, to calculate the shift in meters for the
|
||||
y coordinate.
|
||||
'''
|
||||
"""
|
||||
rzonel = self.refutm[0][1]
|
||||
if zonel == rzonel:
|
||||
return None # same zone letter, no y shift required
|
||||
return None # same zone letter, no y shift required
|
||||
z = (zonen, zonel)
|
||||
if z in self.zoneshifts and self.zoneshifts[z][1] is not None:
|
||||
return self.zoneshifts[z][1] # y shift already calculated, cached
|
||||
|
||||
(rlat, rlon, ralt) = self.refgeo
|
||||
return self.zoneshifts[z][1] # y shift already calculated, cached
|
||||
|
||||
rlat, rlon, ralt = self.refgeo
|
||||
# zonemap is used to calculate degrees difference between zone letters
|
||||
latshift = self.zonemap[zonel] - self.zonemap[rzonel]
|
||||
lat2 = rlat + latshift # ea. latitude band is 8deg high
|
||||
(e2, n2, zonen2, zonel2) = utm.from_latlon(lat2, rlon)
|
||||
lat2 = rlat + latshift # ea. latitude band is 8deg high
|
||||
e2, n2, zonen2, zonel2 = utm.from_latlon(lat2, rlon)
|
||||
# NOTE: great circle distance used here, not reference ellipsoid
|
||||
yshift = -(utm.haversine(rlon, rlat, rlon, lat2) + n2)
|
||||
# cache the return value
|
||||
|
@ -209,26 +226,27 @@ class CoreLocation(ConfigurableManager):
|
|||
return yshift
|
||||
|
||||
def getutmzoneshift(self, e, n):
|
||||
''' Given UTM easting and northing values, check if they fall outside
|
||||
"""
|
||||
Given UTM easting and northing values, check if they fall outside
|
||||
the reference point's zone boundary. Return the UTM coordinates in a
|
||||
different zone and the new zone if they do. Zone lettering is only
|
||||
changed when the reference point is in the opposite hemisphere.
|
||||
'''
|
||||
"""
|
||||
zone = self.refutm[0]
|
||||
(rlat, rlon, ralt) = self.refgeo
|
||||
rlat, rlon, ralt = self.refgeo
|
||||
if e > 834000 or e < 166000:
|
||||
num_zones = (int(e) - 166000) / (utm.R/10)
|
||||
num_zones = (int(e) - 166000) / (utm.R / 10)
|
||||
# estimate number of zones to shift, E (positive) or W (negative)
|
||||
rlon2 = self.refgeo[1] + (num_zones * 6)
|
||||
(e2, n2, zonen2, zonel2) = utm.from_latlon(rlat, rlon2)
|
||||
e2, n2, zonen2, zonel2 = utm.from_latlon(rlat, rlon2)
|
||||
xshift = utm.haversine(rlon, rlat, rlon2, rlat)
|
||||
# after >3 zones away from refpt, the above estimate won't work
|
||||
# (the above estimate could be improved)
|
||||
if not 100000 <= (e - xshift) < 1000000:
|
||||
# move one more zone away
|
||||
num_zones = (abs(num_zones)+1) * (abs(num_zones)/num_zones)
|
||||
num_zones = (abs(num_zones) + 1) * (abs(num_zones) / num_zones)
|
||||
rlon2 = self.refgeo[1] + (num_zones * 6)
|
||||
(e2, n2, zonen2, zonel2) = utm.from_latlon(rlat, rlon2)
|
||||
e2, n2, zonen2, zonel2 = utm.from_latlon(rlat, rlon2)
|
||||
xshift = utm.haversine(rlon, rlat, rlon2, rlat)
|
||||
e = e - xshift
|
||||
zone = (zonen2, zonel2)
|
||||
|
@ -240,7 +258,4 @@ class CoreLocation(ConfigurableManager):
|
|||
# refpt in southern hemisphere and we crossed north of equator
|
||||
n -= 10000000
|
||||
zone = (zone[0], 'N')
|
||||
return (e, n, zone)
|
||||
|
||||
|
||||
|
||||
return e, n, zone
|
||||
|
|
|
@ -5,9 +5,9 @@
|
|||
|
||||
from math import pi, sin, cos, tan, sqrt
|
||||
|
||||
#LatLong- UTM conversion..h
|
||||
#definitions for lat/long to UTM and UTM to lat/lng conversions
|
||||
#include <string.h>
|
||||
# LatLong- UTM conversion..h
|
||||
# definitions for lat/long to UTM and UTM to lat/lng conversions
|
||||
# include <string.h>
|
||||
|
||||
_deg2rad = pi / 180.0
|
||||
_rad2deg = 180.0 / pi
|
||||
|
@ -16,48 +16,49 @@ _EquatorialRadius = 2
|
|||
_eccentricitySquared = 3
|
||||
|
||||
_ellipsoid = [
|
||||
# id, Ellipsoid name, Equatorial Radius, square of eccentricity
|
||||
# first once is a placeholder only, To allow array indices to match id numbers
|
||||
[ -1, "Placeholder", 0, 0],
|
||||
[ 1, "Airy", 6377563, 0.00667054],
|
||||
[ 2, "Australian National", 6378160, 0.006694542],
|
||||
[ 3, "Bessel 1841", 6377397, 0.006674372],
|
||||
[ 4, "Bessel 1841 (Nambia] ", 6377484, 0.006674372],
|
||||
[ 5, "Clarke 1866", 6378206, 0.006768658],
|
||||
[ 6, "Clarke 1880", 6378249, 0.006803511],
|
||||
[ 7, "Everest", 6377276, 0.006637847],
|
||||
[ 8, "Fischer 1960 (Mercury] ", 6378166, 0.006693422],
|
||||
[ 9, "Fischer 1968", 6378150, 0.006693422],
|
||||
[ 10, "GRS 1967", 6378160, 0.006694605],
|
||||
[ 11, "GRS 1980", 6378137, 0.00669438],
|
||||
[ 12, "Helmert 1906", 6378200, 0.006693422],
|
||||
[ 13, "Hough", 6378270, 0.00672267],
|
||||
[ 14, "International", 6378388, 0.00672267],
|
||||
[ 15, "Krassovsky", 6378245, 0.006693422],
|
||||
[ 16, "Modified Airy", 6377340, 0.00667054],
|
||||
[ 17, "Modified Everest", 6377304, 0.006637847],
|
||||
[ 18, "Modified Fischer 1960", 6378155, 0.006693422],
|
||||
[ 19, "South American 1969", 6378160, 0.006694542],
|
||||
[ 20, "WGS 60", 6378165, 0.006693422],
|
||||
[ 21, "WGS 66", 6378145, 0.006694542],
|
||||
[ 22, "WGS-72", 6378135, 0.006694318],
|
||||
[ 23, "WGS-84", 6378137, 0.00669438]
|
||||
# id, Ellipsoid name, Equatorial Radius, square of eccentricity
|
||||
# first one is a placeholder only, to allow array indices to match id numbers
|
||||
[-1, "Placeholder", 0, 0],
|
||||
[1, "Airy", 6377563, 0.00667054],
|
||||
[2, "Australian National", 6378160, 0.006694542],
|
||||
[3, "Bessel 1841", 6377397, 0.006674372],
|
||||
[4, "Bessel 1841 (Nambia] ", 6377484, 0.006674372],
|
||||
[5, "Clarke 1866", 6378206, 0.006768658],
|
||||
[6, "Clarke 1880", 6378249, 0.006803511],
|
||||
[7, "Everest", 6377276, 0.006637847],
|
||||
[8, "Fischer 1960 (Mercury] ", 6378166, 0.006693422],
|
||||
[9, "Fischer 1968", 6378150, 0.006693422],
|
||||
[10, "GRS 1967", 6378160, 0.006694605],
|
||||
[11, "GRS 1980", 6378137, 0.00669438],
|
||||
[12, "Helmert 1906", 6378200, 0.006693422],
|
||||
[13, "Hough", 6378270, 0.00672267],
|
||||
[14, "International", 6378388, 0.00672267],
|
||||
[15, "Krassovsky", 6378245, 0.006693422],
|
||||
[16, "Modified Airy", 6377340, 0.00667054],
|
||||
[17, "Modified Everest", 6377304, 0.006637847],
|
||||
[18, "Modified Fischer 1960", 6378155, 0.006693422],
|
||||
[19, "South American 1969", 6378160, 0.006694542],
|
||||
[20, "WGS 60", 6378165, 0.006693422],
|
||||
[21, "WGS 66", 6378145, 0.006694542],
|
||||
[22, "WGS-72", 6378135, 0.006694318],
|
||||
[23, "WGS-84", 6378137, 0.00669438]
|
||||
]
|
||||
|
||||
#Reference ellipsoids derived from Peter H. Dana's website-
|
||||
#http://www.utexas.edu/depts/grg/gcraft/notes/datum/elist.html
|
||||
#Department of Geography, University of Texas at Austin
|
||||
#Internet: pdana@mail.utexas.edu
|
||||
#3/22/95
|
||||
|
||||
#Source
|
||||
#Defense Mapping Agency. 1987b. DMA Technical Report: Supplement to Department of Defense World Geodetic System
|
||||
#1984 Technical Report. Part I and II. Washington, DC: Defense Mapping Agency
|
||||
# Reference ellipsoids derived from Peter H. Dana's website-
|
||||
# http://www.utexas.edu/depts/grg/gcraft/notes/datum/elist.html
|
||||
# Department of Geography, University of Texas at Austin
|
||||
# Internet: pdana@mail.utexas.edu
|
||||
# 3/22/95
|
||||
|
||||
#def LLtoUTM(int ReferenceEllipsoid, const double Lat, const double Long,
|
||||
# Source
|
||||
# Defense Mapping Agency. 1987b. DMA Technical Report: Supplement to Department of Defense World Geodetic System
|
||||
# 1984 Technical Report. Part I and II. Washington, DC: Defense Mapping Agency
|
||||
|
||||
# def LLtoUTM(int ReferenceEllipsoid, const double Lat, const double Long,
|
||||
# double &UTMNorthing, double &UTMEasting, char* UTMZone)
|
||||
|
||||
def LLtoUTM(ReferenceEllipsoid, Lat, Long, zone = None):
|
||||
def LLtoUTM(ReferenceEllipsoid, Lat, Long, zone=None):
|
||||
"""converts lat/long to UTM coords. Equations from USGS Bulletin 1532
|
||||
East Longitudes are positive, West longitudes are negative.
|
||||
North latitudes are positive, South latitudes are negative
|
||||
|
@ -68,14 +69,14 @@ def LLtoUTM(ReferenceEllipsoid, Lat, Long, zone = None):
|
|||
eccSquared = _ellipsoid[ReferenceEllipsoid][_eccentricitySquared]
|
||||
k0 = 0.9996
|
||||
|
||||
#Make sure the longitude is between -180.00 .. 179.9
|
||||
LongTemp = (Long+180)-int((Long+180)/360)*360-180 # -180.00 .. 179.9
|
||||
# Make sure the longitude is between -180.00 .. 179.9
|
||||
LongTemp = (Long + 180) - int((Long + 180) / 360) * 360 - 180 # -180.00 .. 179.9
|
||||
|
||||
LatRad = Lat*_deg2rad
|
||||
LongRad = LongTemp*_deg2rad
|
||||
LatRad = Lat * _deg2rad
|
||||
LongRad = LongTemp * _deg2rad
|
||||
|
||||
if zone is None:
|
||||
ZoneNumber = int((LongTemp + 180)/6) + 1
|
||||
ZoneNumber = int((LongTemp + 180) / 6) + 1
|
||||
else:
|
||||
ZoneNumber = zone
|
||||
|
||||
|
@ -84,46 +85,50 @@ def LLtoUTM(ReferenceEllipsoid, Lat, Long, zone = None):
|
|||
|
||||
# Special zones for Svalbard
|
||||
if Lat >= 72.0 and Lat < 84.0:
|
||||
if LongTemp >= 0.0 and LongTemp < 9.0:ZoneNumber = 31
|
||||
elif LongTemp >= 9.0 and LongTemp < 21.0: ZoneNumber = 33
|
||||
elif LongTemp >= 21.0 and LongTemp < 33.0: ZoneNumber = 35
|
||||
elif LongTemp >= 33.0 and LongTemp < 42.0: ZoneNumber = 37
|
||||
if LongTemp >= 0.0 and LongTemp < 9.0:
|
||||
ZoneNumber = 31
|
||||
elif LongTemp >= 9.0 and LongTemp < 21.0:
|
||||
ZoneNumber = 33
|
||||
elif LongTemp >= 21.0 and LongTemp < 33.0:
|
||||
ZoneNumber = 35
|
||||
elif LongTemp >= 33.0 and LongTemp < 42.0:
|
||||
ZoneNumber = 37
|
||||
|
||||
LongOrigin = (ZoneNumber - 1)*6 - 180 + 3 #+3 puts origin in middle of zone
|
||||
LongOrigin = (ZoneNumber - 1) * 6 - 180 + 3 # +3 puts origin in middle of zone
|
||||
LongOriginRad = LongOrigin * _deg2rad
|
||||
|
||||
#compute the UTM Zone from the latitude and longitude
|
||||
# compute the UTM Zone from the latitude and longitude
|
||||
UTMZone = "%d%c" % (ZoneNumber, _UTMLetterDesignator(Lat))
|
||||
|
||||
eccPrimeSquared = (eccSquared)/(1-eccSquared)
|
||||
N = a/sqrt(1-eccSquared*sin(LatRad)*sin(LatRad))
|
||||
T = tan(LatRad)*tan(LatRad)
|
||||
C = eccPrimeSquared*cos(LatRad)*cos(LatRad)
|
||||
A = cos(LatRad)*(LongRad-LongOriginRad)
|
||||
eccPrimeSquared = (eccSquared) / (1 - eccSquared)
|
||||
N = a / sqrt(1 - eccSquared * sin(LatRad) * sin(LatRad))
|
||||
T = tan(LatRad) * tan(LatRad)
|
||||
C = eccPrimeSquared * cos(LatRad) * cos(LatRad)
|
||||
A = cos(LatRad) * (LongRad - LongOriginRad)
|
||||
|
||||
M = a*((1
|
||||
- eccSquared/4
|
||||
- 3*eccSquared*eccSquared/64
|
||||
- 5*eccSquared*eccSquared*eccSquared/256)*LatRad
|
||||
- (3*eccSquared/8
|
||||
+ 3*eccSquared*eccSquared/32
|
||||
+ 45*eccSquared*eccSquared*eccSquared/1024)*sin(2*LatRad)
|
||||
+ (15*eccSquared*eccSquared/256 + 45*eccSquared*eccSquared*eccSquared/1024)*sin(4*LatRad)
|
||||
- (35*eccSquared*eccSquared*eccSquared/3072)*sin(6*LatRad))
|
||||
M = a * ((1
|
||||
- eccSquared / 4
|
||||
- 3 * eccSquared * eccSquared / 64
|
||||
- 5 * eccSquared * eccSquared * eccSquared / 256) * LatRad
|
||||
- (3 * eccSquared / 8
|
||||
+ 3 * eccSquared * eccSquared / 32
|
||||
+ 45 * eccSquared * eccSquared * eccSquared / 1024) * sin(2 * LatRad)
|
||||
+ (15 * eccSquared * eccSquared / 256 + 45 * eccSquared * eccSquared * eccSquared / 1024) * sin(4 * LatRad)
|
||||
- (35 * eccSquared * eccSquared * eccSquared / 3072) * sin(6 * LatRad))
|
||||
|
||||
UTMEasting = (k0*N*(A+(1-T+C)*A*A*A/6
|
||||
+ (5-18*T+T*T+72*C-58*eccPrimeSquared)*A*A*A*A*A/120)
|
||||
UTMEasting = (k0 * N * (A + (1 - T + C) * A * A * A / 6
|
||||
+ (5 - 18 * T + T * T + 72 * C - 58 * eccPrimeSquared) * A * A * A * A * A / 120)
|
||||
+ 500000.0)
|
||||
|
||||
UTMNorthing = (k0*(M+N*tan(LatRad)*(A*A/2+(5-T+9*C+4*C*C)*A*A*A*A/24
|
||||
+ (61
|
||||
-58*T
|
||||
+T*T
|
||||
+600*C
|
||||
-330*eccPrimeSquared)*A*A*A*A*A*A/720)))
|
||||
UTMNorthing = (k0 * (M + N * tan(LatRad) * (A * A / 2 + (5 - T + 9 * C + 4 * C * C) * A * A * A * A / 24
|
||||
+ (61
|
||||
- 58 * T
|
||||
+ T * T
|
||||
+ 600 * C
|
||||
- 330 * eccPrimeSquared) * A * A * A * A * A * A / 720)))
|
||||
|
||||
if Lat < 0:
|
||||
UTMNorthing = UTMNorthing + 10000000.0; #10000000 meter offset for southern hemisphere
|
||||
UTMNorthing = UTMNorthing + 10000000.0  # 10000000 meter offset for southern hemisphere
|
||||
return (UTMZone, UTMEasting, UTMNorthing)
|
||||
|
||||
|
||||
|
@ -132,29 +137,51 @@ def _UTMLetterDesignator(Lat):
|
|||
latitude returns 'Z' if latitude is outside the UTM limits of 84N to 80S
|
||||
Written by Chuck Gantz- chuck.gantz@globalstar.com"""
|
||||
|
||||
if 84 >= Lat >= 72: return 'X'
|
||||
elif 72 > Lat >= 64: return 'W'
|
||||
elif 64 > Lat >= 56: return 'V'
|
||||
elif 56 > Lat >= 48: return 'U'
|
||||
elif 48 > Lat >= 40: return 'T'
|
||||
elif 40 > Lat >= 32: return 'S'
|
||||
elif 32 > Lat >= 24: return 'R'
|
||||
elif 24 > Lat >= 16: return 'Q'
|
||||
elif 16 > Lat >= 8: return 'P'
|
||||
elif 8 > Lat >= 0: return 'N'
|
||||
elif 0 > Lat >= -8: return 'M'
|
||||
elif -8> Lat >= -16: return 'L'
|
||||
elif -16 > Lat >= -24: return 'K'
|
||||
elif -24 > Lat >= -32: return 'J'
|
||||
elif -32 > Lat >= -40: return 'H'
|
||||
elif -40 > Lat >= -48: return 'G'
|
||||
elif -48 > Lat >= -56: return 'F'
|
||||
elif -56 > Lat >= -64: return 'E'
|
||||
elif -64 > Lat >= -72: return 'D'
|
||||
elif -72 > Lat >= -80: return 'C'
|
||||
else: return 'Z' # if the Latitude is outside the UTM limits
|
||||
if 84 >= Lat >= 72:
|
||||
return 'X'
|
||||
elif 72 > Lat >= 64:
|
||||
return 'W'
|
||||
elif 64 > Lat >= 56:
|
||||
return 'V'
|
||||
elif 56 > Lat >= 48:
|
||||
return 'U'
|
||||
elif 48 > Lat >= 40:
|
||||
return 'T'
|
||||
elif 40 > Lat >= 32:
|
||||
return 'S'
|
||||
elif 32 > Lat >= 24:
|
||||
return 'R'
|
||||
elif 24 > Lat >= 16:
|
||||
return 'Q'
|
||||
elif 16 > Lat >= 8:
|
||||
return 'P'
|
||||
elif 8 > Lat >= 0:
|
||||
return 'N'
|
||||
elif 0 > Lat >= -8:
|
||||
return 'M'
|
||||
elif -8 > Lat >= -16:
|
||||
return 'L'
|
||||
elif -16 > Lat >= -24:
|
||||
return 'K'
|
||||
elif -24 > Lat >= -32:
|
||||
return 'J'
|
||||
elif -32 > Lat >= -40:
|
||||
return 'H'
|
||||
elif -40 > Lat >= -48:
|
||||
return 'G'
|
||||
elif -48 > Lat >= -56:
|
||||
return 'F'
|
||||
elif -56 > Lat >= -64:
|
||||
return 'E'
|
||||
elif -64 > Lat >= -72:
|
||||
return 'D'
|
||||
elif -72 > Lat >= -80:
|
||||
return 'C'
|
||||
else:
|
||||
return 'Z' # if the Latitude is outside the UTM limits
|
||||
|
||||
#void UTMtoLL(int ReferenceEllipsoid, const double UTMNorthing, const double UTMEasting, const char* UTMZone,
|
||||
|
||||
# void UTMtoLL(int ReferenceEllipsoid, const double UTMNorthing, const double UTMEasting, const char* UTMZone,
|
||||
# double& Lat, double& Long )
|
||||
|
||||
def UTMtoLL(ReferenceEllipsoid, northing, easting, zone):
|
||||
|
@ -168,10 +195,10 @@ Converted to Python by Russ Nelson <nelson@crynwr.com>"""
|
|||
k0 = 0.9996
|
||||
a = _ellipsoid[ReferenceEllipsoid][_EquatorialRadius]
|
||||
eccSquared = _ellipsoid[ReferenceEllipsoid][_eccentricitySquared]
|
||||
e1 = (1-sqrt(1-eccSquared))/(1+sqrt(1-eccSquared))
|
||||
#NorthernHemisphere; //1 for northern hemispher, 0 for southern
|
||||
e1 = (1 - sqrt(1 - eccSquared)) / (1 + sqrt(1 - eccSquared))
|
||||
# NorthernHemisphere: 1 for northern hemisphere, 0 for southern
|
||||
|
||||
x = easting - 500000.0 #remove 500,000 meter offset for longitude
|
||||
x = easting - 500000.0 # remove 500,000 meter offset for longitude
|
||||
y = northing
|
||||
|
||||
ZoneLetter = zone[-1]
|
||||
|
@ -180,37 +207,40 @@ Converted to Python by Russ Nelson <nelson@crynwr.com>"""
|
|||
NorthernHemisphere = 1 # point is in northern hemisphere
|
||||
else:
|
||||
NorthernHemisphere = 0 # point is in southern hemisphere
|
||||
y -= 10000000.0 # remove 10,000,000 meter offset used for southern hemisphere
|
||||
y -= 10000000.0 # remove 10,000,000 meter offset used for southern hemisphere
|
||||
|
||||
LongOrigin = (ZoneNumber - 1)*6 - 180 + 3 # +3 puts origin in middle of zone
|
||||
LongOrigin = (ZoneNumber - 1) * 6 - 180 + 3 # +3 puts origin in middle of zone
|
||||
|
||||
eccPrimeSquared = (eccSquared)/(1-eccSquared)
|
||||
eccPrimeSquared = (eccSquared) / (1 - eccSquared)
|
||||
|
||||
M = y / k0
|
||||
mu = M/(a*(1-eccSquared/4-3*eccSquared*eccSquared/64-5*eccSquared*eccSquared*eccSquared/256))
|
||||
mu = M / (
|
||||
a * (1 - eccSquared / 4 - 3 * eccSquared * eccSquared / 64 - 5 * eccSquared * eccSquared * eccSquared / 256))
|
||||
|
||||
phi1Rad = (mu + (3*e1/2-27*e1*e1*e1/32)*sin(2*mu)
|
||||
+ (21*e1*e1/16-55*e1*e1*e1*e1/32)*sin(4*mu)
|
||||
+(151*e1*e1*e1/96)*sin(6*mu))
|
||||
phi1 = phi1Rad*_rad2deg;
|
||||
phi1Rad = (mu + (3 * e1 / 2 - 27 * e1 * e1 * e1 / 32) * sin(2 * mu)
|
||||
+ (21 * e1 * e1 / 16 - 55 * e1 * e1 * e1 * e1 / 32) * sin(4 * mu)
|
||||
+ (151 * e1 * e1 * e1 / 96) * sin(6 * mu))
|
||||
phi1 = phi1Rad * _rad2deg
|
||||
|
||||
N1 = a/sqrt(1-eccSquared*sin(phi1Rad)*sin(phi1Rad))
|
||||
T1 = tan(phi1Rad)*tan(phi1Rad)
|
||||
C1 = eccPrimeSquared*cos(phi1Rad)*cos(phi1Rad)
|
||||
R1 = a*(1-eccSquared)/pow(1-eccSquared*sin(phi1Rad)*sin(phi1Rad), 1.5)
|
||||
D = x/(N1*k0)
|
||||
N1 = a / sqrt(1 - eccSquared * sin(phi1Rad) * sin(phi1Rad))
|
||||
T1 = tan(phi1Rad) * tan(phi1Rad)
|
||||
C1 = eccPrimeSquared * cos(phi1Rad) * cos(phi1Rad)
|
||||
R1 = a * (1 - eccSquared) / pow(1 - eccSquared * sin(phi1Rad) * sin(phi1Rad), 1.5)
|
||||
D = x / (N1 * k0)
|
||||
|
||||
Lat = phi1Rad - (N1*tan(phi1Rad)/R1)*(D*D/2-(5+3*T1+10*C1-4*C1*C1-9*eccPrimeSquared)*D*D*D*D/24
|
||||
+(61+90*T1+298*C1+45*T1*T1-252*eccPrimeSquared-3*C1*C1)*D*D*D*D*D*D/720)
|
||||
Lat = phi1Rad - (N1 * tan(phi1Rad) / R1) * (
|
||||
D * D / 2 - (5 + 3 * T1 + 10 * C1 - 4 * C1 * C1 - 9 * eccPrimeSquared) * D * D * D * D / 24
|
||||
+ (61 + 90 * T1 + 298 * C1 + 45 * T1 * T1 - 252 * eccPrimeSquared - 3 * C1 * C1) * D * D * D * D * D * D / 720)
|
||||
Lat = Lat * _rad2deg
|
||||
|
||||
Long = (D-(1+2*T1+C1)*D*D*D/6+(5-2*C1+28*T1-3*C1*C1+8*eccPrimeSquared+24*T1*T1)
|
||||
*D*D*D*D*D/120)/cos(phi1Rad)
|
||||
Long = (D - (1 + 2 * T1 + C1) * D * D * D / 6 + (
|
||||
5 - 2 * C1 + 28 * T1 - 3 * C1 * C1 + 8 * eccPrimeSquared + 24 * T1 * T1)
|
||||
* D * D * D * D * D / 120) / cos(phi1Rad)
|
||||
Long = LongOrigin + Long * _rad2deg
|
||||
return (Lat, Long)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
(z, e, n) = LLtoUTM(23, 45.00, -75.00)
|
||||
print z, e, n
|
||||
print UTMtoLL(23, n, e, z)
|
||||
|
||||
|
|
|
@ -1,75 +1,76 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
#
|
||||
'''
|
||||
"""
|
||||
event.py: event loop implementation using a heap queue and threads.
|
||||
'''
|
||||
import time
|
||||
import threading
|
||||
"""
|
||||
|
||||
import heapq
|
||||
import threading
|
||||
import time
|
||||
|
||||
from core.misc import log
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
|
||||
class Timer(threading.Thread):
|
||||
"""
|
||||
Based on threading.Timer but cancel() returns if the timer was
|
||||
already running.
|
||||
"""
|
||||
|
||||
def __init__(self, interval, function, args=[], kwargs={}):
|
||||
super(Timer, self).__init__()
|
||||
self.interval = interval
|
||||
self.function = function
|
||||
self.args = args
|
||||
self.kwargs = kwargs
|
||||
self.finished = threading.Event()
|
||||
self._running = threading.Lock()
|
||||
|
||||
def cancel(self):
|
||||
"""
|
||||
Stop the timer if it hasn't finished yet. Return False if
|
||||
the timer was already running.
|
||||
"""
|
||||
locked = self._running.acquire(False)
|
||||
if locked:
|
||||
self.finished.set()
|
||||
self._running.release()
|
||||
return locked
|
||||
|
||||
def run(self):
|
||||
self.finished.wait(self.interval)
|
||||
with self._running:
|
||||
if not self.finished.is_set():
|
||||
self.function(*self.args, **self.kwargs)
|
||||
self.finished.set()
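# Usage sketch (illustrative, not part of the commit):
#
#     t = Timer(2.0, logger.info, args=["timer fired"])
#     t.start()
#     cancelled = t.cancel()  # False if the callback was already running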
|
||||
|
||||
|
||||
class Event(object):
|
||||
def __init__(self, eventnum, event_time, func, *args, **kwds):
|
||||
self.eventnum = eventnum
|
||||
self.time = event_time
|
||||
self.func = func
|
||||
self.args = args
|
||||
self.kwds = kwds
|
||||
self.canceled = False
|
||||
|
||||
def __cmp__(self, other):
|
||||
tmp = cmp(self.time, other.time)
|
||||
if tmp == 0:
|
||||
tmp = cmp(self.eventnum, other.eventnum)
|
||||
return tmp
|
||||
|
||||
def run(self):
|
||||
if self.canceled:
|
||||
return
|
||||
self.func(*self.args, **self.kwds)
|
||||
|
||||
def cancel(self):
|
||||
# XXX not thread-safe
|
||||
self.canceled = True
|
||||
|
||||
|
||||
class EventLoop(object):
|
||||
|
||||
class Timer(threading.Thread):
|
||||
'''\
|
||||
Based on threading.Timer but cancel() returns if the timer was
|
||||
already running.
|
||||
'''
|
||||
|
||||
def __init__(self, interval, function, args=[], kwargs={}):
|
||||
super(EventLoop.Timer, self).__init__()
|
||||
self.interval = interval
|
||||
self.function = function
|
||||
self.args = args
|
||||
self.kwargs = kwargs
|
||||
self.finished = threading.Event()
|
||||
self._running = threading.Lock()
|
||||
|
||||
def cancel(self):
|
||||
'''\
|
||||
Stop the timer if it hasn't finished yet. Return False if
|
||||
the timer was already running.
|
||||
'''
|
||||
locked = self._running.acquire(False)
|
||||
if locked:
|
||||
self.finished.set()
|
||||
self._running.release()
|
||||
return locked
|
||||
|
||||
def run(self):
|
||||
self.finished.wait(self.interval)
|
||||
with self._running:
|
||||
if not self.finished.is_set():
|
||||
self.function(*self.args, **self.kwargs)
|
||||
self.finished.set()
|
||||
|
||||
class Event(object):
|
||||
def __init__(self, eventnum, time, func, *args, **kwds):
|
||||
self.eventnum = eventnum
|
||||
self.time = time
|
||||
self.func = func
|
||||
self.args = args
|
||||
self.kwds = kwds
|
||||
self.canceled = False
|
||||
|
||||
def __cmp__(self, other):
|
||||
tmp = cmp(self.time, other.time)
|
||||
if tmp == 0:
|
||||
tmp = cmp(self.eventnum, other.eventnum)
|
||||
return tmp
|
||||
|
||||
def run(self):
|
||||
if self.canceled:
|
||||
return
|
||||
self.func(*self.args, **self.kwds)
|
||||
|
||||
def cancel(self):
|
||||
self.canceled = True # XXX not thread-safe
|
||||
|
||||
def __init__(self):
|
||||
self.lock = threading.RLock()
|
||||
self.queue = []
|
||||
|
@ -103,7 +104,7 @@ class EventLoop(object):
|
|||
return
|
||||
delay = self.queue[0].time - time.time()
|
||||
assert self.timer is None
|
||||
self.timer = EventLoop.Timer(delay, self.__run_events)
|
||||
self.timer = Timer(delay, self.__run_events)
|
||||
self.timer.daemon = True
|
||||
self.timer.start()
|
||||
|
||||
|
@ -136,7 +137,7 @@ class EventLoop(object):
|
|||
evtime = float(delaysec)
|
||||
if self.running:
|
||||
evtime += time.time()
|
||||
event = self.Event(eventnum, evtime, func, *args, **kwds)
|
||||
event = Event(eventnum, evtime, func, *args, **kwds)
|
||||
|
||||
if self.queue:
|
||||
prevhead = self.queue[0]
|
||||
|
@ -152,12 +153,13 @@ class EventLoop(object):
|
|||
self.__schedule_event()
|
||||
return event
|
||||
|
||||
|
||||
def example():
|
||||
loop = EventLoop()
|
||||
|
||||
def msg(arg):
|
||||
delta = time.time() - loop.start
|
||||
print delta, 'arg:', arg
|
||||
logger.debug("%s arg: %s", delta, arg)
|
||||
|
||||
def repeat(interval, count):
|
||||
count -= 1
|
||||
|
|
|
@ -1,34 +1,35 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Tom Goff <thomas.goff@boeing.com>
|
||||
#
|
||||
'''
|
||||
ipaddr.py: helper objects for dealing with IPv4/v6 addresses.
|
||||
'''
|
||||
"""
|
||||
Helper objects for dealing with IPv4/v6 addresses.
|
||||
"""
|
||||
|
||||
import random
|
||||
import socket
|
||||
import struct
|
||||
import random
|
||||
from socket import AF_INET
|
||||
from socket import AF_INET6
|
||||
|
||||
AF_INET = socket.AF_INET
|
||||
AF_INET6 = socket.AF_INET6
|
||||
from core.misc import log
|
||||
|
||||
class MacAddr(object):
|
||||
def __init__(self, addr):
|
||||
self.addr = addr
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
|
||||
class MacAddress(object):
|
||||
def __init__(self, address):
|
||||
self.addr = address
|
||||
|
||||
def __str__(self):
|
||||
return ":".join(map(lambda x: ("%02x" % ord(x)), self.addr))
|
||||
|
||||
def tolinklocal(self):
|
||||
''' Convert the MAC address to a IPv6 link-local address, using EUI 48
|
||||
return ":".join(map(lambda x: "%02x" % ord(x), self.addr))
|
||||
|
||||
def to_link_local(self):
|
||||
"""
|
||||
Convert the MAC address to a IPv6 link-local address, using EUI 48
|
||||
to EUI 64 conversion process per RFC 5342.
|
||||
'''
|
||||
|
||||
:return: ip address object
|
||||
:rtype: IpAddress
|
||||
"""
|
||||
if not self.addr:
|
||||
return IPAddr.fromstring("::")
|
||||
return IpAddress.from_string("::")
|
||||
tmp = struct.unpack("!Q", '\x00\x00' + self.addr)[0]
|
||||
nic = long(tmp) & 0x000000FFFFFFL
|
||||
oui = long(tmp) & 0xFFFFFF000000L
|
||||
|
@ -36,32 +37,33 @@ class MacAddr(object):
|
|||
oui ^= 0x020000000000L
|
||||
# append EUI-48 octets
|
||||
oui = (oui << 16) | 0xFFFE000000L
|
||||
return IPAddr(AF_INET6, struct.pack("!QQ", 0xfe80 << 48, oui | nic))
|
||||
return IpAddress(AF_INET6, struct.pack("!QQ", 0xfe80 << 48, oui | nic))
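# Illustrative example (expected behavior, assuming IpAddress.__str__ uses
# inet_ntop):
#
#     mac = MacAddress.from_string("00:16:3e:aa:bb:cc")
#     str(mac.to_link_local())  # -> "fe80::216:3eff:feaa:bbcc"
#
# the universal/local bit of the first octet is flipped and ff:fe is inserted
# between the OUI and NIC halves, per the EUI-48 to EUI-64 mapping.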
|
||||
|
||||
@classmethod
|
||||
def fromstring(cls, s):
|
||||
def from_string(cls, s):
|
||||
addr = "".join(map(lambda x: chr(int(x, 16)), s.split(":")))
|
||||
return cls(addr)
|
||||
|
||||
@classmethod
|
||||
def random(cls):
|
||||
tmp = random.randint(0, 0xFFFFFF)
|
||||
tmp |= 0x00163E << 24 # use the Xen OID 00:16:3E
|
||||
tmp |= 0x00163E << 24 # use the Xen OID 00:16:3E
|
||||
tmpbytes = struct.pack("!Q", tmp)
|
||||
return cls(tmpbytes[2:])
|
||||
|
||||
class IPAddr(object):
|
||||
def __init__(self, af, addr):
|
||||
|
||||
class IpAddress(object):
|
||||
def __init__(self, af, address):
|
||||
# check if (af, addr) is valid
|
||||
if not socket.inet_ntop(af, addr):
|
||||
if not socket.inet_ntop(af, address):
|
||||
raise ValueError, "invalid af/addr"
|
||||
self.af = af
|
||||
self.addr = addr
|
||||
self.addr = address
|
||||
|
||||
def isIPv4(self):
|
||||
def is_ipv4(self):
|
||||
return self.af == AF_INET
|
||||
|
||||
def isIPv6(self):
|
||||
def is_ipv6(self):
|
||||
return self.af == AF_INET6
|
||||
|
||||
def __str__(self):
|
||||
|
@ -70,14 +72,17 @@ class IPAddr(object):
|
|||
def __eq__(self, other):
|
||||
try:
|
||||
return other.af == self.af and other.addr == self.addr
|
||||
except:
|
||||
except AttributeError:
|
||||
logger.exception("error during equals compare")
|
||||
return False
|
||||
|
||||
def __add__(self, other):
|
||||
try:
|
||||
carry = int(other)
|
||||
except:
|
||||
except ValueError:
|
||||
logger.exception("error during addition")
|
||||
return NotImplemented
|
||||
|
||||
tmp = map(lambda x: ord(x), self.addr)
|
||||
for i in xrange(len(tmp) - 1, -1, -1):
|
||||
x = tmp[i] + carry
|
||||
|
@ -91,27 +96,26 @@ class IPAddr(object):
|
|||
def __sub__(self, other):
|
||||
try:
|
||||
tmp = -int(other)
|
||||
except:
|
||||
except ValueError:
|
||||
logger.exception("error during subtraction")
|
||||
return NotImplemented
|
||||
return self.__add__(tmp)
|
||||
|
||||
@classmethod
|
||||
def fromstring(cls, s):
|
||||
def from_string(cls, s):
|
||||
for af in AF_INET, AF_INET6:
|
||||
try:
|
||||
return cls(af, socket.inet_pton(af, s))
|
||||
except Exception, e:
|
||||
pass
|
||||
raise e
|
||||
|
||||
@staticmethod
|
||||
def toint(s):
|
||||
''' convert IPv4 string to 32-bit integer
|
||||
'''
|
||||
bin = socket.inet_pton(AF_INET, s)
|
||||
return(struct.unpack('!I', bin)[0])
|
||||
    try:
        return cls(af, socket.inet_pton(af, s))
    except socket.error:
        # not a valid address in this family, try the next one
        continue
raise ValueError("invalid address: %s" % s)
|
||||
|
||||
class IPPrefix(object):
|
||||
@staticmethod
|
||||
def to_int(s):
|
||||
"""
|
||||
convert IPv4 string to 32-bit integer
|
||||
"""
|
||||
bin = socket.inet_pton(AF_INET, s)
|
||||
return struct.unpack('!I', bin)[0]
|
||||
|
||||
|
||||
class IpPrefix(object):
|
||||
def __init__(self, af, prefixstr):
|
||||
"prefixstr format: address/prefixlen"
|
||||
tmp = prefixstr.split("/")
|
||||
|
@ -139,26 +143,21 @@ class IPPrefix(object):
|
|||
self.prefix = self.prefix[:i] + prefix
|
||||
|
||||
def __str__(self):
|
||||
return "%s/%s" % (socket.inet_ntop(self.af, self.prefix),
|
||||
self.prefixlen)
|
||||
return "%s/%s" % (socket.inet_ntop(self.af, self.prefix), self.prefixlen)
|
||||
|
||||
def __eq__(self, other):
|
||||
try:
|
||||
return other.af == self.af and \
|
||||
other.prefixlen == self.prefixlen and \
|
||||
other.prefix == self.prefix
|
||||
except:
|
||||
return False
|
||||
return other.af == self.af and other.prefixlen == self.prefixlen and other.prefix == self.prefix
|
||||
|
||||
def __add__(self, other):
|
||||
try:
|
||||
tmp = int(other)
|
||||
except:
|
||||
except ValueError:
|
||||
logger.exception("error during addition")
|
||||
return NotImplemented
|
||||
a = IPAddr(self.af, self.prefix) + \
|
||||
(tmp << (self.addrlen - self.prefixlen))
|
||||
|
||||
a = IpAddress(self.af, self.prefix) + (tmp << (self.addrlen - self.prefixlen))
|
||||
prefixstr = "%s/%s" % (a, self.prefixlen)
|
||||
if self.__class__ == IPPrefix:
|
||||
if self.__class__ == IpPrefix:
|
||||
return self.__class__(self.af, prefixstr)
|
||||
else:
|
||||
return self.__class__(prefixstr)
|
||||
|
@ -166,65 +165,74 @@ class IPPrefix(object):
|
|||
def __sub__(self, other):
|
||||
try:
|
||||
tmp = -int(other)
|
||||
except:
|
||||
except ValueError:
|
||||
logger.exception("error during subtraction")
|
||||
return NotImplemented
|
||||
|
||||
return self.__add__(tmp)
|
||||
|
||||
def addr(self, hostid):
|
||||
tmp = int(hostid)
|
||||
if (tmp == 1 or tmp == 0 or tmp == -1) and self.addrlen == self.prefixlen:
|
||||
return IPAddr(self.af, self.prefix)
|
||||
if tmp == 0 or \
|
||||
tmp > (1 << (self.addrlen - self.prefixlen)) - 1 or \
|
||||
if tmp in [-1, 0, 1] and self.addrlen == self.prefixlen:
|
||||
return IpAddress(self.af, self.prefix)
|
||||
if tmp == 0 or tmp > (1 << (self.addrlen - self.prefixlen)) - 1 or \
|
||||
(self.af == AF_INET and tmp == (1 << (self.addrlen - self.prefixlen)) - 1):
|
||||
raise ValueError, "invalid hostid for prefix %s: %s" % (self, hostid)
|
||||
raise ValueError("invalid hostid for prefix %s: %s" % (self, hostid))
|
||||
|
||||
addr = ""
|
||||
prefix_endpoint = -1
|
||||
for i in xrange(-1, -(self.addrlen >> 3) - 1, -1):
|
||||
prefix_endpoint = i
|
||||
addr = chr(ord(self.prefix[i]) | (tmp & 0xff)) + addr
|
||||
tmp >>= 8
|
||||
if not tmp:
|
||||
break
|
||||
addr = self.prefix[:i] + addr
|
||||
return IPAddr(self.af, addr)
|
||||
addr = self.prefix[:prefix_endpoint] + addr
|
||||
return IpAddress(self.af, addr)
|
||||
|
||||
def minaddr(self):
|
||||
def min_addr(self):
|
||||
return self.addr(1)
|
||||
|
||||
def maxaddr(self):
|
||||
def max_addr(self):
|
||||
if self.af == AF_INET:
|
||||
return self.addr((1 << (self.addrlen - self.prefixlen)) - 2)
|
||||
else:
|
||||
return self.addr((1 << (self.addrlen - self.prefixlen)) - 1)
|
||||
|
||||
def numaddr(self):
|
||||
def num_addr(self):
|
||||
return max(0, (1 << (self.addrlen - self.prefixlen)) - 2)
|
||||
|
||||
def prefixstr(self):
|
||||
|
||||
def prefix_str(self):
|
||||
return "%s" % socket.inet_ntop(self.af, self.prefix)
|
||||
|
||||
def netmaskstr(self):
|
||||
|
||||
def netmask_str(self):
|
||||
addrbits = self.addrlen - self.prefixlen
|
||||
netmask = ((1L << self.prefixlen) - 1) << addrbits
|
||||
netmaskbytes = struct.pack("!L", netmask)
|
||||
return IPAddr(af=AF_INET, addr=netmaskbytes).__str__()
|
||||
netmaskbytes = struct.pack("!L", netmask)
|
||||
return IpAddress(af=AF_INET, address=netmaskbytes).__str__()
|
||||
|
||||
class IPv4Prefix(IPPrefix):
|
||||
|
||||
class Ipv4Prefix(IpPrefix):
|
||||
def __init__(self, prefixstr):
|
||||
IPPrefix.__init__(self, AF_INET, prefixstr)
|
||||
IpPrefix.__init__(self, AF_INET, prefixstr)
|
||||
|
||||
class IPv6Prefix(IPPrefix):
|
||||
|
||||
class Ipv6Prefix(IpPrefix):
|
||||
def __init__(self, prefixstr):
|
||||
IPPrefix.__init__(self, AF_INET6, prefixstr)
|
||||
IpPrefix.__init__(self, AF_INET6, prefixstr)
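# Usage sketch (illustrative, not part of the commit):
#
#     p = Ipv4Prefix("10.0.0.0/24")
#     str(p.min_addr())   # "10.0.0.1"
#     str(p.max_addr())   # "10.0.0.254"
#     p.num_addr()        # 254
#     p.netmask_str()     # "255.255.255.0"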
|
||||
|
||||
def isIPAddress(af, addrstr):
|
||||
|
||||
def is_ip_address(af, addrstr):
|
||||
try:
|
||||
tmp = socket.inet_pton(af, addrstr)
|
||||
return True
|
||||
except:
|
||||
return False
|
||||
|
||||
def isIPv4Address(addrstr):
|
||||
return isIPAddress(AF_INET, addrstr)
|
||||
|
||||
def isIPv6Address(addrstr):
|
||||
return isIPAddress(AF_INET6, addrstr)
|
||||
def is_ipv4_address(addrstr):
|
||||
return is_ip_address(AF_INET, addrstr)
|
||||
|
||||
|
||||
def is_ipv6_address(addrstr):
|
||||
return is_ip_address(AF_INET6, addrstr)
|
35
daemon/core/misc/log.py
Normal file
|
@ -0,0 +1,35 @@
|
|||
"""
|
||||
Convenience methods to setup logging.
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
_LOG_LEVEL = logging.INFO
|
||||
_LOG_FORMAT = '%(levelname)-7s %(asctime)s %(name)-15s %(funcName)-15s %(lineno)-4d: %(message)s'
|
||||
_INITIAL = True
|
||||
|
||||
|
||||
def setup(level=_LOG_LEVEL, log_format=_LOG_FORMAT):
|
||||
"""
|
||||
Configure logging with a basic configuration that outputs to the console.
|
||||
|
||||
:param int level: logging level to use, defaults to the module-defined level
|
||||
:param str log_format: format string for log messages, defaults to the module-defined format
|
||||
:return: nothing
|
||||
"""
|
||||
logging.basicConfig(level=level, format=log_format)
|
||||
|
||||
|
||||
def get_logger(name):
|
||||
"""
|
||||
Retrieve a logger for logging.
|
||||
|
||||
:param str name: name for logger to retrieve
|
||||
:return: logging.Logger instance for the given name
|
||||
"""
|
||||
global _INITIAL
|
||||
if _INITIAL:
|
||||
setup()
|
||||
_INITIAL = False
|
||||
|
||||
return logging.getLogger(name)
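# Usage sketch (matches how other modules in this commit obtain a logger):
#
#     from core.misc import log
#     logger = log.get_logger(__name__)
#     logger.info("created node: %s", "n1")
#
# the first get_logger() call runs setup() with the module-level defaults.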
|
44
daemon/core/misc/nodemaps.py
Normal file
|
@ -0,0 +1,44 @@
|
|||
from core.emane.nodes import EmaneNet
|
||||
from core.emane.nodes import EmaneNode
|
||||
from core.enumerations import NodeTypes
|
||||
from core.netns import nodes
|
||||
from core.netns import openvswitch
|
||||
from core.netns.vnet import GreTapBridge
|
||||
from core.phys import pnodes
|
||||
from core.xen import xen
|
||||
|
||||
CLASSIC_NODES = {
|
||||
NodeTypes.DEFAULT: nodes.CoreNode,
|
||||
NodeTypes.PHYSICAL: pnodes.PhysicalNode,
|
||||
NodeTypes.XEN: xen.XenNode,
|
||||
NodeTypes.TBD: None,
|
||||
NodeTypes.SWITCH: nodes.SwitchNode,
|
||||
NodeTypes.HUB: nodes.HubNode,
|
||||
NodeTypes.WIRELESS_LAN: nodes.WlanNode,
|
||||
NodeTypes.RJ45: nodes.RJ45Node,
|
||||
NodeTypes.TUNNEL: nodes.TunnelNode,
|
||||
NodeTypes.KTUNNEL: None,
|
||||
NodeTypes.EMANE: EmaneNode,
|
||||
NodeTypes.EMANE_NET: EmaneNet,
|
||||
NodeTypes.TAP_BRIDGE: GreTapBridge,
|
||||
NodeTypes.PEER_TO_PEER: nodes.PtpNet,
|
||||
NodeTypes.CONTROL_NET: nodes.CtrlNet
|
||||
}
|
||||
|
||||
OVS_NODES = {
|
||||
NodeTypes.DEFAULT: nodes.CoreNode,
|
||||
NodeTypes.PHYSICAL: pnodes.PhysicalNode,
|
||||
NodeTypes.XEN: xen.XenNode,
|
||||
NodeTypes.TBD: None,
|
||||
NodeTypes.SWITCH: openvswitch.OvsSwitchNode,
|
||||
NodeTypes.HUB: openvswitch.OvsHubNode,
|
||||
NodeTypes.WIRELESS_LAN: openvswitch.OvsWlanNode,
|
||||
NodeTypes.RJ45: nodes.RJ45Node,
|
||||
NodeTypes.TUNNEL: openvswitch.OvsTunnelNode,
|
||||
NodeTypes.KTUNNEL: None,
|
||||
NodeTypes.EMANE: EmaneNode,
|
||||
NodeTypes.EMANE_NET: EmaneNet,
|
||||
NodeTypes.TAP_BRIDGE: openvswitch.OvsGreTapBridge,
|
||||
NodeTypes.PEER_TO_PEER: openvswitch.OvsPtpNet,
|
||||
NodeTypes.CONTROL_NET: openvswitch.OvsCtrlNet
|
||||
}
|
41
daemon/core/misc/nodeutils.py
Normal file
|
@ -0,0 +1,41 @@
|
|||
"""
|
||||
Serves as a global point for storing and retrieving node types needed during simulation.
|
||||
"""
|
||||
|
||||
import pprint
|
||||
|
||||
from core.misc import log
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
_NODE_MAP = None
|
||||
|
||||
|
||||
def _convert_map(x, y):
|
||||
x[y[0].name] = y[1]
|
||||
return x
|
||||
|
||||
|
||||
def set_node_map(node_map):
|
||||
global _NODE_MAP
|
||||
print_map = reduce(lambda x, y: _convert_map(x, y), node_map.items(), {})
|
||||
logger.info("setting node class map: \n%s", pprint.pformat(print_map, indent=4))
|
||||
_NODE_MAP = node_map
|
||||
|
||||
|
||||
def get_node_class(node_type):
|
||||
global _NODE_MAP
|
||||
return _NODE_MAP[node_type]
|
||||
|
||||
|
||||
def is_node(obj, node_types):
|
||||
type_classes = []
|
||||
if isinstance(node_types, (tuple, list)):
|
||||
for node_type in node_types:
|
||||
type_class = get_node_class(node_type)
|
||||
type_classes.append(type_class)
|
||||
else:
|
||||
type_class = get_node_class(node_types)
|
||||
type_classes.append(type_class)
|
||||
|
||||
return isinstance(obj, tuple(type_classes))
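# Usage sketch (illustrative, not part of the commit): a node map from
# core.misc.nodemaps is installed once, then classes are resolved by type.
#
#     from core.enumerations import NodeTypes
#     from core.misc import nodemaps, nodeutils
#
#     nodeutils.set_node_map(nodemaps.CLASSIC_NODES)
#     switch_class = nodeutils.get_node_class(NodeTypes.SWITCH)  # nodes.SwitchNode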
|
|
@ -9,20 +9,22 @@
|
|||
quagga.py: helper class for generating Quagga configuration.
|
||||
'''
|
||||
|
||||
import os.path
|
||||
from string import Template
|
||||
|
||||
|
||||
def maketuple(obj):
|
||||
if hasattr(obj, "__iter__"):
|
||||
return tuple(obj)
|
||||
else:
|
||||
return (obj,)
|
||||
|
||||
|
||||
class NetIf(object):
|
||||
def __init__(self, name, addrlist = []):
|
||||
def __init__(self, name, addrlist=[]):
|
||||
self.name = name
|
||||
self.addrlist = addrlist
|
||||
|
||||
|
||||
class Conf(object):
|
||||
def __init__(self, **kwds):
|
||||
self.kwds = kwds
|
||||
|
@ -33,6 +35,7 @@ class Conf(object):
|
|||
tmp = tmp[:-1]
|
||||
return tmp
|
||||
|
||||
|
||||
class QuaggaOSPF6Interface(Conf):
|
||||
AF_IPV6_ID = 0
|
||||
AF_IPV4_ID = 65
|
||||
|
@ -50,13 +53,14 @@ interface $interface
|
|||
ipv6 ospf6 lsafullness mincostlsa
|
||||
""")
|
||||
|
||||
# ip address $ipaddr/32
|
||||
# ipv6 ospf6 simhelloLLtoULRecv :$simhelloport
|
||||
# !$ipaddr:$simhelloport
|
||||
# ip address $ipaddr/32
|
||||
# ipv6 ospf6 simhelloLLtoULRecv :$simhelloport
|
||||
# !$ipaddr:$simhelloport
|
||||
|
||||
def __init__(self, netif, instanceid = AF_IPV4_ID,
|
||||
network = "manet-designated-router", **kwds):
|
||||
def __init__(self, netif, instanceid=AF_IPV4_ID,
|
||||
network="manet-designated-router", **kwds):
|
||||
self.netif = netif
|
||||
|
||||
def addrstr(x):
|
||||
if x.find(".") >= 0:
|
||||
return "ip address %s" % x
|
||||
|
@ -64,18 +68,19 @@ interface $interface
|
|||
return "ipv6 address %s" % x
|
||||
else:
|
||||
raise Value, "invalid address: %s", x
|
||||
|
||||
addr = "\n ".join(map(addrstr, netif.addrlist))
|
||||
|
||||
self.instanceid = instanceid
|
||||
self.network = network
|
||||
Conf.__init__(self, interface = netif.name, addr = addr,
|
||||
instanceid = instanceid, network = network, **kwds)
|
||||
Conf.__init__(self, interface=netif.name, addr=addr,
|
||||
instanceid=instanceid, network=network, **kwds)
|
||||
|
||||
def name(self):
|
||||
return self.netif.name
|
||||
|
||||
class QuaggaOSPF6(Conf):
|
||||
|
||||
class QuaggaOSPF6(Conf):
|
||||
template = Template("""\
|
||||
$interfaces
|
||||
!
|
||||
|
@ -86,13 +91,13 @@ router ospf6
|
|||
""")
|
||||
|
||||
def __init__(self, ospf6ifs, area, routerid,
|
||||
redistribute = "! no redistribute"):
|
||||
redistribute="! no redistribute"):
|
||||
ospf6ifs = maketuple(ospf6ifs)
|
||||
interfaces = "\n!\n".join(map(str, ospf6ifs))
|
||||
ospfifs = "\n ".join(map(lambda x: "interface %s area %s" % \
|
||||
(x.name(), area), ospf6ifs))
|
||||
Conf.__init__(self, interfaces = interfaces, routerid = routerid,
|
||||
ospfifs = ospfifs, redistribute = redistribute)
|
||||
(x.name(), area), ospf6ifs))
|
||||
Conf.__init__(self, interfaces=interfaces, routerid=routerid,
|
||||
ospfifs=ospfifs, redistribute=redistribute)
|
||||
|
||||
|
||||
class QuaggaConf(Conf):
|
||||
|
@ -105,12 +110,12 @@ $routers
|
|||
$forwarding
|
||||
""")
|
||||
|
||||
def __init__(self, routers, logfile, debugs = ()):
|
||||
def __init__(self, routers, logfile, debugs=()):
|
||||
routers = "\n!\n".join(map(str, maketuple(routers)))
|
||||
if debugs:
|
||||
debugs = "\n".join(maketuple(debugs))
|
||||
else:
|
||||
debugs = "! no debugs"
|
||||
forwarding = "ip forwarding\nipv6 forwarding"
|
||||
Conf.__init__(self, logfile = logfile, debugs = debugs,
|
||||
routers = routers, forwarding = forwarding)
|
||||
Conf.__init__(self, logfile=logfile, debugs=debugs,
|
||||
routers=routers, forwarding=forwarding)
|
||||
|
|
44
daemon/core/misc/structutils.py
Normal file
|
@ -0,0 +1,44 @@
|
|||
from core.misc import log
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
|
||||
def pack_values(clazz, packers):
|
||||
"""
|
||||
Pack values for a given legacy class.
|
||||
|
||||
:param class clazz: class that will provide a pack method
|
||||
:param list packers: a list of tuples that are used to pack values and transform them
|
||||
:return: packed data string of all values
|
||||
"""
|
||||
|
||||
# iterate through tuples of values to pack
|
||||
data = ""
|
||||
for packer in packers:
|
||||
# check if a transformer was provided for valid values
|
||||
transformer = None
|
||||
if len(packer) == 2:
|
||||
tlv_type, value = packer
|
||||
elif len(packer) == 3:
|
||||
tlv_type, value, transformer = packer
|
||||
else:
|
||||
raise RuntimeError("packer had more than 3 arguments")
|
||||
|
||||
# convert unicode to normal str for packing
|
||||
if isinstance(value, unicode):
|
||||
value = str(value)
|
||||
|
||||
# only pack actual values and avoid packing empty strings
|
||||
# protobuf defaults to empty strings, which do not imply a value to set
|
||||
if value is None or (isinstance(value, str) and not value):
|
||||
continue
|
||||
|
||||
# transform values as needed
|
||||
if transformer:
|
||||
value = transformer(value)
|
||||
|
||||
# pack and add to existing data
|
||||
logger.info("packing: %s - %s", tlv_type, value)
|
||||
data += clazz.pack(tlv_type.value, value)
|
||||
|
||||
return data
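# Illustrative sketch (SomeTlvClass is hypothetical; any class exposing a
# pack(tlv_type, value) method returning a string works):
#
#     packers = [
#         (NodeTlvs.NUMBER, 1),
#         (NodeTlvs.NAME, u"n1", str),  # optional transformer as third element
#         (NodeTlvs.ICON, None),        # skipped: None values are never packed
#     ]
#     data = pack_values(SomeTlvClass, packers)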
|
|
@ -1,31 +1,37 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
utils.py: miscellaneous utility functions, wrappers around some subprocess
|
||||
procedures.
|
||||
'''
|
||||
"""
|
||||
Miscellaneous utility functions, wrappers around some subprocess procedures.
|
||||
"""
|
||||
|
||||
import ast
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
import subprocess, os, ast
|
||||
import fcntl
|
||||
|
||||
from core.misc import log
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
|
||||
def closeonexec(fd):
|
||||
fdflags = fcntl.fcntl(fd, fcntl.F_GETFD)
|
||||
fcntl.fcntl(fd, fcntl.F_SETFD, fdflags | fcntl.FD_CLOEXEC)
|
||||
|
||||
def checkexec(execlist):
|
||||
for bin in execlist:
|
||||
if which(bin) is None:
|
||||
raise EnvironmentError, "executable not found: %s" % bin
|
||||
|
||||
def check_executables(executables):
|
||||
for executable in executables:
|
||||
if not (os.path.isfile(executable) and os.access(executable, os.X_OK)):
|
||||
raise EnvironmentError("executable not found: %s" % executable)
|
||||
|
||||
|
||||
def which(program):
|
||||
''' From: http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
|
||||
'''
|
||||
"""
|
||||
From: http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
|
||||
|
||||
:param str program: program to check for
|
||||
:return: path if it exists, none otherwise
|
||||
"""
|
||||
|
||||
def is_exe(fpath):
|
||||
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
|
||||
|
||||
|
@ -42,82 +48,91 @@ def which(program):
|
|||
|
||||
return None
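# Usage sketch (illustrative):
#
#     path = which("brctl")
#     if path is None:
#         logger.error("required executable not found: brctl")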
|
||||
|
||||
|
||||
def ensurepath(pathlist):
|
||||
searchpath = os.environ["PATH"].split(":")
|
||||
for p in set(pathlist):
|
||||
if p not in searchpath:
|
||||
os.environ["PATH"] += ":" + p
|
||||
|
||||
|
||||
def maketuple(obj):
|
||||
if hasattr(obj, "__iter__"):
|
||||
return tuple(obj)
|
||||
else:
|
||||
return (obj,)
|
||||
|
||||
return obj,
|
||||
|
||||
|
||||
def maketuplefromstr(s, type):
|
||||
s.replace('\\', '\\\\')
|
||||
return ast.literal_eval(s)
|
||||
#return tuple(type(i) for i in s[1:-1].split(','))
|
||||
#r = ()
|
||||
#for i in s.strip("()").split(','):
|
||||
# return tuple(type(i) for i in s[1:-1].split(','))
|
||||
# r = ()
|
||||
# for i in s.strip("()").split(','):
|
||||
# r += (i.strip("' "), )
|
||||
# chop empty last element from "('a',)" strings
|
||||
#if r[-1] == '':
|
||||
# if r[-1] == '':
|
||||
# r = r[:-1]
|
||||
#return r
|
||||
# return r
|
||||
|
||||
def call(*args, **kwds):
|
||||
return subprocess.call(*args, **kwds)
|
||||
|
||||
def mutecall(*args, **kwds):
|
||||
kwds["stdout"] = open(os.devnull, "w")
|
||||
kwds["stderr"] = subprocess.STDOUT
|
||||
return call(*args, **kwds)
|
||||
return subprocess.call(*args, **kwds)
|
||||
|
||||
def check_call(*args, **kwds):
|
||||
return subprocess.check_call(*args, **kwds)
|
||||
|
||||
def mutecheck_call(*args, **kwds):
|
||||
kwds["stdout"] = open(os.devnull, "w")
|
||||
kwds["stderr"] = subprocess.STDOUT
|
||||
return subprocess.check_call(*args, **kwds)
|
||||
|
||||
|
||||
def spawn(*args, **kwds):
|
||||
return subprocess.Popen(*args, **kwds).pid
|
||||
|
||||
|
||||
def mutespawn(*args, **kwds):
|
||||
kwds["stdout"] = open(os.devnull, "w")
|
||||
kwds["stderr"] = subprocess.STDOUT
|
||||
return subprocess.Popen(*args, **kwds).pid
|
||||
|
||||
|
||||
def detachinit():
|
||||
if os.fork():
|
||||
os._exit(0) # parent exits
|
||||
# parent exits
|
||||
os._exit(0)
|
||||
os.setsid()
|
||||
|
||||
|
||||
def detach(*args, **kwds):
|
||||
kwds["preexec_fn"] = detachinit
|
||||
return subprocess.Popen(*args, **kwds).pid
|
||||
|
||||
|
||||
def mutedetach(*args, **kwds):
|
||||
kwds["preexec_fn"] = detachinit
|
||||
kwds["stdout"] = open(os.devnull, "w")
|
||||
kwds["stderr"] = subprocess.STDOUT
|
||||
return subprocess.Popen(*args, **kwds).pid
|
||||
|
||||
def cmdresult(args):
|
||||
''' Execute a command on the host and return a tuple containing the
|
||||
exit status and result string. stderr output
|
||||
is folded into the stdout result string.
|
||||
'''
|
||||
cmdid = subprocess.Popen(args, stdin = open(os.devnull, 'r'),
|
||||
stdout = subprocess.PIPE,
|
||||
stderr = subprocess.STDOUT)
|
||||
result, err = cmdid.communicate() # err will always be None
|
||||
status = cmdid.wait()
|
||||
return (status, result)
|
||||
|
||||
def hexdump(s, bytes_per_word = 2, words_per_line = 8):
|
||||
def cmdresult(args):
|
||||
"""
|
||||
Execute a command on the host and return a tuple containing the
|
||||
exit status and result string. stderr output
|
||||
is folded into the stdout result string.
|
||||
"""
|
||||
cmdid = subprocess.Popen(args, stdin=open(os.devnull, 'r'),
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT)
|
||||
# err will always be None
|
||||
result, err = cmdid.communicate()
|
||||
status = cmdid.wait()
|
||||
return status, result
|
||||
|
||||
|
||||
def hexdump(s, bytes_per_word=2, words_per_line=8):
|
||||
dump = ""
|
||||
count = 0
|
||||
bytes = bytes_per_word * words_per_line
|
||||
|
@ -132,19 +147,24 @@ def hexdump(s, bytes_per_word = 2, words_per_line = 8):
|
|||
count += len(line)
|
||||
return dump[:-1]
|
||||
|
||||
|
||||
def filemunge(pathname, header, text):
|
||||
''' Insert text at the end of a file, surrounded by header comments.
|
||||
'''
|
||||
filedemunge(pathname, header) # prevent duplicates
|
||||
"""
|
||||
Insert text at the end of a file, surrounded by header comments.
|
||||
"""
|
||||
# prevent duplicates
|
||||
filedemunge(pathname, header)
|
||||
f = open(pathname, 'a')
|
||||
f.write("# BEGIN %s\n" % header)
|
||||
f.write(text)
|
||||
f.write("# END %s\n" % header)
|
||||
f.close()
|
||||
|
||||
|
||||
def filedemunge(pathname, header):
|
||||
''' Remove text that was inserted in a file surrounded by header comments.
|
||||
'''
|
||||
"""
|
||||
Remove text that was inserted in a file surrounded by header comments.
|
||||
"""
|
||||
f = open(pathname, 'r')
|
||||
lines = f.readlines()
|
||||
f.close()
|
||||
|
@ -161,10 +181,12 @@ def filedemunge(pathname, header):
|
|||
lines = lines[:start] + lines[end:]
|
||||
f.write("".join(lines))
|
||||
f.close()
|
||||
|
||||
|
||||
|
||||
def expandcorepath(pathname, session=None, node=None):
|
||||
''' Expand a file path given session information.
|
||||
'''
|
||||
"""
|
||||
Expand a file path given session information.
|
||||
"""
|
||||
if session is not None:
|
||||
pathname = pathname.replace('~', "/home/%s" % session.user)
|
||||
pathname = pathname.replace('%SESSION%', str(session.sessionid))
|
||||
|
@ -174,20 +196,24 @@ def expandcorepath(pathname, session=None, node=None):
|
|||
pathname = pathname.replace('%NODE%', str(node.objid))
|
||||
pathname = pathname.replace('%NODENAME%', node.name)
|
||||
return pathname
|
||||
|
||||
|
||||
|
||||
def sysctldevname(devname):
|
||||
''' Translate a device name to the name used with sysctl.
|
||||
'''
|
||||
"""
|
||||
Translate a device name to the name used with sysctl.
|
||||
"""
|
||||
if devname is None:
|
||||
return None
|
||||
return devname.replace(".", "/")
|
||||
|
||||
def daemonize(rootdir = "/", umask = 0, close_fds = False, dontclose = (),
|
||||
stdin = os.devnull, stdout = os.devnull, stderr = os.devnull,
|
||||
stdoutmode = 0644, stderrmode = 0644, pidfilename = None,
|
||||
defaultmaxfd = 1024):
|
||||
''' Run the background process as a daemon.
|
||||
'''
|
||||
|
||||
def daemonize(rootdir="/", umask=0, close_fds=False, dontclose=(),
|
||||
stdin=os.devnull, stdout=os.devnull, stderr=os.devnull,
|
||||
stdoutmode=0644, stderrmode=0644, pidfilename=None,
|
||||
defaultmaxfd=1024):
|
||||
"""
|
||||
Run the background process as a daemon.
|
||||
"""
|
||||
if not hasattr(dontclose, "__contains__"):
|
||||
if not isinstance(dontclose, int):
|
||||
raise TypeError, "dontclose must be an integer"
|
||||
|
@ -206,7 +232,7 @@ def daemonize(rootdir = "/", umask = 0, close_fds = False, dontclose = (),
|
|||
fd = os.open(stdout, os.O_WRONLY | os.O_CREAT | os.O_APPEND,
|
||||
stdoutmode)
|
||||
os.dup2(fd, 1)
|
||||
if (stdout == stderr):
|
||||
if stdout == stderr:
|
||||
os.dup2(1, 2)
|
||||
os.close(fd)
|
||||
# redirect stderr
|
||||
|
@ -216,7 +242,8 @@ def daemonize(rootdir = "/", umask = 0, close_fds = False, dontclose = (),
|
|||
os.dup2(fd, 2)
|
||||
os.close(fd)
|
||||
if os.fork():
|
||||
os._exit(0) # parent exits
|
||||
# parent exits
|
||||
os._exit(0)
|
||||
os.setsid()
|
||||
pid = os.fork()
|
||||
if pid:
|
||||
|
@ -225,9 +252,10 @@ def daemonize(rootdir = "/", umask = 0, close_fds = False, dontclose = (),
|
|||
f = open(pidfilename, "w")
|
||||
f.write("%s\n" % pid)
|
||||
f.close()
|
||||
except:
|
||||
pass
|
||||
os._exit(0) # parent exits
|
||||
except IOError:
|
||||
logger.exception("error writing to file: %s", pidfilename)
|
||||
# parent exits
|
||||
os._exit(0)
|
||||
if rootdir:
|
||||
os.chdir(rootdir)
|
||||
os.umask(umask)
|
||||
|
@ -244,12 +272,14 @@ def daemonize(rootdir = "/", umask = 0, close_fds = False, dontclose = (),
|
|||
try:
|
||||
os.close(fd)
|
||||
except:
|
||||
pass
|
||||
logger.exception("error closing file descriptor")
|
||||
|
||||
|
||||
def readfileintodict(filename, d):
|
||||
''' Read key=value pairs from a file, into a dict.
|
||||
Skip comments; strip newline characters and spacing.
|
||||
'''
|
||||
"""
|
||||
Read key=value pairs from a file, into a dict.
|
||||
Skip comments; strip newline characters and spacing.
|
||||
"""
|
||||
with open(filename, 'r') as f:
|
||||
lines = f.readlines()
|
||||
for l in lines:
|
||||
|
@ -259,15 +289,16 @@ def readfileintodict(filename, d):
|
|||
key, value = l.split('=', 1)
|
||||
d[key] = value.strip()
|
||||
except ValueError:
|
||||
pass
|
||||
logger.exception("error reading file to dict: %s", filename)
|
||||
|
||||
|
||||
def checkforkernelmodule(name):
|
||||
''' Return a string if a Linux kernel module is loaded, None otherwise.
|
||||
"""
|
||||
Return a string if a Linux kernel module is loaded, None otherwise.
|
||||
The string is the line from /proc/modules containing the module name,
|
||||
memory size (bytes), number of loaded instances, dependencies, state,
|
||||
and kernel memory offset.
|
||||
'''
|
||||
"""
|
||||
with open('/proc/modules', 'r') as f:
|
||||
for line in f:
|
||||
if line.startswith(name + ' '):
|
||||
|
|
|
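A short sketch of how the reworked utils helpers might be driven; the binary paths and command are examples only, not taken from this commit.

from core.misc import utils

# raises EnvironmentError unless every path is an executable file
utils.check_executables(["/sbin/brctl", "/sbin/ip"])

# run a command; stderr is folded into the returned output string
status, output = utils.cmdresult(["ip", "link", "show"])
if status != 0:
    raise RuntimeError("command failed (%d): %s" % (status, output))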
@ -66,6 +66,7 @@ import math
|
|||
|
||||
__all__ = ['to_latlon', 'from_latlon']
|
||||
|
||||
|
||||
class OutOfRangeError(ValueError):
|
||||
pass
|
||||
|
||||
|
@ -139,7 +140,7 @@ def to_latlon(easting, northing, zone_number, zone_letter):
|
|||
n = R / ep_sin_sqrt
|
||||
r = (1 - E) / ep_sin
|
||||
|
||||
c = _E * p_cos**2
|
||||
c = _E * p_cos ** 2
|
||||
c2 = c * c
|
||||
|
||||
d = x / (n * K0)
|
||||
|
@ -152,7 +153,7 @@ def to_latlon(easting, northing, zone_number, zone_letter):
|
|||
latitude = (p_rad - (p_tan / r) *
|
||||
(d2 / 2 -
|
||||
d4 / 24 * (5 + 3 * p_tan2 + 10 * c - 4 * c2 - 9 * E_P2)) +
|
||||
d6 / 720 * (61 + 90 * p_tan2 + 298 * c + 45 * p_tan4 - 252 * E_P2 - 3 * c2))
|
||||
d6 / 720 * (61 + 90 * p_tan2 + 298 * c + 45 * p_tan4 - 252 * E_P2 - 3 * c2))
|
||||
|
||||
longitude = (d -
|
||||
d3 / 6 * (1 + 2 * p_tan2 + c) +
|
||||
|
@ -184,8 +185,8 @@ def from_latlon(latitude, longitude):
|
|||
|
||||
zone_letter = latitude_to_zone_letter(latitude)
|
||||
|
||||
n = R / math.sqrt(1 - E * lat_sin**2)
|
||||
c = E_P2 * lat_cos**2
|
||||
n = R / math.sqrt(1 - E * lat_sin ** 2)
|
||||
c = E_P2 * lat_cos ** 2
|
||||
|
||||
a = lat_cos * (lon_rad - central_lon_rad)
|
||||
a2 = a * a
|
||||
|
@ -204,7 +205,7 @@ def from_latlon(latitude, longitude):
|
|||
a5 / 120 * (5 - 18 * lat_tan2 + lat_tan4 + 72 * c - 58 * E_P2)) + 500000
|
||||
|
||||
northing = K0 * (m + n * lat_tan * (a2 / 2 +
|
||||
a4 / 24 * (5 - lat_tan2 + 9 * c + 4 * c**2) +
|
||||
a4 / 24 * (5 - lat_tan2 + 9 * c + 4 * c ** 2) +
|
||||
a6 / 720 * (61 - 58 * lat_tan2 + lat_tan4 + 600 * c - 330 * E_P2)))
|
||||
|
||||
if latitude < 0:
|
||||
|
@ -244,16 +245,15 @@ def zone_number_to_central_longitude(zone_number):
|
|||
|
||||
def haversine(lon1, lat1, lon2, lat2):
|
||||
"""
|
||||
Calculate the great circle distance between two points
|
||||
Calculate the great circle distance between two points
|
||||
on the earth (specified in decimal degrees)
|
||||
"""
|
||||
# convert decimal degrees to radians
|
||||
# convert decimal degrees to radians
|
||||
lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])
|
||||
# haversine formula
|
||||
dlon = lon2 - lon1
|
||||
dlat = lat2 - lat1
|
||||
a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2
|
||||
c = 2 * math.asin(math.sqrt(a))
|
||||
# haversine formula
|
||||
dlon = lon2 - lon1
|
||||
dlat = lat2 - lat1
|
||||
a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2
|
||||
c = 2 * math.asin(math.sqrt(a))
|
||||
m = 6367000 * c
|
||||
return m
|
||||
|
||||
return m
|
||||
|
|
|
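A quick reminder of the haversine call convention after the whitespace cleanup above: arguments are lon/lat pairs in decimal degrees and the return value is meters (the coordinates below are arbitrary sample values).

# great-circle distance in meters between two arbitrary points
meters = haversine(-122.33, 47.61, -77.04, 38.90)
kilometers = meters / 1000.0  # the function uses a fixed mean earth radius of 6,367 km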
@ -1,34 +0,0 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2014 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
|
||||
'''
|
||||
Helpers for loading and saving XML files. savesessionxml(session, filename) is
|
||||
the main public interface here.
|
||||
'''
|
||||
|
||||
import os.path
|
||||
from core.netns import nodes
|
||||
from xmlparser import core_document_parser
|
||||
from xmlwriter import core_document_writer
|
||||
|
||||
def opensessionxml(session, filename, start=False, nodecls=nodes.CoreNode):
|
||||
''' Import a session from the EmulationScript XML format.
|
||||
'''
|
||||
options = {'start': start, 'nodecls': nodecls}
|
||||
doc = core_document_parser(session, filename, options)
|
||||
if start:
|
||||
session.name = os.path.basename(filename)
|
||||
session.filename = filename
|
||||
session.node_count = str(session.getnodecount())
|
||||
session.instantiate()
|
||||
|
||||
def savesessionxml(session, filename, version):
|
||||
''' Export a session to the EmulationScript XML format.
|
||||
'''
|
||||
doc = core_document_writer(session, version)
|
||||
doc.writexml(filename)
|
|
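For context on what is being removed above, the legacy call pattern looked roughly like this (a sketch; the session object and file paths are placeholders, and the import path is omitted since the module is deleted in this commit):

# legacy usage sketch of the removed helpers
opensessionxml(session, "/tmp/scenario.xml", start=True)     # parse and optionally instantiate
savesessionxml(session, "/tmp/scenario.xml", version="1.0")  # write EmulationScript XML back out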
@ -1,15 +0,0 @@
|
|||
# CORE
|
||||
# Copyright (c) 2015 The Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
|
||||
from xmlwriter0 import CoreDocumentWriter0
|
||||
from xmlwriter1 import CoreDocumentWriter1
|
||||
|
||||
def core_document_writer(session, version):
|
||||
if version == '0.0':
|
||||
doc = CoreDocumentWriter0(session)
|
||||
elif version == '1.0':
|
||||
doc = CoreDocumentWriter1(session)
|
||||
else:
|
||||
raise ValueError, 'unsupported document version: %s' % version
|
||||
return doc
|
|
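The removed factory above selected a writer class purely on the version string; a minimal sketch of the old dispatch behavior:

doc = core_document_writer(session, '1.0')   # returns a CoreDocumentWriter1
doc.writexml('/tmp/scenario.xml')
# any other version string, e.g. '2.0', raised ValueError('unsupported document version: 2.0')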
@ -1,989 +0,0 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2015 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# Created on Dec 18, 2014
|
||||
#
|
||||
# @author: santiago
|
||||
#
|
||||
|
||||
import os
|
||||
import pwd
|
||||
import collections
|
||||
from core.netns import nodes
|
||||
from core.api import coreapi
|
||||
from core.misc.ipaddr import *
|
||||
|
||||
from xml.dom.minidom import Document
|
||||
from xmlutils import *
|
||||
from xmldeployment import CoreDeploymentWriter
|
||||
|
||||
def enum(**enums):
|
||||
return type('Enum', (), enums)
|
||||
|
||||
class Attrib(object):
|
||||
''' NMF scenario plan attribute constants
|
||||
'''
|
||||
NetType = enum(WIRELESS = 'wireless', ETHERNET = 'ethernet',
|
||||
PTP_WIRED = 'point-to-point-wired',
|
||||
PTP_WIRELESS = 'point-to-point-wireless')
|
||||
MembType = enum(INTERFACE = 'interface', CHANNEL = 'channel',
|
||||
SWITCH = 'switch', HUB = 'hub', TUNNEL = 'tunnel',
|
||||
NETWORK = "network")
|
||||
DevType = enum(HOST = 'host', ROUTER = 'router', SWITCH = 'switch',
|
||||
HUB = 'hub')
|
||||
''' Node types in CORE
|
||||
'''
|
||||
NodeType = enum(ROUTER = 'router', HOST = 'host', MDR = 'mdr',
|
||||
PC = 'PC', RJ45 = 'rj45', SWITCH = 'lanswitch',
|
||||
HUB = 'hub')
|
||||
Alias = enum(ID = "COREID")
|
||||
|
||||
''' A link endpoint in CORE
|
||||
net: the network that the endpoint belongs to
|
||||
netif: the network interface at this end
|
||||
id: the identifier for the endpoint
|
||||
l2devport: if the other end is a layer 2 device, this is the assigned port in that device
|
||||
params: link/interface parameters
|
||||
'''
|
||||
Endpoint = collections.namedtuple('Endpoint',
|
||||
['net', 'netif', 'type', 'id', 'l2devport', 'params'])
|
||||
|
||||
|
||||
|
||||
class CoreDocumentWriter1(Document):
|
||||
''' Utility class for writing a CoreSession to XML in the NMF scenPlan schema. The init
|
||||
method builds an xml.dom.minidom.Document, and the writexml() method saves the XML file.
|
||||
'''
|
||||
|
||||
def __init__(self, session):
|
||||
''' Create an empty Scenario XML Document, then populate it with
|
||||
objects from the given session.
|
||||
'''
|
||||
Document.__init__(self)
|
||||
session.info('Exporting to NMF XML version 1.0')
|
||||
with session._objslock:
|
||||
self.scenarioPlan = ScenarioPlan(self, session)
|
||||
if session.getstate() == coreapi.CORE_EVENT_RUNTIME_STATE:
|
||||
deployment = CoreDeploymentWriter(self, self.scenarioPlan,
|
||||
session)
|
||||
deployment.add_deployment()
|
||||
self.scenarioPlan.setAttribute('deployed', 'true')
|
||||
|
||||
def writexml(self, filename):
|
||||
''' Commit to file
|
||||
'''
|
||||
self.scenarioPlan.coreSession.info("saving session XML file %s" % filename)
|
||||
f = open(filename, "w")
|
||||
Document.writexml(self, writer=f, indent="", addindent=" ", newl="\n", \
|
||||
encoding="UTF-8")
|
||||
f.close()
|
||||
if self.scenarioPlan.coreSession.user is not None:
|
||||
uid = pwd.getpwnam(self.scenarioPlan.coreSession.user).pw_uid
|
||||
gid = os.stat(self.scenarioPlan.coreSession.sessiondir).st_gid
|
||||
os.chown(filename, uid, gid)
|
||||
|
||||
|
||||
class XmlElement(object):
|
||||
''' The base class for all XML elements in the scenario plan. Includes
|
||||
convenience functions.
|
||||
'''
|
||||
def __init__(self, document, parent, elementType):
|
||||
self.document = document
|
||||
self.parent = parent
|
||||
self.baseEle = document.createElement("%s" % elementType)
|
||||
if self.parent is not None:
|
||||
self.parent.appendChild(self.baseEle)
|
||||
|
||||
def createElement(self, elementTag):
|
||||
return self.document.createElement(elementTag)
|
||||
|
||||
def getTagName(self):
|
||||
return self.baseEle.tagName
|
||||
|
||||
def createTextNode(self, nodeTag):
|
||||
return self.document.createTextNode(nodeTag)
|
||||
|
||||
def appendChild(self, child):
|
||||
if isinstance(child, XmlElement):
|
||||
self.baseEle.appendChild(child.baseEle)
|
||||
else:
|
||||
self.baseEle.appendChild(child)
|
||||
|
||||
@staticmethod
|
||||
def add_parameter(doc, parent, key, value):
|
||||
if key and value:
|
||||
parm = doc.createElement("parameter")
|
||||
parm.setAttribute("name", str(key))
|
||||
parm.appendChild(doc.createTextNode(str(value)))
|
||||
parent.appendChild(parm)
|
||||
|
||||
def addParameter(self, key, value):
|
||||
'''
|
||||
Add a parameter to the xml element
|
||||
'''
|
||||
self.add_parameter(self.document, self, key, value)
|
||||
|
||||
def setAttribute(self, name, val):
|
||||
self.baseEle.setAttribute(name, val)
|
||||
|
||||
def getAttribute(self, name):
|
||||
return self.baseEle.getAttribute(name)
|
||||
|
||||
|
||||
class NamedXmlElement(XmlElement):
|
||||
''' The base class for all "named" xml elements. Named elements are
|
||||
xml elements in the scenario plan that have an id and a name attribute.
|
||||
'''
|
||||
def __init__(self, scenPlan, parent, elementType, elementName):
|
||||
XmlElement.__init__(self, scenPlan.document, parent, elementType)
|
||||
|
||||
self.scenPlan = scenPlan
|
||||
self.coreSession = scenPlan.coreSession
|
||||
|
||||
elementPath = ''
|
||||
self.id=None
|
||||
if self.parent is not None and isinstance(self.parent, XmlElement) and self.parent.getTagName() != "scenario":
|
||||
elementPath="%s/" % self.parent.getAttribute("id")
|
||||
|
||||
self.id = "%s%s" % (elementPath,elementName)
|
||||
self.setAttribute("name", elementName)
|
||||
self.setAttribute("id", self.id)
|
||||
|
||||
|
||||
def addPoint(self, coreObj):
|
||||
''' Add position to an object
|
||||
'''
|
||||
(x,y,z) = coreObj.position.get()
|
||||
if x is None or y is None:
|
||||
return
|
||||
lat, lon, alt = self.coreSession.location.getgeo(x, y, z)
|
||||
|
||||
pt = self.createElement("point")
|
||||
pt.setAttribute("type", "gps")
|
||||
pt.setAttribute("lat", "%s" % lat)
|
||||
pt.setAttribute("lon", "%s" % lon)
|
||||
if z:
|
||||
pt.setAttribute("z", "%s" % alt)
|
||||
self.appendChild(pt)
|
||||
|
||||
def createAlias(self, domain, valueStr):
|
||||
''' Create an alias element for CORE specific information
|
||||
'''
|
||||
a = self.createElement("alias")
|
||||
a.setAttribute("domain", "%s" % domain)
|
||||
a.appendChild(self.createTextNode(valueStr))
|
||||
return a
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
class ScenarioPlan(XmlElement):
|
||||
''' Container class for ScenarioPlan.
|
||||
'''
|
||||
def __init__(self, document, session):
|
||||
XmlElement.__init__(self, document, parent=document, elementType='scenario')
|
||||
|
||||
self.coreSession = session
|
||||
|
||||
self.setAttribute('version', '1.0')
|
||||
self.setAttribute("name", "%s" % session.name)
|
||||
|
||||
self.setAttribute('xmlns', 'nmfPlan')
|
||||
self.setAttribute('xmlns:CORE', 'coreSpecific')
|
||||
self.setAttribute('compiled', 'true')
|
||||
|
||||
self.allChannelMembers = dict()
|
||||
self.lastNetIdx = 0
|
||||
self.addNetworks()
|
||||
self.addDevices()
|
||||
|
||||
# XXX Do we need these?
|
||||
#self.session.emane.setup() # not during runtime?
|
||||
#self.addorigin()
|
||||
|
||||
self.addDefaultServices()
|
||||
|
||||
self.addSessionConfiguration()
|
||||
|
||||
|
||||
|
||||
def addNetworks(self):
|
||||
''' Add networks in the session to the scenPlan.
|
||||
'''
|
||||
for net in self.coreSession.objs():
|
||||
if not isinstance(net, nodes.PyCoreNet):
|
||||
continue
|
||||
|
||||
if isinstance(net, nodes.CtrlNet):
|
||||
continue
|
||||
|
||||
# Do not add switches and hubs that belong to another network
|
||||
if isinstance(net, (nodes.SwitchNode, nodes.HubNode)):
|
||||
if inOtherNetwork(net):
|
||||
continue
|
||||
|
||||
try:
|
||||
NetworkElement(self, self, net)
|
||||
except:
|
||||
if hasattr(net, "name") and net.name:
|
||||
self.coreSession.warn('Unsupported net: %s' % net.name)
|
||||
else:
|
||||
self.coreSession.warn('Unsupported net: %s' % net.__class__.__name__)
|
||||
|
||||
|
||||
def addDevices(self):
|
||||
''' Add device elements to the scenario plan.
|
||||
'''
|
||||
for node in self.coreSession.objs():
|
||||
if not isinstance(node, (nodes.PyCoreNode)):
|
||||
continue
|
||||
try:
|
||||
DeviceElement(self, self, node)
|
||||
except:
|
||||
if hasattr(node, "name") and node.name:
|
||||
self.coreSession.warn('Unsupported device: %s' % node.name)
|
||||
else:
|
||||
self.coreSession.warn('Unsupported device: %s' % node.__class__.__name__)
|
||||
|
||||
|
||||
def addDefaultServices(self):
|
||||
''' Add default services and node types to the ServicePlan.
|
||||
'''
|
||||
defaultservices = self.createElement("CORE:defaultservices")
|
||||
for type in self.coreSession.services.defaultservices:
|
||||
defaults = self.coreSession.services.getdefaultservices(type)
|
||||
spn = self.createElement("device")
|
||||
spn.setAttribute("type", type)
|
||||
defaultservices.appendChild(spn)
|
||||
for svc in defaults:
|
||||
s = self.createElement("service")
|
||||
spn.appendChild(s)
|
||||
s.setAttribute("name", str(svc._name))
|
||||
if defaultservices.hasChildNodes():
|
||||
self.appendChild(defaultservices)
|
||||
|
||||
def addSessionConfiguration(self):
|
||||
''' Add CORE-specific session configuration XML elements.
|
||||
'''
|
||||
config = self.createElement("CORE:sessionconfig")
|
||||
|
||||
# origin: geolocation of cartesian coordinate 0,0,0
|
||||
refgeo = self.coreSession.location.refgeo
|
||||
origin = self.createElement("origin")
|
||||
attrs = ("lat","lon","alt")
|
||||
have_origin = False
|
||||
for i in xrange(3):
|
||||
if refgeo[i] is not None:
|
||||
origin.setAttribute(attrs[i], str(refgeo[i]))
|
||||
have_origin = True
|
||||
if have_origin:
|
||||
if self.coreSession.location.refscale != 1.0: # 100 pixels = refscale m
|
||||
origin.setAttribute("scale100", str(self.coreSession.location.refscale))
|
||||
if self.coreSession.location.refxyz != (0.0, 0.0, 0.0):
|
||||
pt = self.createElement("point")
|
||||
origin.appendChild(pt)
|
||||
x,y,z = self.coreSession.location.refxyz
|
||||
coordstxt = "%s,%s" % (x,y)
|
||||
if z:
|
||||
coordstxt += ",%s" % z
|
||||
coords = self.createTextNode(coordstxt)
|
||||
pt.appendChild(coords)
|
||||
config.appendChild(origin)
|
||||
|
||||
|
||||
# options
|
||||
options = self.createElement("options")
|
||||
defaults = self.coreSession.options.getdefaultvalues()
|
||||
for i, (k, v) in enumerate(self.coreSession.options.getkeyvaluelist()):
|
||||
if str(v) != str(defaults[i]):
|
||||
XmlElement.add_parameter(self.document, options, k, v)
|
||||
if options.hasChildNodes():
|
||||
config.appendChild(options)
|
||||
|
||||
# hook scripts
|
||||
hooks = self.createElement("hooks")
|
||||
for state in sorted(self.coreSession._hooks.keys()):
|
||||
for (filename, data) in self.coreSession._hooks[state]:
|
||||
hook = self.createElement("hook")
|
||||
hook.setAttribute("name", filename)
|
||||
hook.setAttribute("state", str(state))
|
||||
txt = self.createTextNode(data)
|
||||
hook.appendChild(txt)
|
||||
hooks.appendChild(hook)
|
||||
if hooks.hasChildNodes():
|
||||
config.appendChild(hooks)
|
||||
|
||||
# metadata
|
||||
meta = self.createElement("metadata")
|
||||
for (k, v) in self.coreSession.metadata.items():
|
||||
XmlElement.add_parameter(self.document, meta, k, v)
|
||||
if meta.hasChildNodes():
|
||||
config.appendChild(meta)
|
||||
|
||||
if config.hasChildNodes():
|
||||
self.appendChild(config)
|
||||
|
||||
|
||||
class NetworkElement(NamedXmlElement):
|
||||
def __init__(self, scenPlan, parent, netObj):
|
||||
''' Add one PyCoreNet object as one network XML element.
|
||||
'''
|
||||
elementName = self.getNetworkName(scenPlan, netObj)
|
||||
NamedXmlElement.__init__(self, scenPlan, parent, "network", elementName)
|
||||
|
||||
self.scenPlan = scenPlan
|
||||
|
||||
self.addPoint(netObj)
|
||||
|
||||
netType = None
|
||||
if isinstance(netObj, (nodes.WlanNode, nodes.EmaneNode)):
|
||||
netType = Attrib.NetType.WIRELESS
|
||||
elif isinstance(netObj, (nodes.SwitchNode, nodes.HubNode,
|
||||
nodes.PtpNet, nodes.TunnelNode)):
|
||||
netType = Attrib.NetType.ETHERNET
|
||||
else:
|
||||
netType ="%s" % netObj.__class__.__name__
|
||||
|
||||
typeEle = self.createElement("type")
|
||||
typeEle.appendChild(self.createTextNode(netType))
|
||||
self.appendChild(typeEle)
|
||||
|
||||
# Gather all endpoints belonging to this network
|
||||
self.endpoints = getEndpoints(netObj)
|
||||
|
||||
# Special case for a network of switches and hubs
|
||||
createAlias = True
|
||||
self.l2devices = []
|
||||
if isinstance(netObj, (nodes.SwitchNode, nodes.HubNode)):
|
||||
createAlias = False
|
||||
self.appendChild(typeEle)
|
||||
self.addL2Devices(netObj)
|
||||
|
||||
if createAlias:
|
||||
a = self.createAlias(Attrib.Alias.ID, "%d" % int(netObj.objid))
|
||||
self.appendChild(a)
|
||||
|
||||
# XXXX TODO: Move this to channel?
|
||||
# key used with tunnel node
|
||||
if hasattr(netObj, 'grekey') and netObj.grekey is not None:
|
||||
a = self.createAlias("COREGREKEY", "%s" % netObj.grekey)
|
||||
self.appendChild(a)
|
||||
|
||||
self.addNetMembers(netObj)
|
||||
self.addChannels(netObj)
|
||||
|
||||
presentationEle = self.createElement("CORE:presentation")
|
||||
addPresentationEle = False
|
||||
if netObj.icon and not netObj.icon.isspace():
|
||||
presentationEle.setAttribute("icon", netObj.icon)
|
||||
addPresentationEle = True
|
||||
if netObj.canvas:
|
||||
presentationEle.setAttribute("canvas", str(netObj.canvas))
|
||||
addPresentationEle = True
|
||||
if addPresentationEle:
|
||||
self.appendChild(presentationEle)
|
||||
|
||||
def getNetworkName(self, scenPlan, netObj):
|
||||
''' Determine the name to use for this network element
|
||||
'''
|
||||
if isinstance(netObj, (nodes.PtpNet, nodes.TunnelNode)):
|
||||
name = "net%s" % scenPlan.lastNetIdx
|
||||
scenPlan.lastNetIdx += 1
|
||||
elif netObj.name:
|
||||
name = str(netObj.name) # could use net.brname for bridges?
|
||||
elif isinstance(netObj, (nodes.SwitchNode, nodes.HubNode)):
|
||||
name = "lan%s" % netObj.objid
|
||||
else:
|
||||
name = ''
|
||||
return name
|
||||
|
||||
|
||||
def addL2Devices(self, netObj):
|
||||
''' Add switches and hubs
|
||||
'''
|
||||
|
||||
# Add the netObj as a device
|
||||
self.l2devices.append(DeviceElement(self.scenPlan, self, netObj))
|
||||
|
||||
# Add downstream switches/hubs
|
||||
l2devs = []
|
||||
neweps = []
|
||||
for ep in self.endpoints:
|
||||
if ep.type and ep.net.objid != netObj.objid:
|
||||
l2s, eps = getDowmstreamL2Devices(ep.net)
|
||||
l2devs.extend(l2s)
|
||||
neweps.extend(eps)
|
||||
|
||||
for l2dev in l2devs:
|
||||
self.l2devices.append(DeviceElement(self.scenPlan, self, l2dev))
|
||||
|
||||
self.endpoints.extend(neweps)
|
||||
|
||||
# XXX: Optimize later
|
||||
def addNetMembers(self, netObj):
|
||||
''' Add members to a network XML element.
|
||||
'''
|
||||
|
||||
for ep in self.endpoints:
|
||||
if ep.type:
|
||||
MemberElement(self.scenPlan, self, referencedType=ep.type, referencedId=ep.id)
|
||||
|
||||
if ep.l2devport:
|
||||
MemberElement(self.scenPlan,
|
||||
self,
|
||||
referencedType=Attrib.MembType.INTERFACE,
|
||||
referencedId="%s/%s" % (self.id,ep.l2devport))
|
||||
|
||||
# XXX Revisit this
|
||||
# Create implied members given the network type
|
||||
if isinstance(netObj, nodes.TunnelNode):
|
||||
MemberElement(self.scenPlan,
|
||||
self,
|
||||
referencedType=Attrib.MembType.TUNNEL,
|
||||
referencedId="%s/%s" % (netObj.name, netObj.name))
|
||||
|
||||
# XXX: Optimize later
|
||||
def addChannels(self, netObj):
|
||||
''' Add channels to a network XML element
|
||||
'''
|
||||
|
||||
if isinstance(netObj, (nodes.WlanNode, nodes.EmaneNode)):
|
||||
modelconfigs = netObj.session.mobility.getmodels(netObj)
|
||||
modelconfigs += netObj.session.emane.getmodels(netObj)
|
||||
chan = None
|
||||
for (model, conf) in modelconfigs:
|
||||
# Handle mobility parameters below
|
||||
if model._type == coreapi.CORE_TLV_REG_MOBILITY:
|
||||
continue
|
||||
|
||||
# Create the channel
|
||||
if chan is None:
|
||||
name = "wireless"
|
||||
chan = ChannelElement(self.scenPlan, self, netObj,
|
||||
channelType=model._name,
|
||||
channelName=name,
|
||||
channelDomain="CORE")
|
||||
|
||||
# Add wireless model parameters
|
||||
for i, key in enumerate(model.getnames()):
|
||||
value = conf[i]
|
||||
if value is not None:
|
||||
chan.addParameter(key, model.valueof(key, conf))
|
||||
|
||||
for (model, conf) in modelconfigs:
|
||||
if model._type == coreapi.CORE_TLV_REG_MOBILITY:
|
||||
# Add wireless mobility parameters
|
||||
mobility = XmlElement(self.scenPlan, chan, "CORE:mobility")
|
||||
# Add a type child
|
||||
typeEle = self.createElement("type")
|
||||
typeEle.appendChild(self.createTextNode(model._name))
|
||||
mobility.appendChild(typeEle)
|
||||
for i, key in enumerate(model.getnames()):
|
||||
value = conf[i]
|
||||
if value is not None:
|
||||
mobility.addParameter(key, value)
|
||||
|
||||
# Add members to the channel
|
||||
if chan is not None:
|
||||
chan.addChannelMembers(self.endpoints)
|
||||
self.appendChild(chan.baseEle)
|
||||
elif isinstance(netObj, nodes.PtpNet) :
|
||||
if len(self.endpoints) < 2:
|
||||
if len(self.endpoints) == 1:
|
||||
self.coreSession.warn('Pt2Pt network with only 1 endpoint: %s' % self.endpoints[0].id)
|
||||
else:
|
||||
self.coreSession.warn('Pt2Pt network with no endpoints encountered in %s' % netObj.name)
|
||||
return
|
||||
name = "chan%d" % (0)
|
||||
chan = ChannelElement(self.scenPlan, self, netObj,
|
||||
channelType=Attrib.NetType.ETHERNET,
|
||||
channelName=name)
|
||||
|
||||
# Add interface parameters
|
||||
if self.endpoints[0].params != self.endpoints[1].params:
|
||||
self.coreSession.warn('Pt2Pt Endpoint parameters do not match in %s' % netObj.name)
|
||||
for key, value in self.endpoints[0].params:
|
||||
# XXX lifted from original addnetem function. revisit this.
|
||||
# default netem parameters are 0 or None
|
||||
if value is None or value == 0:
|
||||
continue
|
||||
if key == "has_netem" or key == "has_tbf":
|
||||
continue
|
||||
chan.addParameter(key, value)
|
||||
|
||||
# Add members to the channel
|
||||
chan.addChannelMembers(self.endpoints)
|
||||
self.appendChild(chan)
|
||||
|
||||
elif isinstance(netObj, (nodes.SwitchNode,
|
||||
nodes.HubNode, nodes.TunnelNode)):
|
||||
cidx=0
|
||||
channels = []
|
||||
for ep in self.endpoints:
|
||||
# Create one channel member per ep
|
||||
if ep.type:
|
||||
name = "chan%d" % (cidx)
|
||||
chan = ChannelElement(self.scenPlan, self, netObj,
|
||||
channelType=Attrib.NetType.ETHERNET,
|
||||
channelName=name)
|
||||
|
||||
# Add interface parameters
|
||||
for key, value in ep.params:
|
||||
# XXX lifted from original addnetem function. revisit this.
|
||||
# default netem parameters are 0 or None
|
||||
if value is None or value == 0:
|
||||
continue
|
||||
if key == "has_netem" or key == "has_tbf":
|
||||
continue
|
||||
chan.addParameter(key, value)
|
||||
|
||||
# Add members to the channel
|
||||
chan.addChannelMembers(ep)
|
||||
channels.append(chan)
|
||||
cidx += 1
|
||||
|
||||
for chan in channels:
|
||||
self.appendChild(chan)
|
||||
|
||||
|
||||
|
||||
|
||||
class DeviceElement(NamedXmlElement):
|
||||
''' A device element in the scenario plan.
|
||||
'''
|
||||
def __init__(self, scenPlan, parent, devObj):
|
||||
''' Add a PyCoreNode object as a device element.
|
||||
'''
|
||||
|
||||
devType = None
|
||||
coreDevType = None
|
||||
if hasattr(devObj, "type") and devObj.type:
|
||||
coreDevType = devObj.type
|
||||
if devObj.type == Attrib.NodeType.ROUTER:
|
||||
devType = Attrib.DevType.ROUTER
|
||||
elif devObj.type == Attrib.NodeType.MDR:
|
||||
devType = Attrib.DevType.ROUTER
|
||||
elif devObj.type == Attrib.NodeType.HOST:
|
||||
devType = Attrib.DevType.HOST
|
||||
elif devObj.type == Attrib.NodeType.PC:
|
||||
devType = Attrib.DevType.HOST
|
||||
elif devObj.type == Attrib.NodeType.RJ45:
|
||||
devType = Attrib.DevType.HOST
|
||||
nodeId = "EMULATOR-HOST"
|
||||
elif devObj.type == Attrib.NodeType.HUB:
|
||||
devType = Attrib.DevType.HUB
|
||||
elif devObj.type == Attrib.NodeType.SWITCH:
|
||||
devType = Attrib.DevType.SWITCH
|
||||
else:
|
||||
# Default custom types (defined in ~/.core/nodes.conf) to HOST
|
||||
devType = Attrib.DevType.HOST
|
||||
|
||||
|
||||
if devType is None:
|
||||
raise Exception
|
||||
|
||||
|
||||
NamedXmlElement.__init__(self, scenPlan, parent, devType, devObj.name)
|
||||
|
||||
if coreDevType is not None:
|
||||
typeEle = self.createElement("type")
|
||||
typeEle.setAttribute("domain", "CORE")
|
||||
typeEle.appendChild(self.createTextNode("%s" % coreDevType))
|
||||
self.appendChild(typeEle)
|
||||
|
||||
self.interfaces = []
|
||||
self.addInterfaces(devObj)
|
||||
alias = self.createAlias(Attrib.Alias.ID, "%s" % devObj.objid)
|
||||
self.appendChild(alias)
|
||||
self.addPoint(devObj)
|
||||
self.addServices(devObj)
|
||||
|
||||
|
||||
presentationEle = self.createElement("CORE:presentation")
|
||||
addPresentationEle = False
|
||||
if devObj.icon and not devObj.icon.isspace():
|
||||
presentationEle.setAttribute("icon", devObj.icon)
|
||||
addPresentationEle = True
|
||||
if devObj.canvas:
|
||||
presentationEle.setAttribute("canvas", str(devObj.canvas))
|
||||
addPresentationEle = True
|
||||
if addPresentationEle:
|
||||
self.appendChild(presentationEle)
|
||||
|
||||
def addInterfaces(self, devObj):
|
||||
''' Add interfaces to a device element.
|
||||
'''
|
||||
idx=0
|
||||
for ifcObj in devObj.netifs(sort=True):
|
||||
if ifcObj.net and isinstance(ifcObj.net, nodes.CtrlNet):
|
||||
continue
|
||||
if isinstance(devObj, nodes.PyCoreNode):
|
||||
ifcEle = InterfaceElement(self.scenPlan, self, devObj, ifcObj)
|
||||
else: # isinstance(node, (nodes.HubNode nodes.SwitchNode)):
|
||||
ifcEle = InterfaceElement(self.scenPlan, self, devObj, ifcObj, idx)
|
||||
idx += 1
|
||||
|
||||
netmodel = None
|
||||
if ifcObj.net:
|
||||
if hasattr(ifcObj.net, "model"):
|
||||
netmodel = ifcObj.net.model
|
||||
if ifcObj.mtu and ifcObj.mtu != 1500:
|
||||
ifcEle.setAttribute("mtu", "%s" % ifcObj.mtu)
|
||||
|
||||
# The interfaces returned for Switches and Hubs are the interfaces of the nodes connected to them.
|
||||
# The addresses are for those interfaces. Don't include them here.
|
||||
if isinstance(devObj, nodes.PyCoreNode):
|
||||
# could use ifcObj.params, transport_type
|
||||
ifcEle.addAddresses(ifcObj)
|
||||
# per-interface models
|
||||
# XXX Remove???
|
||||
if netmodel and netmodel._name[:6] == "emane_":
|
||||
cfg = self.coreSession.emane.getifcconfig(devObj.objid, netmodel._name,
|
||||
None, ifcObj)
|
||||
if cfg:
|
||||
ifcEle.addModels(((netmodel, cfg),) )
|
||||
|
||||
self.interfaces.append(ifcEle)
|
||||
|
||||
|
||||
def addServices(self, devObj):
|
||||
''' Add services and their customizations to the ServicePlan.
|
||||
'''
|
||||
if not hasattr(devObj, "services") :
|
||||
return
|
||||
|
||||
if len(devObj.services) == 0:
|
||||
return
|
||||
|
||||
defaults = self.coreSession.services.getdefaultservices(devObj.type)
|
||||
if devObj.services == defaults:
|
||||
return
|
||||
spn = self.createElement("CORE:services")
|
||||
spn.setAttribute("name", devObj.name)
|
||||
self.appendChild(spn)
|
||||
|
||||
for svc in devObj.services:
|
||||
s = self.createElement("service")
|
||||
spn.appendChild(s)
|
||||
s.setAttribute("name", str(svc._name))
|
||||
s.setAttribute("startup_idx", str(svc._startindex))
|
||||
if svc._starttime != "":
|
||||
s.setAttribute("start_time", str(svc._starttime))
|
||||
# only record service names if not a customized service
|
||||
if not svc._custom:
|
||||
continue
|
||||
s.setAttribute("custom", str(svc._custom))
|
||||
addelementsfromlist(self, s, svc._dirs, "directory", "name")
|
||||
|
||||
for fn in svc._configs:
|
||||
if len(fn) == 0:
|
||||
continue
|
||||
f = self.createElement("file")
|
||||
f.setAttribute("name", fn)
|
||||
# all file names are added to determine when a file has been deleted
|
||||
s.appendChild(f)
|
||||
data = self.coreSession.services.getservicefiledata(svc, fn)
|
||||
if data is None:
|
||||
# this includes only customized file contents and skips
|
||||
# the auto-generated files
|
||||
continue
|
||||
txt = self.createTextNode("\n" + data)
|
||||
f.appendChild(txt)
|
||||
|
||||
addtextelementsfromlist(self, s, svc._startup, "command",
|
||||
(("type","start"),))
|
||||
addtextelementsfromlist(self, s, svc._shutdown, "command",
|
||||
(("type","stop"),))
|
||||
addtextelementsfromlist(self, s, svc._validate, "command",
|
||||
(("type","validate"),))
|
||||
|
||||
|
||||
|
||||
class ChannelElement(NamedXmlElement):
|
||||
''' A channel element in the scenario plan
|
||||
'''
|
||||
def __init__(self, scenPlan, parent, netObj, channelType, channelName, channelDomain=None):
|
||||
NamedXmlElement.__init__(self, scenPlan, parent, "channel", channelName)
|
||||
'''
|
||||
Create a channel element and append a member child referencing this channel element
|
||||
in the parent element.
|
||||
'''
|
||||
# Create a member element for this channel in the parent
|
||||
MemberElement(self.scenPlan,
|
||||
parent,
|
||||
referencedType=Attrib.MembType.CHANNEL,
|
||||
referencedId=self.id)
|
||||
|
||||
# Add a type child
|
||||
typeEle = self.createElement("type")
|
||||
if channelDomain is not None:
|
||||
typeEle.setAttribute("domain", "%s" % channelDomain)
|
||||
typeEle.appendChild(self.createTextNode(channelType))
|
||||
self.appendChild(typeEle)
|
||||
|
||||
|
||||
def addChannelMembers(self, endpoints):
|
||||
'''
|
||||
Add network channel members referencing interfaces in the channel
|
||||
'''
|
||||
if isinstance(endpoints, list):
|
||||
# A list of endpoints is given. Create one channel member per endpoint
|
||||
idx = 0
|
||||
for ep in endpoints:
|
||||
self.addChannelMember(ep.type, ep.id, idx)
|
||||
idx += 1
|
||||
else:
|
||||
# A single endpoint is given. Create one channel member for the endpoint,
|
||||
# and if the endpoint is associated with a Layer 2 device port, add the
|
||||
# port as a second member
|
||||
ep = endpoints
|
||||
self.addChannelMember(ep.type, ep.id, 0)
|
||||
if ep.l2devport is not None:
|
||||
memId = "%s/%s" % (self.parent.getAttribute("id"), ep.l2devport)
|
||||
self.addChannelMember(ep.type, memId, 1)
|
||||
|
||||
|
||||
def addChannelMember(self, memIfcType, memIfcId, memIdx):
|
||||
'''
|
||||
add a member to a given channel
|
||||
'''
|
||||
|
||||
m = MemberElement(self.scenPlan,
|
||||
self,
|
||||
referencedType=memIfcType,
|
||||
referencedId=memIfcId,
|
||||
index=memIdx)
|
||||
self.scenPlan.allChannelMembers[memIfcId] = m
|
||||
|
||||
|
||||
|
||||
class InterfaceElement(NamedXmlElement):
|
||||
'''
|
||||
A network interface element
|
||||
'''
|
||||
def __init__(self, scenPlan, parent, devObj, ifcObj, ifcIdx=None):
|
||||
'''
|
||||
Create a network interface element with references to the channel that this
|
||||
interface is used on.
|
||||
'''
|
||||
elementName=None
|
||||
if ifcIdx is not None:
|
||||
elementName = "e%d" % ifcIdx
|
||||
else:
|
||||
elementName = ifcObj.name
|
||||
NamedXmlElement.__init__(self, scenPlan, parent, "interface", elementName)
|
||||
self.ifcObj = ifcObj
|
||||
self.addChannelReference()
|
||||
|
||||
def addChannelReference(self):
|
||||
'''
|
||||
Add a reference to the channel that uses this interface
|
||||
'''
|
||||
try:
|
||||
cm = self.scenPlan.allChannelMembers[self.id]
|
||||
if cm is not None:
|
||||
ch = cm.baseEle.parentNode
|
||||
if ch is not None:
|
||||
net = ch.parentNode
|
||||
if net is not None:
|
||||
MemberElement(self.scenPlan,
|
||||
self,
|
||||
referencedType=Attrib.MembType.CHANNEL,
|
||||
referencedId=ch.getAttribute("id"),
|
||||
index=int(cm.getAttribute("index")))
|
||||
MemberElement(self.scenPlan,
|
||||
self,
|
||||
referencedType=Attrib.MembType.NETWORK,
|
||||
referencedId=net.getAttribute("id"))
|
||||
except KeyError:
|
||||
pass # Not an error. This occurs when an interface belongs to a switch or a hub within a network and the channel is yet to be defined
|
||||
|
||||
|
||||
def addAddresses(self, ifcObj):
|
||||
'''
|
||||
Add MAC and IP addresses to interface XML elements.
|
||||
'''
|
||||
if ifcObj.hwaddr:
|
||||
h = self.createElement("address")
|
||||
self.appendChild(h)
|
||||
h.setAttribute("type", "mac")
|
||||
htxt = self.createTextNode("%s" % ifcObj.hwaddr)
|
||||
h.appendChild(htxt)
|
||||
for addr in ifcObj.addrlist:
|
||||
a = self.createElement("address")
|
||||
self.appendChild(a)
|
||||
(ip, sep, mask) = addr.partition('/')
|
||||
# mask = int(mask) XXX?
|
||||
if isIPv4Address(ip):
|
||||
a.setAttribute("type", "IPv4")
|
||||
else:
|
||||
a.setAttribute("type", "IPv6")
|
||||
|
||||
# a.setAttribute("type", )
|
||||
atxt = self.createTextNode("%s" % addr)
|
||||
a.appendChild(atxt)
|
||||
|
||||
|
||||
# XXX Remove?
|
||||
def addModels(self, configs):
|
||||
'''
|
||||
Add models from a list of model-class, config values tuples.
|
||||
'''
|
||||
for (m, conf) in configs:
|
||||
modelEle = self.createElement("model")
|
||||
modelEle.setAttribute("name", m._name)
|
||||
typeStr = "wireless"
|
||||
if m._type == coreapi.CORE_TLV_REG_MOBILITY:
|
||||
typeStr = "mobility"
|
||||
modelEle.setAttribute("type", typeStr)
|
||||
for i, k in enumerate(m.getnames()):
|
||||
key = self.createElement(k)
|
||||
value = conf[i]
|
||||
if value is None:
|
||||
value = ""
|
||||
key.appendChild(self.createTextNode("%s" % value))
|
||||
modelEle.appendChild(key)
|
||||
self.appendChild(modelEle)
|
||||
|
||||
|
||||
class MemberElement(XmlElement):
|
||||
'''
|
||||
Member elements are references to other elements in the network plan elements of the scenario.
|
||||
They are used in networks to reference channels, in channels to reference interfaces,
|
||||
and in interfaces to reference networks/channels. Member elements provided allow bi-directional
|
||||
traversal of network plan components.
|
||||
'''
|
||||
def __init__(self, scenPlan, parent, referencedType, referencedId, index=None):
|
||||
'''
|
||||
Create a member element
|
||||
'''
|
||||
XmlElement.__init__(self, scenPlan.document, parent, "member")
|
||||
self.setAttribute("type", "%s" % referencedType)
|
||||
# See'Understanding the Network Modeling Framework document'
|
||||
if index is not None:
|
||||
self.setAttribute("index", "%d" % index)
|
||||
self.appendChild(self.createTextNode("%s" % referencedId))
|
||||
|
||||
|
||||
#
|
||||
# =======================================================================================
|
||||
# Helpers
|
||||
# =======================================================================================
|
||||
def getEndpoint(netObj, ifcObj):
|
||||
'''
|
||||
Create an Endpoint object given the network and the interface of interest
|
||||
'''
|
||||
ep = None
|
||||
l2devport=None
|
||||
|
||||
# if ifcObj references an interface of a node and is part of this network
|
||||
if ifcObj.net.objid == netObj.objid and hasattr(ifcObj,'node') and ifcObj.node:
|
||||
params = ifcObj.getparams()
|
||||
if isinstance(ifcObj.net, (nodes.HubNode, nodes.SwitchNode)):
|
||||
l2devport="%s/e%d" % (ifcObj.net.name, ifcObj.net.getifindex(ifcObj))
|
||||
ep = Endpoint(netObj,
|
||||
ifcObj,
|
||||
type = Attrib.MembType.INTERFACE,
|
||||
id="%s/%s" % (ifcObj.node.name, ifcObj.name),
|
||||
l2devport=l2devport,
|
||||
params=params)
|
||||
|
||||
# else if ifcObj references another node and is connected to this network
|
||||
elif hasattr(ifcObj,"othernet"):
|
||||
if ifcObj.othernet.objid == netObj.objid:
|
||||
# #hack used for upstream parameters for link between switches
|
||||
# #(see LxBrNet.linknet())
|
||||
ifcObj.swapparams('_params_up')
|
||||
params = ifcObj.getparams()
|
||||
ifcObj.swapparams('_params_up')
|
||||
owner = ifcObj.net
|
||||
l2devport="%s/e%d" % (ifcObj.othernet.name, ifcObj.othernet.getifindex(ifcObj))
|
||||
|
||||
# Create the endpoint.
|
||||
# XXX the interface index might not match what is shown in the gui. For switches and hubs,
|
||||
# The gui assigns its index but doesn't pass it to the daemon and vice versa.
|
||||
# The gui stores its index in the IMN file, which it reads and writes without daemon intervention.
|
||||
# Fix this!
|
||||
ep = Endpoint(owner,
|
||||
ifcObj,
|
||||
type = Attrib.MembType.INTERFACE,
|
||||
id="%s/%s/e%d" % (netObj.name, owner.name, owner.getifindex(ifcObj)),
|
||||
l2devport=l2devport,
|
||||
params=params)
|
||||
# else this node has an interface that belongs to another network
|
||||
# i.e. a switch/hub interface connected to another switch/hub and CORE has the other switch/hub
|
||||
# as the containing network
|
||||
else :
|
||||
ep = Endpoint(netObj, ifcObj,type=None, id=None, l2devport=None, params=None)
|
||||
|
||||
|
||||
return ep
|
||||
|
||||
def getEndpoints(netObj):
|
||||
'''
|
||||
Gather all endpoints of the given network
|
||||
'''
|
||||
# Get all endpoints
|
||||
endpoints = []
|
||||
|
||||
# XXX TODO: How to represent physical interfaces.
|
||||
#
|
||||
# NOTE: The following code works except it would be missing physical (rj45) interfaces from Pt2pt links
|
||||
# TODO: Fix data in net.netifs to include Pt2Pt physical interfaces
|
||||
#
|
||||
# Iterate through all the nodes in the scenario, then iterate through all the interface for each node,
|
||||
# and check if the interface is connected to this network.
|
||||
|
||||
for ifcObj in netObj.netifs(sort=True):
|
||||
try:
|
||||
ep = getEndpoint(netObj, ifcObj)
|
||||
if ep is not None:
|
||||
endpoints.append(ep)
|
||||
except Exception:
|
||||
pass
|
||||
return endpoints
|
||||
|
||||
def getDowmstreamL2Devices(netObj):
|
||||
'''
|
||||
Helper function for getting a list of all downstream layer 2 devices from the given netObj
|
||||
'''
|
||||
l2devObjs = [netObj]
|
||||
allendpoints = []
|
||||
myendpoints = getEndpoints(netObj)
|
||||
allendpoints.extend(myendpoints)
|
||||
for ep in myendpoints:
|
||||
if ep.type and ep.net.objid != netObj.objid:
|
||||
l2s, eps = getDowmstreamL2Devices(ep.net)
|
||||
l2devObjs.extend(l2s)
|
||||
allendpoints.extend(eps)
|
||||
|
||||
return l2devObjs, allendpoints
|
||||
|
||||
|
||||
|
||||
def getAllNetworkInterfaces(session):
|
||||
'''
|
||||
Gather all network interfaces in the session
|
||||
'''
|
||||
netifs = []
|
||||
for node in session.objs():
|
||||
for netif in node.netifs(sort=True):
|
||||
if netif not in netifs:
|
||||
netifs.append(netif)
|
||||
return netifs
|
||||
|
||||
def inOtherNetwork(netObj):
|
||||
'''
|
||||
Determine if CORE considers a given network object to be part of another network.
|
||||
Note: CORE considers layer 2 devices to be their own networks. However, if an l2 device
|
||||
is connected to another device, it is possible that one of its ports belongs to the other
|
||||
l2 device's network (thus, "othernet").
|
||||
'''
|
||||
for netif in netObj.netifs(sort=True):
|
||||
if hasattr(netif,"othernet"):
|
||||
if netif.othernet.objid != netObj.objid:
|
||||
return True
|
||||
return False
|
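To make the removed writer's data model above easier to follow, here is a hand-built instance of its Endpoint tuple; every value is made up for illustration (switch_net, eth0 and the port string would come from live session objects):

# illustrative Endpoint for a node interface hanging off a switch port
ep = Endpoint(net=switch_net,                   # the network the endpoint belongs to
              netif=eth0,                       # the network interface at this end
              type=Attrib.MembType.INTERFACE,
              id="n1/eth0",
              l2devport="lan4/e0",              # set only when the far side is a layer 2 device
              params=eth0.getparams())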
File diff suppressed because it is too large
|
@ -1,80 +1,89 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
"""
|
||||
nodes.py: definition of an LxcNode and CoreNode classes, and other node classes
|
||||
that inherit from the CoreNode, implementing specific node types.
|
||||
'''
|
||||
"""
|
||||
|
||||
from vnode import *
|
||||
from vnet import *
|
||||
from core.misc.ipaddr import *
|
||||
from core.api import coreapi
|
||||
import socket
|
||||
import subprocess
|
||||
import threading
|
||||
from socket import AF_INET
|
||||
from socket import AF_INET6
|
||||
|
||||
from core import constants
|
||||
from core.coreobj import PyCoreNetIf
|
||||
from core.coreobj import PyCoreNode
|
||||
from core.coreobj import PyCoreObj
|
||||
from core.data import LinkData
|
||||
from core.enumerations import LinkTypes
|
||||
from core.enumerations import NodeTypes
|
||||
from core.enumerations import RegisterTlvs
|
||||
from core.misc import ipaddress
|
||||
from core.misc import log
|
||||
from core.misc import utils
|
||||
from core.netns.vnet import GreTapBridge
|
||||
from core.netns.vnet import LxBrNet
|
||||
from core.netns.vnode import LxcNode
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
|
||||
class CtrlNet(LxBrNet):
|
||||
policy = "ACCEPT"
|
||||
CTRLIF_IDX_BASE = 99 # base control interface index
|
||||
DEFAULT_PREFIX_LIST = ["172.16.0.0/24 172.16.1.0/24 172.16.2.0/24 172.16.3.0/24 172.16.4.0/24",
|
||||
"172.17.0.0/24 172.17.1.0/24 172.17.2.0/24 172.17.3.0/24 172.17.4.0/24",
|
||||
"172.18.0.0/24 172.18.1.0/24 172.18.2.0/24 172.18.3.0/24 172.18.4.0/24",
|
||||
"172.19.0.0/24 172.19.1.0/24 172.19.2.0/24 172.19.3.0/24 172.19.4.0/24"]
|
||||
|
||||
def __init__(self, session, objid = "ctrlnet", name = None,
|
||||
verbose = False, prefix = None,
|
||||
hostid = None, start = True, assign_address = True,
|
||||
updown_script = None, serverintf = None):
|
||||
self.prefix = IPv4Prefix(prefix)
|
||||
# base control interface index
|
||||
CTRLIF_IDX_BASE = 99
|
||||
DEFAULT_PREFIX_LIST = [
|
||||
"172.16.0.0/24 172.16.1.0/24 172.16.2.0/24 172.16.3.0/24 172.16.4.0/24",
|
||||
"172.17.0.0/24 172.17.1.0/24 172.17.2.0/24 172.17.3.0/24 172.17.4.0/24",
|
||||
"172.18.0.0/24 172.18.1.0/24 172.18.2.0/24 172.18.3.0/24 172.18.4.0/24",
|
||||
"172.19.0.0/24 172.19.1.0/24 172.19.2.0/24 172.19.3.0/24 172.19.4.0/24"
|
||||
]
|
||||
|
||||
def __init__(self, session, objid="ctrlnet", name=None, prefix=None,
|
||||
hostid=None, start=True, assign_address=True,
|
||||
updown_script=None, serverintf=None):
|
||||
self.prefix = ipaddress.Ipv4Prefix(prefix)
|
||||
self.hostid = hostid
|
||||
self.assign_address = assign_address
|
||||
self.updown_script = updown_script
|
||||
self.serverintf = serverintf
|
||||
LxBrNet.__init__(self, session, objid = objid, name = name,
|
||||
verbose = verbose, start = start)
|
||||
LxBrNet.__init__(self, session, objid=objid, name=name, start=start)
|
||||
|
||||
def startup(self):
|
||||
if self.detectoldbridge():
|
||||
return
|
||||
|
||||
|
||||
LxBrNet.startup(self)
|
||||
if self.hostid:
|
||||
addr = self.prefix.addr(self.hostid)
|
||||
else:
|
||||
addr = self.prefix.maxaddr()
|
||||
addr = self.prefix.max_addr()
|
||||
msg = "Added control network bridge: %s %s" % \
|
||||
(self.brname, self.prefix)
|
||||
(self.brname, self.prefix)
|
||||
addrlist = ["%s/%s" % (addr, self.prefix.prefixlen)]
|
||||
if self.assign_address:
|
||||
self.addrconfig(addrlist = addrlist)
|
||||
self.addrconfig(addrlist=addrlist)
|
||||
msg += " address %s" % addr
|
||||
self.session.info(msg)
|
||||
logger.info(msg)
|
||||
if self.updown_script is not None:
|
||||
self.info("interface %s updown script '%s startup' called" % \
|
||||
(self.brname, self.updown_script))
|
||||
check_call([self.updown_script, self.brname, "startup"])
|
||||
logger.info("interface %s updown script '%s startup' called" % \
|
||||
(self.brname, self.updown_script))
|
||||
subprocess.check_call([self.updown_script, self.brname, "startup"])
|
||||
if self.serverintf is not None:
|
||||
try:
|
||||
check_call([BRCTL_BIN, "addif", self.brname, self.serverintf])
|
||||
check_call([IP_BIN, "link", "set", self.serverintf, "up"])
|
||||
except Exception, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_FATAL, self.brname,
|
||||
"Error joining server interface %s to controlnet bridge %s: %s" % \
|
||||
(self.serverintf, self.brname, e))
|
||||
|
||||
subprocess.check_call([constants.BRCTL_BIN, "addif", self.brname, self.serverintf])
|
||||
subprocess.check_call([constants.IP_BIN, "link", "set", self.serverintf, "up"])
|
||||
except subprocess.CalledProcessError:
|
||||
logger.exception("Error joining server interface %s to controlnet bridge %s",
|
||||
self.serverintf, self.brname)
|
||||
|
||||
def detectoldbridge(self):
|
||||
''' Occassionally, control net bridges from previously closed sessions are not cleaned up.
|
||||
"""
|
||||
Occasionally, control net bridges from previously closed sessions are not cleaned up.
|
||||
Check if there are old control net bridges and delete them
|
||||
'''
|
||||
retstat, retstr = cmdresult([BRCTL_BIN,'show'])
|
||||
"""
|
||||
retstat, retstr = utils.cmdresult([constants.BRCTL_BIN, 'show'])
|
||||
if retstat != 0:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_FATAL, None,
|
||||
"Unable to retrieve list of installed bridges")
|
||||
logger.error("Unable to retrieve list of installed bridges")
|
||||
lines = retstr.split('\n')
|
||||
for line in lines[1:]:
|
||||
cols = line.split('\t')
|
||||
|
@ -82,46 +91,47 @@ class CtrlNet(LxBrNet):
|
|||
flds = cols[0].split('.')
|
||||
if len(flds) == 3:
|
||||
if flds[0] == 'b' and flds[1] == self.objid:
|
||||
self.session.exception(coreapi.CORE_EXCP_LEVEL_FATAL, "CtrlNet.startup()", None,
|
||||
"Error: An active control net bridge (%s) found. "\
|
||||
"An older session might still be running. " \
|
||||
"Stop all sessions and, if needed, delete %s to continue." % \
|
||||
(oldbr, oldbr))
|
||||
logger.error(
|
||||
"Error: An active control net bridge (%s) found. " \
|
||||
"An older session might still be running. " \
|
||||
"Stop all sessions and, if needed, delete %s to continue." % \
|
||||
(oldbr, oldbr)
|
||||
)
|
||||
return True
|
||||
'''
|
||||
"""
|
||||
# Do this if we want to delete the old bridge
|
||||
self.warn("Warning: Old %s bridge found: %s" % (self.objid, oldbr))
|
||||
logger.warn("Warning: Old %s bridge found: %s" % (self.objid, oldbr))
|
||||
try:
|
||||
check_call([BRCTL_BIN, 'delbr', oldbr])
|
||||
except Exception, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, oldbr,
|
||||
"Error deleting old bridge %s" % oldbr)
|
||||
self.info("Deleted %s" % oldbr)
|
||||
'''
|
||||
except subprocess.CalledProcessError as e:
|
||||
logger.exception("Error deleting old bridge %s", oldbr, e)
|
||||
logger.info("Deleted %s", oldbr)
|
||||
"""
|
||||
return False
|
||||
|
||||
|
||||
def shutdown(self):
|
||||
if self.serverintf is not None:
|
||||
try:
|
||||
check_call([BRCTL_BIN, "delif", self.brname, self.serverintf])
|
||||
except Exception, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.brname,
|
||||
"Error deleting server interface %s to controlnet bridge %s: %s" % \
|
||||
(self.serverintf, self.brname, e))
|
||||
|
||||
subprocess.check_call([constants.BRCTL_BIN, "delif", self.brname, self.serverintf])
|
||||
except subprocess.CalledProcessError:
|
||||
logger.exception("Error deleting server interface %s to controlnet bridge %s",
|
||||
self.serverintf, self.brname)
|
||||
|
||||
if self.updown_script is not None:
|
||||
self.info("interface %s updown script '%s shutdown' called" % \
|
||||
(self.brname, self.updown_script))
|
||||
check_call([self.updown_script, self.brname, "shutdown"])
|
||||
logger.info("interface %s updown script '%s shutdown' called" % (self.brname, self.updown_script))
|
||||
subprocess.check_call([self.updown_script, self.brname, "shutdown"])
|
||||
LxBrNet.shutdown(self)
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
''' Do not include CtrlNet in link messages describing this session.
|
||||
'''
|
||||
def all_link_data(self, flags):
|
||||
"""
|
||||
Do not include CtrlNet in link messages describing this session.
|
||||
"""
|
||||
return []
|
||||
|
||||
|
||||
class CoreNode(LxcNode):
|
||||
apitype = coreapi.CORE_NODE_DEF
|
||||
apitype = NodeTypes.DEFAULT.value
|
||||
|
||||
|
||||
class PtpNet(LxBrNet):
|
||||
policy = "ACCEPT"
|
||||
|
@ -129,127 +139,148 @@ class PtpNet(LxBrNet):
|
|||
def attach(self, netif):
|
||||
if len(self._netif) > 1:
|
||||
raise ValueError, \
|
||||
"Point-to-point links support at most 2 network interfaces"
|
||||
"Point-to-point links support at most 2 network interfaces"
|
||||
LxBrNet.attach(self, netif)
|
||||
|
||||
def tonodemsg(self, flags):
|
||||
''' Do not generate a Node Message for point-to-point links. They are
|
||||
built using a link message instead.
|
||||
'''
|
||||
def data(self, message_type):
|
||||
"""
|
||||
Do not generate a Node Message for point-to-point links. They are
|
||||
built using a link message instead.
|
||||
"""
|
||||
pass
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
''' Build CORE API TLVs for a point-to-point link. One Link message
|
||||
describes this network.
|
||||
'''
|
||||
tlvdata = ""
|
||||
def all_link_data(self, flags):
|
||||
"""
|
||||
Build CORE API TLVs for a point-to-point link. One Link message
|
||||
describes this network.
|
||||
"""
|
||||
|
||||
all_links = []
|
||||
|
||||
if len(self._netif) != 2:
|
||||
return tlvdata
|
||||
(if1, if2) = self._netif.items()
|
||||
return all_links
|
||||
|
||||
if1, if2 = self._netif.items()
|
||||
if1 = if1[1]
|
||||
if2 = if2[1]
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER,
|
||||
if1.node.objid)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER,
|
||||
if2.node.objid)
|
||||
uni = False
|
||||
|
||||
unidirectional = 0
|
||||
if if1.getparams() != if2.getparams():
|
||||
uni = True
|
||||
tlvdata += self.netifparamstolink(if1)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_TYPE,
|
||||
self.linktype)
|
||||
if uni:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_UNI, 1)
|
||||
unidirectional = 1
|
||||
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF1NUM, \
|
||||
if1.node.getifindex(if1))
|
||||
if if1.hwaddr:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF1MAC,
|
||||
if1.hwaddr)
|
||||
for addr in if1.addrlist:
|
||||
(ip, sep, mask) = addr.partition('/')
|
||||
interface1_ip4 = None
|
||||
interface1_ip4_mask = None
|
||||
interface1_ip6 = None
|
||||
interface1_ip6_mask = None
|
||||
for address in if1.addrlist:
|
||||
ip, sep, mask = address.partition('/')
|
||||
mask = int(mask)
|
||||
if isIPv4Address(ip):
|
||||
if ipaddress.is_ipv4_address(ip):
|
||||
family = AF_INET
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF1IP4
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF1IP4MASK
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
interface1_ip4 = ipaddress.IpAddress(af=family, address=ipl)
|
||||
interface1_ip4_mask = mask
|
||||
else:
|
||||
family = AF_INET6
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF1IP6
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF1IP6MASK
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip,
|
||||
IPAddr(af=family, addr=ipl))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask)
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
interface1_ip6 = ipaddress.IpAddress(af=family, address=ipl)
|
||||
interface1_ip6_mask = mask
|
||||
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2NUM, \
|
||||
if2.node.getifindex(if2))
|
||||
if if2.hwaddr:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2MAC,
|
||||
if2.hwaddr)
|
||||
for addr in if2.addrlist:
|
||||
(ip, sep, mask) = addr.partition('/')
|
||||
interface2_ip4 = None
|
||||
interface2_ip4_mask = None
|
||||
interface2_ip6 = None
|
||||
interface2_ip6_mask = None
|
||||
for address in if2.addrlist:
|
||||
ip, sep, mask = address.partition('/')
|
||||
mask = int(mask)
|
||||
if isIPv4Address(ip):
|
||||
if ipaddress.is_ipv4_address(ip):
|
||||
family = AF_INET
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP4
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP4MASK
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
interface2_ip4 = ipaddress.IpAddress(af=family, address=ipl)
|
||||
interface2_ip4_mask = mask
|
||||
else:
|
||||
family = AF_INET6
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP6
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP6MASK
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip,
|
||||
IPAddr(af=family, addr=ipl))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask)
|
||||
msg = coreapi.CoreLinkMessage.pack(flags, tlvdata)
|
||||
if not uni:
|
||||
return [msg,]
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
interface2_ip6 = ipaddress.IpAddress(af=family, address=ipl)
|
||||
interface2_ip6_mask = mask
|
||||
|
||||
# TODO: not currently used
|
||||
# loss=netif.getparam('loss')
|
||||
link_data = LinkData(
|
||||
message_type=flags,
|
||||
node1_id=if1.node.objid,
|
||||
node2_id=if2.node.objid,
|
||||
link_type=self.linktype,
|
||||
unidirectional=unidirectional,
|
||||
delay=if1.getparam("delay"),
|
||||
bandwidth=if1.getparam("bw"),
|
||||
dup=if1.getparam("duplicate"),
|
||||
jitter=if1.getparam("jitter"),
|
||||
interface1_id=if1.node.getifindex(if1),
|
||||
interface1_mac=if1.hwaddr,
|
||||
interface1_ip4=interface1_ip4,
|
||||
interface1_ip4_mask=interface1_ip4_mask,
|
||||
interface1_ip6=interface1_ip6,
|
||||
interface1_ip6_mask=interface1_ip6_mask,
|
||||
interface2_id=if2.node.getifindex(if2),
|
||||
interface2_mac=if2.hwaddr,
|
||||
interface2_ip4=interface2_ip4,
|
||||
interface2_ip4_mask=interface2_ip4_mask,
|
||||
interface2_ip6=interface2_ip6,
|
||||
interface2_ip6_mask=interface2_ip6_mask,
|
||||
)
|
||||
|
||||
all_links.append(link_data)
|
||||
|
||||
# build a 2nd link message for the upstream link parameters
|
||||
# (swap if1 and if2)
|
||||
tlvdata = ""
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER,
|
||||
if2.node.objid)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER,
|
||||
if1.node.objid)
|
||||
tlvdata += self.netifparamstolink(if2)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_UNI, 1)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF1NUM, \
|
||||
if2.node.getifindex(if2))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2NUM, \
|
||||
if1.node.getifindex(if1))
|
||||
msg2 = coreapi.CoreLinkMessage.pack(0, tlvdata)
|
||||
return [msg, msg2]
|
||||
if unidirectional:
|
||||
link_data = LinkData(
|
||||
message_type=0,
|
||||
node1_id=if2.node.objid,
|
||||
node2_id=if1.node.objid,
|
||||
delay=if1.getparam("delay"),
|
||||
bandwidth=if1.getparam("bw"),
|
||||
dup=if1.getparam("duplicate"),
|
||||
jitter=if1.getparam("jitter"),
|
||||
unidirectional=1,
|
||||
interface1_id=if2.node.getifindex(if2),
|
||||
interface2_id=if1.node.getifindex(if1)
|
||||
)
|
||||
all_links.append(link_data)
|
||||
|
||||
return all_links
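The per-interface address handling above repeats the same partition logic for if1 and if2; a hypothetical helper (not part of the CORE API) showing the same idea in isolation:

    import socket
    from socket import AF_INET, AF_INET6

    def split_addresses(addrlist):
        """Partition 'ip/prefixlen' strings into IPv4 and IPv6 (packed address, mask) pairs."""
        ip4, ip6 = [], []
        for address in addrlist:
            ip, _, mask = address.partition("/")
            mask = int(mask)
            try:
                # inet_pton raises socket.error when ip is not a valid IPv4 address
                ip4.append((socket.inet_pton(AF_INET, ip), mask))
            except socket.error:
                ip6.append((socket.inet_pton(AF_INET6, ip), mask))
        return ip4, ip6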
|
||||
|
||||
|
||||
class SwitchNode(LxBrNet):
|
||||
apitype = coreapi.CORE_NODE_SWITCH
|
||||
apitype = NodeTypes.SWITCH.value
|
||||
policy = "ACCEPT"
|
||||
type = "lanswitch"
|
||||
|
||||
|
||||
class HubNode(LxBrNet):
|
||||
apitype = coreapi.CORE_NODE_HUB
|
||||
apitype = NodeTypes.HUB.value
|
||||
policy = "ACCEPT"
|
||||
type = "hub"
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True):
|
||||
''' the Hub node forwards packets to all bridge ports by turning off
|
||||
the MAC address learning
|
||||
'''
|
||||
LxBrNet.__init__(self, session, objid, name, verbose, start)
|
||||
def __init__(self, session, objid=None, name=None, start=True):
|
||||
"""
|
||||
The Hub node forwards packets to all bridge ports by turning off
|
||||
MAC address learning.
|
||||
"""
|
||||
LxBrNet.__init__(self, session, objid, name, start)
|
||||
if start:
|
||||
check_call([BRCTL_BIN, "setageing", self.brname, "0"])
|
||||
subprocess.check_call([constants.BRCTL_BIN, "setageing", self.brname, "0"])
|
||||
|
||||
|
||||
class WlanNode(LxBrNet):
|
||||
apitype = coreapi.CORE_NODE_WLAN
|
||||
linktype = coreapi.CORE_LINK_WIRELESS
|
||||
apitype = NodeTypes.WIRELESS_LAN.value
|
||||
linktype = LinkTypes.WIRELESS.value
|
||||
policy = "DROP"
|
||||
type = "wlan"
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True, policy = None):
|
||||
LxBrNet.__init__(self, session, objid, name, verbose, start, policy)
|
||||
def __init__(self, session, objid=None, name=None, start=True, policy=None):
|
||||
LxBrNet.__init__(self, session, objid, name, start, policy)
|
||||
# wireless model such as basic range
|
||||
self.model = None
|
||||
# mobility model such as scripted
|
||||
|
@ -258,72 +289,75 @@ class WlanNode(LxBrNet):
|
|||
def attach(self, netif):
|
||||
LxBrNet.attach(self, netif)
|
||||
if self.model:
|
||||
netif.poshook = self.model._positioncallback
|
||||
netif.poshook = self.model.position_callback
|
||||
if netif.node is None:
|
||||
return
|
||||
(x,y,z) = netif.node.position.get()
|
||||
(x, y, z) = netif.node.position.get()
|
||||
# invokes any netif.poshook
|
||||
netif.setposition(x, y, z)
|
||||
#self.model.setlinkparams()
|
||||
# self.model.setlinkparams()
|
||||
|
||||
def setmodel(self, model, config):
|
||||
''' Mobility and wireless model.
|
||||
'''
|
||||
if (self.verbose):
|
||||
self.info("adding model %s" % model._name)
|
||||
if model._type == coreapi.CORE_TLV_REG_WIRELESS:
|
||||
self.model = model(session=self.session, objid=self.objid,
|
||||
verbose=self.verbose, values=config)
|
||||
if self.model._positioncallback:
|
||||
"""
|
||||
Sets the mobility and wireless model.
|
||||
|
||||
:param core.mobility.WirelessModel.cls model: wireless model to set to
|
||||
:param config:
|
||||
:return:
|
||||
"""
|
||||
logger.info("adding model %s" % model.name)
|
||||
if model.config_type == RegisterTlvs.WIRELESS.value:
|
||||
self.model = model(session=self.session, object_id=self.objid, values=config)
|
||||
if self.model.position_callback:
|
||||
for netif in self.netifs():
|
||||
netif.poshook = self.model._positioncallback
|
||||
netif.poshook = self.model.position_callback
|
||||
if netif.node is not None:
|
||||
(x,y,z) = netif.node.position.get()
|
||||
(x, y, z) = netif.node.position.get()
|
||||
netif.poshook(netif, x, y, z)
|
||||
self.model.setlinkparams()
|
||||
elif model._type == coreapi.CORE_TLV_REG_MOBILITY:
|
||||
self.mobility = model(session=self.session, objid=self.objid,
|
||||
verbose=self.verbose, values=config)
|
||||
elif model.config_type == RegisterTlvs.MOBILITY.value:
|
||||
self.mobility = model(session=self.session, object_id=self.objid, values=config)
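setmodel() only relies on a few attributes of the model class; a hypothetical skeleton (the real models live in core.mobility and the EMANE packages) that would satisfy the calls made above, assuming the RegisterTlvs enumeration already imported in this module:

    from core.enumerations import RegisterTlvs

    class FakeRangeModel(object):
        """Illustrative stand-in for a wireless model; not a real CORE model."""
        name = "fake_range"
        config_type = RegisterTlvs.WIRELESS.value

        def __init__(self, session, object_id, values=None):
            self.session = session
            self.object_id = object_id
            self.values = values or {}

        def position_callback(self, netif, x, y, z):
            # decide per-link state from node positions; omitted in this sketch
            pass

        def setlinkparams(self):
            # push delay/bandwidth/loss onto attached interfaces; omitted in this sketch
            pass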
|
||||
|
||||
def updatemodel(self, model_name, values):
|
||||
''' Allow for model updates during runtime (similar to setmodel().)
|
||||
'''
|
||||
if (self.verbose):
|
||||
self.info("updating model %s" % model_name)
|
||||
if self.model is None or self.model._name != model_name:
|
||||
"""
|
||||
Allow for model updates during runtime (similar to setmodel().)
|
||||
"""
|
||||
logger.info("updating model %s" % model_name)
|
||||
if self.model is None or self.model.name != model_name:
|
||||
return
|
||||
model = self.model
|
||||
if model._type == coreapi.CORE_TLV_REG_WIRELESS:
|
||||
if model.config_type == RegisterTlvs.WIRELESS.value:
|
||||
if not model.updateconfig(values):
|
||||
return
|
||||
if self.model._positioncallback:
|
||||
if self.model.position_callback:
|
||||
for netif in self.netifs():
|
||||
netif.poshook = self.model._positioncallback
|
||||
netif.poshook = self.model.position_callback
|
||||
if netif.node is not None:
|
||||
(x,y,z) = netif.node.position.get()
|
||||
(x, y, z) = netif.node.position.get()
|
||||
netif.poshook(netif, x, y, z)
|
||||
self.model.setlinkparams()
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
msgs = LxBrNet.tolinkmsgs(self, flags)
|
||||
def all_link_data(self, flags):
|
||||
all_links = LxBrNet.all_link_data(self, flags)
|
||||
|
||||
if self.model:
|
||||
msgs += self.model.tolinkmsgs(flags)
|
||||
return msgs
|
||||
all_links.extend(self.model.all_link_data(flags))
|
||||
|
||||
return all_links
|
||||
|
||||
|
||||
class RJ45Node(PyCoreNode, PyCoreNetIf):
|
||||
''' RJ45Node is a physical interface on the host linked to the emulated
|
||||
network.
|
||||
'''
|
||||
apitype = coreapi.CORE_NODE_RJ45
|
||||
"""
|
||||
RJ45Node is a physical interface on the host linked to the emulated
|
||||
network.
|
||||
"""
|
||||
apitype = NodeTypes.RJ45.value
|
||||
type = "rj45"
|
||||
|
||||
def __init__(self, session, objid = None, name = None, mtu = 1500,
|
||||
verbose = False, start = True):
|
||||
PyCoreNode.__init__(self, session, objid, name, verbose=verbose,
|
||||
start=start)
|
||||
def __init__(self, session, objid=None, name=None, mtu=1500, start=True):
|
||||
PyCoreNode.__init__(self, session, objid, name, start=start)
|
||||
# this initializes net, params, poshook
|
||||
PyCoreNetIf.__init__(self, node=self, name=name, mtu = mtu)
|
||||
PyCoreNetIf.__init__(self, node=self, name=name, mtu=mtu)
|
||||
self.up = False
|
||||
self.lock = threading.RLock()
|
||||
self.ifindex = None
|
||||
|
@ -334,27 +368,28 @@ class RJ45Node(PyCoreNode, PyCoreNetIf):
|
|||
self.startup()
|
||||
|
||||
def startup(self):
|
||||
''' Set the interface in the up state.
|
||||
'''
|
||||
"""
|
||||
Set the interface in the up state.
|
||||
"""
|
||||
# interface will also be marked up during net.attach()
|
||||
self.savestate()
|
||||
try:
|
||||
check_call([IP_BIN, "link", "set", self.localname, "up"])
|
||||
except:
|
||||
self.warn("Failed to run command: %s link set %s up" % \
|
||||
(IP_BIN, self.localname))
|
||||
subprocess.check_call([constants.IP_BIN, "link", "set", self.localname, "up"])
|
||||
except subprocess.CalledProcessError:
|
||||
logger.exception("failed to run command: %s link set %s up", constants.IP_BIN, self.localname)
|
||||
return
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
''' Bring the interface down. Remove any addresses and queuing
|
||||
disciplines.
|
||||
'''
|
||||
"""
|
||||
Bring the interface down. Remove any addresses and queuing
|
||||
disciplines.
|
||||
"""
|
||||
if not self.up:
|
||||
return
|
||||
check_call([IP_BIN, "link", "set", self.localname, "down"])
|
||||
check_call([IP_BIN, "addr", "flush", "dev", self.localname])
|
||||
mutecall([TC_BIN, "qdisc", "del", "dev", self.localname, "root"])
|
||||
subprocess.check_call([constants.IP_BIN, "link", "set", self.localname, "down"])
|
||||
subprocess.check_call([constants.IP_BIN, "addr", "flush", "dev", self.localname])
|
||||
utils.mutecall([constants.TC_BIN, "qdisc", "del", "dev", self.localname, "root"])
|
||||
self.up = False
|
||||
self.restorestate()
|
||||
|
||||
|
@ -364,25 +399,27 @@ class RJ45Node(PyCoreNode, PyCoreNetIf):
|
|||
def detachnet(self):
|
||||
PyCoreNetIf.detachnet(self)
|
||||
|
||||
def newnetif(self, net = None, addrlist = [], hwaddr = None,
|
||||
ifindex = None, ifname = None):
|
||||
''' This is called when linking with another node. Since this node
|
||||
represents an interface, we do not create another object here,
|
||||
but attach ourselves to the given network.
|
||||
'''
|
||||
def newnetif(self, net=None, addrlist=[], hwaddr=None,
|
||||
ifindex=None, ifname=None):
|
||||
"""
|
||||
This is called when linking with another node. Since this node
|
||||
represents an interface, we do not create another object here,
|
||||
but attach ourselves to the given network.
|
||||
"""
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if ifindex is None:
|
||||
ifindex = 0
|
||||
if self.net is not None:
|
||||
raise ValueError, \
|
||||
"RJ45 nodes support at most 1 network interface"
|
||||
"RJ45 nodes support at most 1 network interface"
|
||||
self._netif[ifindex] = self
|
||||
self.node = self # PyCoreNetIf.node is self
|
||||
# PyCoreNetIf.node is self
|
||||
self.node = self
|
||||
self.ifindex = ifindex
|
||||
if net is not None:
|
||||
self.attachnet(net)
|
||||
for addr in maketuple(addrlist):
|
||||
for addr in utils.maketuple(addrlist):
|
||||
self.addaddr(addr)
|
||||
return ifindex
|
||||
finally:
|
||||
|
@ -400,10 +437,11 @@ class RJ45Node(PyCoreNode, PyCoreNetIf):
|
|||
raise ValueError, "ifindex %s does not exist" % ifindex
|
||||
|
||||
def netif(self, ifindex, net=None):
|
||||
''' This object is considered the network interface, so we only
|
||||
return self here. This keeps the RJ45Node compatible with
|
||||
real nodes.
|
||||
'''
|
||||
"""
|
||||
This object is considered the network interface, so we only
|
||||
return self here. This keeps the RJ45Node compatible with
|
||||
real nodes.
|
||||
"""
|
||||
if net is not None and net == self.net:
|
||||
return self
|
||||
if ifindex is None:
|
||||
|
@ -419,27 +457,28 @@ class RJ45Node(PyCoreNode, PyCoreNetIf):
|
|||
|
||||
def addaddr(self, addr):
|
||||
if self.up:
|
||||
check_call([IP_BIN, "addr", "add", str(addr), "dev", self.name])
|
||||
subprocess.check_call([constants.IP_BIN, "addr", "add", str(addr), "dev", self.name])
|
||||
PyCoreNetIf.addaddr(self, addr)
|
||||
|
||||
def deladdr(self, addr):
|
||||
if self.up:
|
||||
check_call([IP_BIN, "addr", "del", str(addr), "dev", self.name])
|
||||
subprocess.check_call([constants.IP_BIN, "addr", "del", str(addr), "dev", self.name])
|
||||
PyCoreNetIf.deladdr(self, addr)
|
||||
|
||||
def savestate(self):
|
||||
''' Save the addresses and other interface state before using the
|
||||
"""
|
||||
Save the addresses and other interface state before using the
|
||||
interface for emulation purposes. TODO: save/restore the PROMISC flag
|
||||
'''
|
||||
"""
|
||||
self.old_up = False
|
||||
self.old_addrs = []
|
||||
cmd = [IP_BIN, "addr", "show", "dev", self.localname]
|
||||
cmd = [constants.IP_BIN, "addr", "show", "dev", self.localname]
|
||||
try:
|
||||
tmp = subprocess.Popen(cmd, stdout = subprocess.PIPE)
|
||||
tmp = subprocess.Popen(cmd, stdout=subprocess.PIPE)
|
||||
except OSError:
|
||||
self.warn("Failed to run %s command: %s" % (IP_BIN, cmd))
|
||||
logger.exception("Failed to run %s command: %s", constants.IP_BIN, cmd)
|
||||
if tmp.wait():
|
||||
self.warn("Command failed: %s" % cmd)
|
||||
logger.warn("Command failed: %s", cmd)
|
||||
return
|
||||
lines = tmp.stdout.read()
|
||||
tmp.stdout.close()
|
||||
|
@ -459,31 +498,27 @@ class RJ45Node(PyCoreNode, PyCoreNetIf):
|
|||
self.old_addrs.append((items[1], None))
|
||||
|
||||
def restorestate(self):
|
||||
''' Restore the addresses and other interface state after using it.
|
||||
'''
|
||||
"""
|
||||
Restore the addresses and other interface state after using it.
|
||||
"""
|
||||
for addr in self.old_addrs:
|
||||
if addr[1] is None:
|
||||
check_call([IP_BIN, "addr", "add", addr[0], "dev",
|
||||
self.localname])
|
||||
subprocess.check_call([constants.IP_BIN, "addr", "add", addr[0], "dev", self.localname])
|
||||
else:
|
||||
check_call([IP_BIN, "addr", "add", addr[0], "brd", addr[1],
|
||||
"dev", self.localname])
|
||||
subprocess.check_call([constants.IP_BIN, "addr", "add", addr[0], "brd", addr[1], "dev", self.localname])
|
||||
if self.old_up:
|
||||
check_call([IP_BIN, "link", "set", self.localname, "up"])
|
||||
subprocess.check_call([constants.IP_BIN, "link", "set", self.localname, "up"])
|
||||
|
||||
def setposition(self, x=None, y=None, z=None):
|
||||
''' Use setposition() from both parent classes.
|
||||
'''
|
||||
"""
|
||||
Use setposition() from both parent classes.
|
||||
"""
|
||||
PyCoreObj.setposition(self, x, y, z)
|
||||
# invoke any poshook
|
||||
PyCoreNetIf.setposition(self, x, y, z)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
class TunnelNode(GreTapBridge):
|
||||
apitype = coreapi.CORE_NODE_TUNNEL
|
||||
apitype = NodeTypes.TUNNEL.value
|
||||
policy = "ACCEPT"
|
||||
type = "tunnel"
|
||||
|
||||
|
|
741
daemon/core/netns/openvswitch.py
Normal file
|
@ -0,0 +1,741 @@
|
|||
"""
|
||||
TODO: probably goes away, or implement the usage of "unshare", or docker formal.
|
||||
"""
|
||||
|
||||
import socket
|
||||
import subprocess
|
||||
import threading
|
||||
from socket import AF_INET
|
||||
from socket import AF_INET6
|
||||
|
||||
from core import constants
|
||||
from core.coreobj import PyCoreNet
|
||||
from core.data import LinkData
|
||||
from core.enumerations import LinkTypes
|
||||
from core.enumerations import NodeTypes
|
||||
from core.enumerations import RegisterTlvs
|
||||
from core.misc import ipaddress
|
||||
from core.misc import log
|
||||
from core.misc import utils
|
||||
from core.netns.vif import GreTap
|
||||
from core.netns.vif import VEth
|
||||
from core.netns.vnet import EbtablesQueue
|
||||
from core.netns.vnet import GreTapBridge
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
# a global object because all WLANs share the same queue
|
||||
# cannot have multiple threads invoking the ebtables command
|
||||
ebtables_queue = EbtablesQueue()
|
||||
|
||||
ebtables_lock = threading.Lock()
|
||||
|
||||
utils.check_executables([
|
||||
constants.IP_BIN,
|
||||
constants.EBTABLES_BIN,
|
||||
constants.TC_BIN
|
||||
])
|
||||
|
||||
|
||||
def ebtables_commands(call, commands):
|
||||
ebtables_lock.acquire()
|
||||
try:
|
||||
for command in commands:
|
||||
call(command)
|
||||
finally:
|
||||
ebtables_lock.release()
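A short usage example for the helper above; the command lists are illustrative and running them requires root and ebtables installed:

    commands = [
        ["ebtables", "-N", "b.test", "-P", "DROP"],
        ["ebtables", "-A", "FORWARD", "--logical-in", "b.test", "-j", "b.test"],
    ]
    # every command runs while ebtables_lock is held, so concurrent bridge
    # setups never interleave their ebtables invocations
    ebtables_commands(subprocess.check_call, commands)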
|
||||
|
||||
|
||||
class OvsNet(PyCoreNet):
|
||||
"""
|
||||
Used to be LxBrNet.
|
||||
|
||||
Base class for providing Openvswitch functionality to objects that create bridges.
|
||||
"""
|
||||
|
||||
policy = "DROP"
|
||||
|
||||
def __init__(self, session, objid=None, name=None, start=True, policy=None):
|
||||
"""
|
||||
Creates an OvsNet instance.
|
||||
|
||||
:param core.session.Session session: session this object is a part of
|
||||
:param objid:
|
||||
:param name:
|
||||
:param start:
|
||||
:param policy:
|
||||
:return:
|
||||
"""
|
||||
|
||||
PyCoreNet.__init__(self, session, objid, name, start)
|
||||
|
||||
if policy:
|
||||
self.policy = policy
|
||||
else:
|
||||
self.policy = self.__class__.policy
|
||||
|
||||
session_id = self.session.short_session_id()
|
||||
self.bridge_name = "b.%s.%s" % (str(self.objid), session_id)
|
||||
self.up = False
|
||||
|
||||
if start:
|
||||
self.startup()
|
||||
ebtables_queue.startupdateloop(self)
|
||||
|
||||
def startup(self):
|
||||
try:
|
||||
subprocess.check_call([constants.OVS_BIN, "add-br", self.bridge_name])
|
||||
except subprocess.CalledProcessError:
|
||||
logger.exception("error adding bridge")
|
||||
|
||||
try:
|
||||
# turn off spanning tree protocol and forwarding delay
|
||||
# TODO: appears stp and rstp are off by default, make sure this always holds true
|
||||
# TODO: appears ovs only supports rstp forward delay and again it's off by default
|
||||
subprocess.check_call([constants.IP_BIN, "link", "set", self.bridge_name, "up"])
|
||||
|
||||
# create a new ebtables chain for this bridge
|
||||
ebtables_commands(subprocess.check_call, [
|
||||
[constants.EBTABLES_BIN, "-N", self.bridge_name, "-P", self.policy],
|
||||
[constants.EBTABLES_BIN, "-A", "FORWARD", "--logical-in", self.bridge_name, "-j", self.bridge_name]
|
||||
])
|
||||
except subprocess.CalledProcessError:
|
||||
logger.exception("Error setting bridge parameters")
|
||||
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
logger.info("exiting shutdown, object is not up")
|
||||
return
|
||||
|
||||
ebtables_queue.stopupdateloop(self)
|
||||
|
||||
utils.mutecall([constants.IP_BIN, "link", "set", self.bridge_name, "down"])
|
||||
utils.mutecall([constants.OVS_BIN, "del-br", self.bridge_name])
|
||||
|
||||
ebtables_commands(utils.mutecall, [
|
||||
[constants.EBTABLES_BIN, "-D", "FORWARD", "--logical-in", self.bridge_name, "-j", self.bridge_name],
|
||||
[constants.EBTABLES_BIN, "-X", self.bridge_name]
|
||||
])
|
||||
|
||||
for interface in self.netifs():
|
||||
# removes veth pairs used for bridge-to-bridge connections
|
||||
interface.shutdown()
|
||||
|
||||
self._netif.clear()
|
||||
self._linked.clear()
|
||||
del self.session
|
||||
self.up = False
|
||||
|
||||
def attach(self, interface):
|
||||
if self.up:
|
||||
try:
|
||||
subprocess.check_call([constants.OVS_BIN, "add-port", self.bridge_name, interface.localname])
|
||||
subprocess.check_call([constants.IP_BIN, "link", "set", interface.localname, "up"])
|
||||
except subprocess.CalledProcessError:
|
||||
logger.exception("error joining interface %s to bridge %s", interface.localname, self.bridge_name)
|
||||
return
|
||||
|
||||
PyCoreNet.attach(self, interface)
|
||||
|
||||
def detach(self, interface):
|
||||
if self.up:
|
||||
try:
|
||||
subprocess.check_call([constants.OVS_BIN, "del-port", self.bridge_name, interface.localname])
|
||||
except subprocess.CalledProcessError:
|
||||
logger.exception("error removing interface %s from bridge %s", interface.localname, self.bridge_name)
|
||||
return
|
||||
|
||||
PyCoreNet.detach(self, interface)
|
||||
|
||||
def linked(self, interface_one, interface_two):
|
||||
# check if the network interfaces are attached to this network
|
||||
if self._netif[interface_one.netifi] != interface_one:
|
||||
raise ValueError("inconsistency for interface %s" % interface_one.name)
|
||||
|
||||
if self._netif[interface_two.netifi] != interface_two:
|
||||
raise ValueError("inconsistency for interface %s" % interface_two.name)
|
||||
|
||||
try:
|
||||
linked = self._linked[interface_one][interface_two]
|
||||
except KeyError:
|
||||
if self.policy == "ACCEPT":
|
||||
linked = True
|
||||
elif self.policy == "DROP":
|
||||
linked = False
|
||||
else:
|
||||
raise ValueError("unknown policy: %s" % self.policy)
|
||||
|
||||
self._linked[interface_one][interface_two] = linked
|
||||
|
||||
return linked
|
||||
|
||||
def unlink(self, interface_one, interface_two):
|
||||
"""
|
||||
Unlink two PyCoreNetIfs, resulting in adding or removing ebtables
|
||||
filtering rules.
|
||||
"""
|
||||
with self._linked_lock:
|
||||
if not self.linked(interface_one, interface_two):
|
||||
return
|
||||
|
||||
self._linked[interface_one][interface_two] = False
|
||||
|
||||
ebtables_queue.ebchange(self)
|
||||
|
||||
def link(self, interface_one, interface_two):
|
||||
"""
|
||||
Link two PyCoreNetIfs together, resulting in adding or removing
|
||||
ebtables filtering rules.
|
||||
"""
|
||||
with self._linked_lock:
|
||||
if self.linked(interface_one, interface_two):
|
||||
return
|
||||
|
||||
self._linked[interface_one][interface_two] = True
|
||||
|
||||
ebtables_queue.ebchange(self)
|
||||
|
||||
def linkconfig(self, interface, bw=None, delay=None, loss=None, duplicate=None,
|
||||
jitter=None, netif2=None, devname=None):
|
||||
"""
|
||||
Configure link parameters by applying tc queuing disciplines on the
|
||||
interface.
|
||||
"""
|
||||
if not devname:
|
||||
devname = interface.localname
|
||||
|
||||
tc = [constants.TC_BIN, "qdisc", "replace", "dev", devname]
|
||||
parent = ["root"]
|
||||
|
||||
# attempt to set bandwidth and update as needed if value changed
|
||||
bandwidth_changed = interface.setparam("bw", bw)
|
||||
if bandwidth_changed:
|
||||
# from tc-tbf(8): minimum value for burst is rate / kernel_hz
|
||||
if bw > 0:
|
||||
if self.up:
|
||||
burst = max(2 * interface.mtu, bw / 1000)
|
||||
limit = 0xffff # max IP payload
|
||||
tbf = ["tbf", "rate", str(bw), "burst", str(burst), "limit", str(limit)]
|
||||
logger.info("linkconfig: %s" % [tc + parent + ["handle", "1:"] + tbf])
|
||||
subprocess.check_call(tc + parent + ["handle", "1:"] + tbf)
|
||||
interface.setparam("has_tbf", True)
|
||||
elif interface.getparam("has_tbf") and bw <= 0:
|
||||
tcd = [] + tc
|
||||
tcd[2] = "delete"
|
||||
|
||||
if self.up:
|
||||
subprocess.check_call(tcd + parent)
|
||||
|
||||
interface.setparam("has_tbf", False)
|
||||
# removing the parent removes the child
|
||||
interface.setparam("has_netem", False)
|
||||
|
||||
if interface.getparam("has_tbf"):
|
||||
parent = ["parent", "1:1"]
|
||||
|
||||
netem = ["netem"]
|
||||
delay_changed = interface.setparam("delay", delay)
|
||||
|
||||
if loss is not None:
|
||||
loss = float(loss)
|
||||
loss_changed = interface.setparam("loss", loss)
|
||||
|
||||
if duplicate is not None:
|
||||
duplicate = float(duplicate)
|
||||
duplicate_changed = interface.setparam("duplicate", duplicate)
|
||||
jitter_changed = interface.setparam("jitter", jitter)
|
||||
|
||||
# if nothing changed return
|
||||
if not any([bandwidth_changed, delay_changed, loss_changed, duplicate_changed, jitter_changed]):
|
||||
return
|
||||
|
||||
# jitter and delay use the same delay statement
|
||||
if delay is not None:
|
||||
netem += ["delay", "%sus" % delay]
|
||||
else:
|
||||
netem += ["delay", "0us"]
|
||||
|
||||
if jitter is not None:
|
||||
netem += ["%sus" % jitter, "25%"]
|
||||
|
||||
if loss is not None:
|
||||
netem += ["loss", "%s%%" % min(loss, 100)]
|
||||
|
||||
if duplicate is not None:
|
||||
netem += ["duplicate", "%s%%" % min(duplicate, 100)]
|
||||
|
||||
if delay <= 0 and jitter <= 0 and loss <= 0 and duplicate <= 0:
|
||||
# possibly remove netem if it exists and parent queue wasn"t removed
|
||||
if not interface.getparam("has_netem"):
|
||||
return
|
||||
|
||||
tc[2] = "delete"
|
||||
|
||||
if self.up:
|
||||
logger.info("linkconfig: %s" % ([tc + parent + ["handle", "10:"]],))
|
||||
subprocess.check_call(tc + parent + ["handle", "10:"])
|
||||
interface.setparam("has_netem", False)
|
||||
elif len(netem) > 1:
|
||||
if self.up:
|
||||
logger.info("linkconfig: %s" % ([tc + parent + ["handle", "10:"] + netem],))
|
||||
subprocess.check_call(tc + parent + ["handle", "10:"] + netem)
|
||||
interface.setparam("has_netem", True)
|
||||
|
||||
def linknet(self, network):
|
||||
"""
|
||||
Link this bridge with another by creating a veth pair and installing
|
||||
each device into each bridge.
|
||||
"""
|
||||
session_id = self.session.short_session_id()
|
||||
|
||||
try:
|
||||
self_objid = "%x" % self.objid
|
||||
except TypeError:
|
||||
self_objid = "%s" % self.objid
|
||||
|
||||
try:
|
||||
net_objid = "%x" % network.objid
|
||||
except TypeError:
|
||||
net_objid = "%s" % network.objid
|
||||
|
||||
localname = "veth%s.%s.%s" % (self_objid, net_objid, session_id)
|
||||
|
||||
if len(localname) >= 16:
|
||||
raise ValueError("interface local name %s too long" % localname)
|
||||
|
||||
name = "veth%s.%s.%s" % (net_objid, self_objid, session_id)
|
||||
if len(name) >= 16:
|
||||
raise ValueError("interface name %s too long" % name)
|
||||
|
||||
interface = VEth(node=None, name=name, localname=localname, mtu=1500, net=self, start=self.up)
|
||||
self.attach(interface)
|
||||
if network.up:
|
||||
# this is similar to net.attach() but uses netif.name instead
|
||||
# of localname
|
||||
subprocess.check_call([constants.OVS_BIN, "add-port", network.brname, interface.name])
|
||||
subprocess.check_call([constants.IP_BIN, "link", "set", interface.name, "up"])
|
||||
|
||||
# TODO: is there a native method for this? see if this causes issues
|
||||
# i = network.newifindex()
|
||||
# network._netif[i] = interface
|
||||
# with network._linked_lock:
|
||||
# network._linked[interface] = {}
|
||||
# this method call is equal to the above, with a interface.netifi = call
|
||||
network.attach(interface)
|
||||
|
||||
interface.net = self
|
||||
interface.othernet = network
|
||||
return interface
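The name-length checks above exist because Linux interface names are limited to IFNAMSIZ-1 (15) characters; a tiny helper illustrating the same constraint for the "veth<a>.<b>.<session>" naming convention used here (the helper itself is hypothetical):

    def veth_names(self_id, other_id, session_id):
        """Build the local/peer veth names and enforce the kernel length limit."""
        localname = "veth%s.%s.%s" % (self_id, other_id, session_id)
        name = "veth%s.%s.%s" % (other_id, self_id, session_id)
        for candidate in (localname, name):
            if len(candidate) >= 16:
                raise ValueError("interface name %s too long" % candidate)
        return localname, name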
|
||||
|
||||
def getlinknetif(self, network):
|
||||
"""
|
||||
Return the interface of that links this net with another net
|
||||
(that were linked using linknet()).
|
||||
"""
|
||||
for interface in self.netifs():
|
||||
if hasattr(interface, "othernet") and interface.othernet == network:
|
||||
return interface
|
||||
|
||||
return None
|
||||
|
||||
def addrconfig(self, addresses):
|
||||
"""
|
||||
Set addresses on the bridge.
|
||||
"""
|
||||
if not self.up:
|
||||
return
|
||||
|
||||
for address in addresses:
|
||||
try:
|
||||
subprocess.check_call([constants.IP_BIN, "addr", "add", str(address), "dev", self.bridge_name])
|
||||
except subprocess.CalledProcessError:
|
||||
logger.exception("error adding IP address")
|
||||
|
||||
|
||||
class OvsCtrlNet(OvsNet):
|
||||
policy = "ACCEPT"
|
||||
CTRLIF_IDX_BASE = 99 # base control interface index
|
||||
DEFAULT_PREFIX_LIST = [
|
||||
"172.16.0.0/24 172.16.1.0/24 172.16.2.0/24 172.16.3.0/24 172.16.4.0/24",
|
||||
"172.17.0.0/24 172.17.1.0/24 172.17.2.0/24 172.17.3.0/24 172.17.4.0/24",
|
||||
"172.18.0.0/24 172.18.1.0/24 172.18.2.0/24 172.18.3.0/24 172.18.4.0/24",
|
||||
"172.19.0.0/24 172.19.1.0/24 172.19.2.0/24 172.19.3.0/24 172.19.4.0/24"
|
||||
]
|
||||
|
||||
def __init__(self, session, objid="ctrlnet", name=None, prefix=None, hostid=None,
|
||||
start=True, assign_address=True, updown_script=None, serverintf=None):
|
||||
OvsNet.__init__(self, session, objid=objid, name=name, start=start)
|
||||
self.prefix = ipaddress.Ipv4Prefix(prefix)
|
||||
self.hostid = hostid
|
||||
self.assign_address = assign_address
|
||||
self.updown_script = updown_script
|
||||
self.serverintf = serverintf
|
||||
|
||||
def startup(self):
|
||||
if self.detectoldbridge():
|
||||
return
|
||||
|
||||
OvsNet.startup(self)
|
||||
if self.hostid:
|
||||
addr = self.prefix.addr(self.hostid)
|
||||
else:
|
||||
addr = self.prefix.max_addr()
|
||||
|
||||
message = "Added control network bridge: %s %s" % (self.bridge_name, self.prefix)
|
||||
addresses = ["%s/%s" % (addr, self.prefix.prefixlen)]
|
||||
if self.assign_address:
|
||||
self.addrconfig(addresses=addresses)
|
||||
message += " address %s" % addr
|
||||
logger.info(message)
|
||||
|
||||
if self.updown_script:
|
||||
logger.info("interface %s updown script %s startup called" % (self.bridge_name, self.updown_script))
|
||||
subprocess.check_call([self.updown_script, self.bridge_name, "startup"])
|
||||
|
||||
if self.serverintf:
|
||||
try:
|
||||
subprocess.check_call([constants.OVS_BIN, "add-port", self.bridge_name, self.serverintf])
|
||||
subprocess.check_call([constants.IP_BIN, "link", "set", self.serverintf, "up"])
|
||||
except subprocess.CalledProcessError:
|
||||
logger.exception("error joining server interface %s to controlnet bridge %s",
|
||||
self.serverintf, self.bridge_name)
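The control address assigned to the bridge is derived from the session prefix: the host id indexes into the prefix when present, otherwise the highest usable address is taken. A small sketch using the same core.misc.ipaddress helpers already imported in this module:

    from core.misc import ipaddress

    def ctrlnet_address(prefix_str, hostid=None):
        """Return the 'addr/prefixlen' string the control bridge would use."""
        prefix = ipaddress.Ipv4Prefix(prefix_str)
        if hostid:
            addr = prefix.addr(hostid)
        else:
            addr = prefix.max_addr()
        return "%s/%s" % (addr, prefix.prefixlen)

    # e.g. ctrlnet_address("172.16.0.0/24", hostid=2) would give something like "172.16.0.2/24"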
|
||||
|
||||
def detectoldbridge(self):
|
||||
"""
|
||||
Occasionally, control net bridges from previously closed sessions are not cleaned up.
|
||||
Check if there are old control net bridges and delete them.
|
||||
"""
|
||||
|
||||
status, output = utils.cmdresult([constants.OVS_BIN, "list-br"])
|
||||
output = output.strip()
|
||||
if output:
|
||||
for line in output.split("\n"):
|
||||
bride_name = line.split(".")
|
||||
if bride_name[0] == "b" and bride_name[1] == self.objid:
|
||||
logger.error("older session may still be running with conflicting id for bridge: %s", line)
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def shutdown(self):
|
||||
if self.serverintf:
|
||||
try:
|
||||
subprocess.check_call([constants.OVS_BIN, "del-port", self.bridge_name, self.serverintf])
|
||||
except subprocess.CalledProcessError:
|
||||
logger.exception("Error deleting server interface %s to controlnet bridge %s",
|
||||
self.serverintf, self.bridge_name)
|
||||
|
||||
if self.updown_script:
|
||||
logger.info("interface %s updown script '%s shutdown' called", self.bridge_name, self.updown_script)
|
||||
subprocess.check_call([self.updown_script, self.bridge_name, "shutdown"])
|
||||
|
||||
OvsNet.shutdown(self)
|
||||
|
||||
def all_link_data(self, flags):
|
||||
"""
|
||||
Do not include CtrlNet in link messages describing this session.
|
||||
"""
|
||||
return []
|
||||
|
||||
|
||||
class OvsPtpNet(OvsNet):
|
||||
policy = "ACCEPT"
|
||||
|
||||
def attach(self, interface):
|
||||
if len(self._netif) >= 2:
|
||||
raise ValueError("point-to-point links support at most 2 network interfaces")
|
||||
OvsNet.attach(self, interface)
|
||||
|
||||
def data(self, message_type):
|
||||
"""
|
||||
Do not generate a Node Message for point-to-point links. They are
|
||||
built using a link message instead.
|
||||
"""
|
||||
pass
|
||||
|
||||
def all_link_data(self, flags):
|
||||
"""
|
||||
Build CORE API TLVs for a point-to-point link. One Link message describes this network.
|
||||
"""
|
||||
|
||||
all_links = []
|
||||
|
||||
if len(self._netif) != 2:
|
||||
return all_links
|
||||
|
||||
if1, if2 = self._netif.items()
|
||||
if1 = if1[1]
|
||||
if2 = if2[1]
|
||||
|
||||
unidirectional = 0
|
||||
if if1.getparams() != if2.getparams():
|
||||
unidirectional = 1
|
||||
|
||||
interface1_ip4 = None
|
||||
interface1_ip4_mask = None
|
||||
interface1_ip6 = None
|
||||
interface1_ip6_mask = None
|
||||
for address in if1.addrlist:
|
||||
ip, sep, mask = address.partition('/')
|
||||
mask = int(mask)
|
||||
if ipaddress.is_ipv4_address(ip):
|
||||
family = AF_INET
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
interface1_ip4 = ipaddress.IpAddress(af=family, address=ipl)
|
||||
interface1_ip4_mask = mask
|
||||
else:
|
||||
family = AF_INET6
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
interface1_ip6 = ipaddress.IpAddress(af=family, address=ipl)
|
||||
interface1_ip6_mask = mask
|
||||
|
||||
interface2_ip4 = None
|
||||
interface2_ip4_mask = None
|
||||
interface2_ip6 = None
|
||||
interface2_ip6_mask = None
|
||||
for address in if2.addrlist:
|
||||
ip, sep, mask = address.partition('/')
|
||||
mask = int(mask)
|
||||
if ipaddress.is_ipv4_address(ip):
|
||||
family = AF_INET
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
interface2_ip4 = ipaddress.IpAddress(af=family, address=ipl)
|
||||
interface2_ip4_mask = mask
|
||||
else:
|
||||
family = AF_INET6
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
interface2_ip6 = ipaddress.IpAddress(af=family, address=ipl)
|
||||
interface2_ip6_mask = mask
|
||||
|
||||
# TODO: not currently used
|
||||
# loss=netif.getparam('loss')
|
||||
link_data = LinkData(
|
||||
message_type=flags,
|
||||
node1_id=if1.node.objid,
|
||||
node2_id=if2.node.objid,
|
||||
link_type=self.linktype,
|
||||
unidirectional=unidirectional,
|
||||
delay=if1.getparam("delay"),
|
||||
bandwidth=if1.getparam("bw"),
|
||||
dup=if1.getparam("duplicate"),
|
||||
jitter=if1.getparam("jitter"),
|
||||
interface1_id=if1.node.getifindex(if1),
|
||||
interface1_mac=if1.hwaddr,
|
||||
interface1_ip4=interface1_ip4,
|
||||
interface1_ip4_mask=interface1_ip4_mask,
|
||||
interface1_ip6=interface1_ip6,
|
||||
interface1_ip6_mask=interface1_ip6_mask,
|
||||
interface2_id=if2.node.getifindex(if2),
|
||||
interface2_mac=if2.hwaddr,
|
||||
interface2_ip4=interface2_ip4,
|
||||
interface2_ip4_mask=interface2_ip4_mask,
|
||||
interface2_ip6=interface2_ip6,
|
||||
interface2_ip6_mask=interface2_ip6_mask,
|
||||
)
|
||||
|
||||
all_links.append(link_data)
|
||||
|
||||
# build a 2nd link message for the upstream link parameters
|
||||
# (swap if1 and if2)
|
||||
if unidirectional:
|
||||
link_data = LinkData(
|
||||
message_type=0,
|
||||
node1_id=if2.node.objid,
|
||||
node2_id=if1.node.objid,
|
||||
delay=if1.getparam("delay"),
|
||||
bandwidth=if1.getparam("bw"),
|
||||
dup=if1.getparam("duplicate"),
|
||||
jitter=if1.getparam("jitter"),
|
||||
unidirectional=1,
|
||||
interface1_id=if2.node.getifindex(if2),
|
||||
interface2_id=if1.node.getifindex(if1)
|
||||
)
|
||||
all_links.append(link_data)
|
||||
|
||||
return all_links
|
||||
|
||||
|
||||
class OvsSwitchNode(OvsNet):
|
||||
apitype = NodeTypes.SWITCH.value
|
||||
policy = "ACCEPT"
|
||||
type = "lanswitch"
|
||||
|
||||
|
||||
class OvsHubNode(OvsNet):
|
||||
apitype = NodeTypes.HUB.value
|
||||
policy = "ACCEPT"
|
||||
type = "hub"
|
||||
|
||||
def __init__(self, session, objid=None, name=None, start=True):
|
||||
"""
|
||||
The Hub node forwards packets to all bridge ports by turning off
|
||||
MAC address learning.
|
||||
"""
|
||||
OvsNet.__init__(self, session, objid, name, start)
|
||||
|
||||
if start:
|
||||
# TODO: verify that the below flow accomplishes what is desired for a "HUB"
|
||||
# TODO: replace "brctl setageing 0"
|
||||
subprocess.check_call([constants.OVS_FLOW_BIN, "add-flow", self.bridge_name, "action=flood"])
|
||||
|
||||
|
||||
class OvsWlanNode(OvsNet):
|
||||
apitype = NodeTypes.WIRELESS_LAN.value
|
||||
linktype = LinkTypes.WIRELESS.value
|
||||
policy = "DROP"
|
||||
type = "wlan"
|
||||
|
||||
def __init__(self, session, objid=None, name=None, start=True, policy=None):
|
||||
OvsNet.__init__(self, session, objid, name, start, policy)
|
||||
|
||||
# wireless model such as basic range
|
||||
self.model = None
|
||||
# mobility model such as scripted
|
||||
self.mobility = None
|
||||
|
||||
def attach(self, interface):
|
||||
OvsNet.attach(self, interface)
|
||||
|
||||
if self.model:
|
||||
interface.poshook = self.model.position_callback
|
||||
|
||||
if interface.node is None:
|
||||
return
|
||||
|
||||
x, y, z = interface.node.position.get()
|
||||
# invokes any netif.poshook
|
||||
interface.setposition(x, y, z)
|
||||
# self.model.setlinkparams()
|
||||
|
||||
def setmodel(self, model, config):
|
||||
"""
|
||||
Mobility and wireless model.
|
||||
"""
|
||||
logger.info("adding model %s", model.name)
|
||||
|
||||
if model.type == RegisterTlvs.WIRELESS.value:
|
||||
self.model = model(session=self.session, object_id=self.objid, values=config)
|
||||
if self.model.position_callback:
|
||||
for interface in self.netifs():
|
||||
interface.poshook = self.model.position_callback
|
||||
if interface.node is not None:
|
||||
x, y, z = interface.node.position.get()
|
||||
interface.poshook(interface, x, y, z)
|
||||
self.model.setlinkparams()
|
||||
elif model.type == RegisterTlvs.MOBILITY.value:
|
||||
self.mobility = model(session=self.session, object_id=self.objid, values=config)
|
||||
|
||||
def updatemodel(self, model_name, values):
|
||||
"""
|
||||
Allow for model updates during runtime (similar to setmodel().)
|
||||
"""
|
||||
logger.info("updating model %s", model_name)
|
||||
if self.model is None or self.model.name != model_name:
|
||||
logger.info(
|
||||
"failure to update model, model doesn't exist or invalid name: model(%s) - name(%s)",
|
||||
self.model, model_name
|
||||
)
|
||||
return
|
||||
|
||||
model = self.model
|
||||
if model.type == RegisterTlvs.WIRELESS.value:
|
||||
if not model.updateconfig(values):
|
||||
return
|
||||
if self.model.position_callback:
|
||||
for interface in self.netifs():
|
||||
interface.poshook = self.model.position_callback
|
||||
if interface.node is not None:
|
||||
x, y, z = interface.node.position.get()
|
||||
interface.poshook(interface, x, y, z)
|
||||
self.model.setlinkparams()
|
||||
|
||||
def all_link_data(self, flags):
|
||||
all_links = OvsNet.all_link_data(self, flags)
|
||||
|
||||
if self.model:
|
||||
all_links.extend(self.model.all_link_data(flags))
|
||||
|
||||
return all_links
|
||||
|
||||
|
||||
class OvsTunnelNode(GreTapBridge):
|
||||
apitype = NodeTypes.TUNNEL.value
|
||||
policy = "ACCEPT"
|
||||
type = "tunnel"
|
||||
|
||||
|
||||
class OvsGreTapBridge(OvsNet):
|
||||
"""
|
||||
A network consisting of a bridge with a gretap device for tunneling to
|
||||
another system.
|
||||
"""
|
||||
|
||||
def __init__(self, session, remoteip=None, objid=None, name=None, policy="ACCEPT",
|
||||
localip=None, ttl=255, key=None, start=True):
|
||||
OvsNet.__init__(self, session=session, objid=objid, name=name, policy=policy, start=False)
|
||||
self.grekey = key
|
||||
if self.grekey is None:
|
||||
self.grekey = self.session.session_id ^ self.objid
|
||||
|
||||
self.localnum = None
|
||||
self.remotenum = None
|
||||
self.remoteip = remoteip
|
||||
self.localip = localip
|
||||
self.ttl = ttl
|
||||
|
||||
if remoteip is None:
|
||||
self.gretap = None
|
||||
else:
|
||||
self.gretap = GreTap(node=self, name=None, session=session, remoteip=remoteip,
|
||||
objid=None, localip=localip, ttl=ttl, key=self.grekey)
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def startup(self):
|
||||
"""
|
||||
Creates a bridge and adds the gretap device to it.
|
||||
"""
|
||||
OvsNet.startup(self)
|
||||
|
||||
if self.gretap:
|
||||
self.attach(self.gretap)
|
||||
|
||||
def shutdown(self):
|
||||
"""
|
||||
Detach the gretap device and remove the bridge.
|
||||
"""
|
||||
if self.gretap:
|
||||
self.detach(self.gretap)
|
||||
self.gretap.shutdown()
|
||||
self.gretap = None
|
||||
|
||||
OvsNet.shutdown(self)
|
||||
|
||||
def addrconfig(self, addresses):
|
||||
"""
|
||||
Set the remote tunnel endpoint. This is a one-time method for
|
||||
creating the GreTap device, which requires the remoteip at startup.
|
||||
The 1st address in the provided list is remoteip, 2nd optionally
|
||||
specifies localip.
|
||||
"""
|
||||
if self.gretap:
|
||||
raise ValueError("gretap already exists for %s" % self.name)
|
||||
|
||||
remoteip = addresses[0].split('/')[0]
|
||||
localip = None
|
||||
|
||||
if len(addresses) > 1:
|
||||
localip = addresses[1].split('/')[0]
|
||||
|
||||
self.gretap = GreTap(session=self.session, remoteip=remoteip, objid=None, name=None,
|
||||
localip=localip, ttl=self.ttl, key=self.grekey)
|
||||
self.attach(self.gretap)
|
||||
|
||||
def setkey(self, key):
|
||||
"""
|
||||
Set the GRE key used for the GreTap device. This needs to be set
|
||||
prior to instantiating the GreTap device (before addrconfig).
|
||||
"""
|
||||
self.grekey = key
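setkey() must be called before addrconfig() because the GRE key is baked into the gretap device at creation time; a hypothetical usage sketch (the session object, object id and addresses are placeholders, and running it requires root):

    # bridge is created without a tunnel endpoint yet
    bridge = OvsGreTapBridge(session, objid=1000, name="tun1", remoteip=None)
    # choose the GRE key first, then supply the remote (and optional local) endpoints
    bridge.setkey(42)
    bridge.addrconfig(["192.0.2.10/32", "192.0.2.1/32"])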
|
|
@ -1,58 +1,53 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2014 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
vif.py: PyCoreNetIf classes that implement the interfaces available
|
||||
"""
|
||||
PyCoreNetIf classes that implement the interfaces available
|
||||
under Linux.
|
||||
'''
|
||||
"""
|
||||
|
||||
import os, signal, shutil, sys, subprocess, vnodeclient, threading, string
|
||||
import random, time
|
||||
from core.api import coreapi
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
from core.coreobj import PyCoreObj, PyCoreNode, PyCoreNetIf, Position
|
||||
from core.emane.nodes import EmaneNode
|
||||
import subprocess
|
||||
import time
|
||||
|
||||
from core import constants
|
||||
from core.coreobj import PyCoreNetIf
|
||||
from core.misc import log
|
||||
from core.misc import utils
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
utils.check_executables([constants.IP_BIN])
|
||||
|
||||
checkexec([IP_BIN])
|
||||
|
||||
class VEth(PyCoreNetIf):
|
||||
def __init__(self, node, name, localname, mtu = 1500, net = None,
|
||||
start = True):
|
||||
def __init__(self, node, name, localname, mtu=1500, net=None, start=True):
|
||||
# note that net arg is ignored
|
||||
PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu)
|
||||
PyCoreNetIf.__init__(self, node=node, name=name, mtu=mtu)
|
||||
self.localname = localname
|
||||
self.up = False
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def startup(self):
|
||||
check_call([IP_BIN, "link", "add", "name", self.localname,
|
||||
"type", "veth", "peer", "name", self.name])
|
||||
check_call([IP_BIN, "link", "set", self.localname, "up"])
|
||||
subprocess.check_call([constants.IP_BIN, "link", "add", "name", self.localname,
|
||||
"type", "veth", "peer", "name", self.name])
|
||||
subprocess.check_call([constants.IP_BIN, "link", "set", self.localname, "up"])
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
if self.node:
|
||||
self.node.cmd([IP_BIN, "-6", "addr", "flush", "dev", self.name])
|
||||
self.node.cmd([constants.IP_BIN, "-6", "addr", "flush", "dev", self.name])
|
||||
if self.localname:
|
||||
mutedetach([IP_BIN, "link", "delete", self.localname])
|
||||
utils.mutedetach([constants.IP_BIN, "link", "delete", self.localname])
|
||||
self.up = False
|
||||
|
||||
|
||||
class TunTap(PyCoreNetIf):
|
||||
''' TUN/TAP virtual device in TAP mode
|
||||
'''
|
||||
def __init__(self, node, name, localname, mtu = 1500, net = None,
|
||||
start = True):
|
||||
PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu)
|
||||
"""
|
||||
TUN/TAP virtual device in TAP mode
|
||||
"""
|
||||
|
||||
def __init__(self, node, name, localname, mtu=1500, net=None, start=True):
|
||||
PyCoreNetIf.__init__(self, node=node, name=name, mtu=mtu)
|
||||
self.localname = localname
|
||||
self.up = False
|
||||
self.transport_type = "virtual"
|
||||
|
@ -62,24 +57,24 @@ class TunTap(PyCoreNetIf):
|
|||
def startup(self):
|
||||
# TODO: more sophisticated TAP creation here
|
||||
# Debian does not support -p (tap) option, RedHat does.
|
||||
# For now, this is disabled to allow the TAP to be created by another
|
||||
# For now, this is disabled to allow the TAP to be created by another
|
||||
# system (e.g. EMANE's emanetransportd)
|
||||
#check_call(["tunctl", "-t", self.name])
|
||||
# check_call(["tunctl", "-t", self.name])
|
||||
# self.install()
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
self.node.cmd([IP_BIN, "-6", "addr", "flush", "dev", self.name])
|
||||
#if self.name:
|
||||
self.node.cmd([constants.IP_BIN, "-6", "addr", "flush", "dev", self.name])
|
||||
# if self.name:
|
||||
# mutedetach(["tunctl", "-d", self.localname])
|
||||
self.up = False
|
||||
|
||||
def waitfor(self, func, attempts = 10, maxretrydelay = 0.25):
|
||||
'''\
|
||||
def waitfor(self, func, attempts=10, maxretrydelay=0.25):
|
||||
"""
|
||||
Wait for func() to return zero with exponential backoff
|
||||
'''
|
||||
"""
|
||||
delay = 0.01
|
||||
for i in xrange(1, attempts + 1):
|
||||
r = func()
|
||||
|
@ -88,98 +83,93 @@ class TunTap(PyCoreNetIf):
|
|||
msg = 'attempt %s failed with nonzero exit status %s' % (i, r)
|
||||
if i < attempts + 1:
|
||||
msg += ', retrying...'
|
||||
self.node.info(msg)
|
||||
logger.info(msg)
|
||||
time.sleep(delay)
|
||||
delay = delay + delay
|
||||
if delay > maxretrydelay:
|
||||
delay = maxretrydelay
|
||||
else:
|
||||
msg += ', giving up'
|
||||
self.node.info(msg)
|
||||
raise RuntimeError, 'command failed after %s attempts' % attempts
|
||||
logger.info(msg)
|
||||
|
||||
raise RuntimeError('command failed after %s attempts' % attempts)
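waitfor() retries with exponential backoff capped at maxretrydelay; the same pattern in isolation, for reference:

    import time

    def retry_until_zero(func, attempts=10, maxretrydelay=0.25):
        """Call func() until it returns 0, doubling the sleep up to maxretrydelay."""
        delay = 0.01
        for i in range(1, attempts + 1):
            if func() == 0:
                return
            if i < attempts:
                time.sleep(delay)
                delay = min(delay * 2, maxretrydelay)
        raise RuntimeError("command failed after %s attempts" % attempts)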
|
||||
|
||||
def waitfordevicelocal(self):
|
||||
'''\
|
||||
"""
|
||||
Check for presence of a local device - tap device may not
|
||||
appear right away waits
|
||||
'''
|
||||
"""
|
||||
|
||||
def localdevexists():
|
||||
cmd = (IP_BIN, 'link', 'show', self.localname)
|
||||
return mutecall(cmd)
|
||||
cmd = (constants.IP_BIN, 'link', 'show', self.localname)
|
||||
return utils.mutecall(cmd)
|
||||
|
||||
self.waitfor(localdevexists)
|
||||
|
||||
def waitfordevicenode(self):
|
||||
'''\
|
||||
"""
|
||||
Check for presence of a node device - tap device may not
|
||||
appear right away waits
|
||||
'''
|
||||
"""
|
||||
|
||||
def nodedevexists():
|
||||
cmd = (IP_BIN, 'link', 'show', self.name)
|
||||
cmd = (constants.IP_BIN, 'link', 'show', self.name)
|
||||
return self.node.cmd(cmd)
|
||||
count = 0
|
||||
while True:
|
||||
try:
|
||||
self.waitfor(nodedevexists)
|
||||
break
|
||||
except RuntimeError:
|
||||
# check if this is an EMANE interface; if so, continue
|
||||
# waiting if EMANE is still running
|
||||
if count < 5 and isinstance(self.net, EmaneNode) and \
|
||||
self.node.session.emane.emanerunning(self.node):
|
||||
count += 1
|
||||
else:
|
||||
raise
|
||||
|
||||
self.waitfor(nodedevexists)
|
||||
|
||||
def install(self):
|
||||
''' Install this TAP into its namespace. This is not done from the
|
||||
startup() method but called at a later time when a userspace
|
||||
program (running on the host) has had a chance to open the socket
|
||||
end of the TAP.
|
||||
'''
|
||||
"""
|
||||
Install this TAP into its namespace. This is not done from the
|
||||
startup() method but called at a later time when a userspace
|
||||
program (running on the host) has had a chance to open the socket
|
||||
end of the TAP.
|
||||
"""
|
||||
self.waitfordevicelocal()
|
||||
netns = str(self.node.pid)
|
||||
try:
|
||||
check_call([IP_BIN, "link", "set", self.localname, "netns", netns])
|
||||
except Exception, e:
|
||||
subprocess.check_call([constants.IP_BIN, "link", "set", self.localname, "netns", netns])
|
||||
except subprocess.CalledProcessError:
|
||||
msg = "error installing TAP interface %s, command:" % self.localname
|
||||
msg += "ip link set %s netns %s" % (self.localname, netns)
|
||||
self.node.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.localname, msg)
|
||||
self.node.warn(msg)
|
||||
logger.exception(msg)
|
||||
return
|
||||
self.node.cmd([IP_BIN, "link", "set", self.localname,
|
||||
"name", self.name])
|
||||
self.node.cmd([IP_BIN, "link", "set", self.name, "up"])
|
||||
|
||||
self.node.cmd([constants.IP_BIN, "link", "set", self.localname, "name", self.name])
|
||||
self.node.cmd([constants.IP_BIN, "link", "set", self.name, "up"])
|
||||
|
||||
def setaddrs(self):
|
||||
''' Set interface addresses based on self.addrlist.
|
||||
'''
|
||||
"""
|
||||
Set interface addresses based on self.addrlist.
|
||||
"""
|
||||
self.waitfordevicenode()
|
||||
for addr in self.addrlist:
|
||||
self.node.cmd([IP_BIN, "addr", "add", str(addr),
|
||||
"dev", self.name])
|
||||
self.node.cmd([constants.IP_BIN, "addr", "add", str(addr), "dev", self.name])
|
||||
|
||||
|
||||
class GreTap(PyCoreNetIf):
|
||||
''' GRE TAP device for tunneling between emulation servers.
|
||||
Uses the "gretap" tunnel device type from Linux which is a GRE device
|
||||
having a MAC address. The MAC address is required for bridging.
|
||||
'''
|
||||
def __init__(self, node = None, name = None, session = None, mtu = 1458,
|
||||
remoteip = None, objid = None, localip = None, ttl = 255,
|
||||
key = None, start = True):
|
||||
PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu)
|
||||
"""
|
||||
GRE TAP device for tunneling between emulation servers.
|
||||
Uses the "gretap" tunnel device type from Linux which is a GRE device
|
||||
having a MAC address. The MAC address is required for bridging.
|
||||
"""
|
||||
|
||||
def __init__(self, node=None, name=None, session=None, mtu=1458,
|
||||
remoteip=None, objid=None, localip=None, ttl=255,
|
||||
key=None, start=True):
|
||||
PyCoreNetIf.__init__(self, node=node, name=name, mtu=mtu)
|
||||
self.session = session
|
||||
if objid is None:
|
||||
# from PyCoreObj
|
||||
objid = (((id(self) >> 16) ^ (id(self) & 0xffff)) & 0xffff)
|
||||
objid = ((id(self) >> 16) ^ (id(self) & 0xffff)) & 0xffff
|
||||
self.objid = objid
|
||||
sessionid = self.session.shortsessionid()
|
||||
sessionid = self.session.short_session_id()
|
||||
# interface name on the local host machine
|
||||
self.localname = "gt.%s.%s" % (self.objid, sessionid)
|
||||
self.transport_type = "raw"
|
||||
if not start:
|
||||
self.up = False
|
||||
return
|
||||
|
||||
|
||||
if remoteip is None:
|
||||
raise ValueError, "missing remote IP required for GRE TAP device"
|
||||
cmd = ("ip", "link", "add", self.localname, "type", "gretap",
|
||||
|
@ -190,21 +180,21 @@ class GreTap(PyCoreNetIf):
|
|||
cmd += ("ttl", str(ttl))
|
||||
if key:
|
||||
cmd += ("key", str(key))
|
||||
check_call(cmd)
|
||||
subprocess.check_call(cmd)
|
||||
cmd = ("ip", "link", "set", self.localname, "up")
|
||||
check_call(cmd)
|
||||
subprocess.check_call(cmd)
|
||||
self.up = True
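For a concrete picture of what the constructor runs, a sketch that assembles the iproute2 commands for a gretap device; the exact argument ordering of the portion elided by the hunk above may differ slightly, and the values here are examples only:

    def gretap_commands(localname, remoteip, localip=None, ttl=255, key=None):
        """Return the commands used to create and bring up a gretap device."""
        cmd = ["ip", "link", "add", localname, "type", "gretap", "remote", str(remoteip)]
        if localip:
            cmd += ["local", str(localip)]
        if ttl:
            cmd += ["ttl", str(ttl)]
        if key:
            cmd += ["key", str(key)]
        return cmd, ["ip", "link", "set", localname, "up"]

    # e.g. gretap_commands("gt.e1.abcd", "192.0.2.20", localip="192.0.2.10", key=42)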
|
||||
|
||||
def shutdown(self):
|
||||
if self.localname:
|
||||
cmd = ("ip", "link", "set", self.localname, "down")
|
||||
check_call(cmd)
|
||||
subprocess.check_call(cmd)
|
||||
cmd = ("ip", "link", "del", self.localname)
|
||||
check_call(cmd)
|
||||
subprocess.check_call(cmd)
|
||||
self.localname = None
|
||||
|
||||
def tonodemsg(self, flags):
|
||||
|
||||
def data(self, message_type):
|
||||
return None
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
|
||||
def all_link_data(self, flags):
|
||||
return []
|
||||
|
|
|
@@ -1,42 +1,48 @@
#
# CORE
# Copyright (c)2010-2016 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# authors: Tom Goff <thomas.goff@boeing.com>
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
vnet.py: PyCoreNet and LxBrNet classes that implement virtual networks using
"""
PyCoreNet and LxBrNet classes that implement virtual networks using
Linux Ethernet bridging and ebtables rules.
'''
"""

import os, sys, threading, time, subprocess
import os
import subprocess
import threading
import time

from core.api import coreapi
from core.misc.utils import *
from core.constants import *
from core.coreobj import PyCoreNet, PyCoreObj
from core.netns.vif import VEth, GreTap
from core import constants
from core.coreobj import PyCoreNet
from core.misc import log
from core.misc import utils
from core.netns.vif import GreTap
from core.netns.vif import VEth

checkexec([BRCTL_BIN, IP_BIN, EBTABLES_BIN, TC_BIN])
logger = log.get_logger(__name__)

utils.check_executables([
constants.BRCTL_BIN,
constants.IP_BIN,
constants.EBTABLES_BIN,
constants.TC_BIN
])

ebtables_lock = threading.Lock()


class EbtablesQueue(object):
''' Helper class for queuing up ebtables commands into rate-limited
"""
Helper class for queuing up ebtables commands into rate-limited
atomic commits. This improves performance and reliability when there are
many WLAN link updates.
'''
"""
# update rate is every 300ms
rate = 0.3
# ebtables
atomic_file = "/tmp/pycore.ebtables.atomic"


def __init__(self):
''' Initialize the helper class, but don't start the update thread
"""
Initialize the helper class, but don't start the update thread
until a WLAN is instantiated.
'''
"""
self.doupdateloop = False
self.updatethread = None
# this lock protects cmds and updates lists
|
@@ -48,28 +54,31 @@ class EbtablesQueue(object):
|
|||
# timestamps of last WLAN update; this keeps track of WLANs that are
|
||||
# using this queue
|
||||
self.last_update_time = {}
|
||||
|
||||
|
||||
def startupdateloop(self, wlan):
|
||||
''' Kick off the update loop; only needs to be invoked once.
|
||||
'''
|
||||
"""
|
||||
Kick off the update loop; only needs to be invoked once.
|
||||
"""
|
||||
self.updatelock.acquire()
|
||||
self.last_update_time[wlan] = time.time()
|
||||
self.updatelock.release()
|
||||
if self.doupdateloop:
|
||||
return
|
||||
self.doupdateloop = True
|
||||
self.updatethread = threading.Thread(target = self.updateloop)
|
||||
self.updatethread = threading.Thread(target=self.updateloop)
|
||||
self.updatethread.daemon = True
|
||||
self.updatethread.start()
|
||||
|
||||
|
||||
def stopupdateloop(self, wlan):
|
||||
''' Kill the update loop thread if there are no more WLANs using it.
|
||||
'''
|
||||
"""
|
||||
Kill the update loop thread if there are no more WLANs using it.
|
||||
"""
|
||||
self.updatelock.acquire()
|
||||
try:
|
||||
del self.last_update_time[wlan]
|
||||
except KeyError:
|
||||
pass
|
||||
logger.exception("error deleting last update time for wlan: %s", wlan)
|
||||
|
||||
self.updatelock.release()
|
||||
if len(self.last_update_time) > 0:
|
||||
return
|
||||
|
@@ -77,131 +86,132 @@ class EbtablesQueue(object):
|
|||
if self.updatethread:
|
||||
self.updatethread.join()
|
||||
self.updatethread = None
|
||||
|
||||
|
||||
def ebatomiccmd(self, cmd):
|
||||
''' Helper for building ebtables atomic file command list.
|
||||
'''
|
||||
r = [EBTABLES_BIN, "--atomic-file", self.atomic_file]
|
||||
"""
|
||||
Helper for building ebtables atomic file command list.
|
||||
"""
|
||||
r = [constants.EBTABLES_BIN, "--atomic-file", self.atomic_file]
|
||||
if cmd:
|
||||
r.extend(cmd)
|
||||
return r
|
||||
|
||||
|
||||
def lastupdate(self, wlan):
|
||||
''' Return the time elapsed since this WLAN was last updated.
|
||||
'''
|
||||
"""
|
||||
Return the time elapsed since this WLAN was last updated.
|
||||
"""
|
||||
try:
|
||||
elapsed = time.time() - self.last_update_time[wlan]
|
||||
except KeyError:
|
||||
self.last_update_time[wlan] = time.time()
|
||||
elapsed = 0.0
|
||||
return elapsed
|
||||
|
||||
|
||||
def updated(self, wlan):
|
||||
''' Keep track of when this WLAN was last updated.
|
||||
'''
|
||||
"""
|
||||
Keep track of when this WLAN was last updated.
|
||||
"""
|
||||
self.last_update_time[wlan] = time.time()
|
||||
self.updates.remove(wlan)
|
||||
|
||||
|
||||
def updateloop(self):
|
||||
''' Thread target that looks for WLANs needing update, and
|
||||
"""
|
||||
Thread target that looks for WLANs needing update, and
|
||||
rate limits the amount of ebtables activity. Only one userspace program
|
||||
should use ebtables at any given time, or results can be unpredictable.
|
||||
'''
|
||||
"""
|
||||
while self.doupdateloop:
|
||||
self.updatelock.acquire()
|
||||
for wlan in self.updates:
|
||||
'''
|
||||
Check if wlan is from a previously closed session. Because of the
|
||||
rate limiting scheme employed here, this may happen if a new session
|
||||
"""
|
||||
Check if wlan is from a previously closed session. Because of the
|
||||
rate limiting scheme employed here, this may happen if a new session
|
||||
is started soon after closing a previous session.
|
||||
'''
|
||||
"""
|
||||
try:
|
||||
wlan.session
|
||||
except:
|
||||
# Just mark as updated to remove from self.updates.
|
||||
# Just mark as updated to remove from self.updates.
|
||||
self.updated(wlan)
|
||||
continue
|
||||
if self.lastupdate(wlan) > self.rate:
|
||||
self.buildcmds(wlan)
|
||||
#print "ebtables commit %d rules" % len(self.cmds)
|
||||
# print "ebtables commit %d rules" % len(self.cmds)
|
||||
self.ebcommit(wlan)
|
||||
self.updated(wlan)
|
||||
self.updatelock.release()
|
||||
time.sleep(self.rate)
|
||||
|
||||
|
||||
def ebcommit(self, wlan):
|
||||
''' Perform ebtables atomic commit using commands built in the
|
||||
"""
|
||||
Perform ebtables atomic commit using commands built in the
|
||||
self.cmds list.
|
||||
'''
|
||||
"""
|
||||
# save kernel ebtables snapshot to a file
|
||||
cmd = self.ebatomiccmd(["--atomic-save",])
|
||||
cmd = self.ebatomiccmd(["--atomic-save", ])
|
||||
try:
|
||||
check_call(cmd)
|
||||
except Exception, e:
|
||||
self.eberror(wlan, "atomic-save (%s)" % cmd, e)
|
||||
subprocess.check_call(cmd)
|
||||
except subprocess.CalledProcessError:
|
||||
logger.exception("atomic-save (%s)", cmd)
|
||||
# no atomic file, exit
|
||||
return
|
||||
# modify the table file using queued ebtables commands
|
||||
for c in self.cmds:
|
||||
cmd = self.ebatomiccmd(c)
|
||||
try:
|
||||
check_call(cmd)
|
||||
except Exception, e:
|
||||
self.eberror(wlan, "cmd=%s" % cmd, e)
|
||||
pass
|
||||
subprocess.check_call(cmd)
|
||||
except subprocess.CalledProcessError:
|
||||
logger.exception("cmd=%s", cmd)
|
||||
|
||||
self.cmds = []
|
||||
# commit the table file to the kernel
|
||||
cmd = self.ebatomiccmd(["--atomic-commit",])
|
||||
cmd = self.ebatomiccmd(["--atomic-commit", ])
|
||||
|
||||
try:
|
||||
check_call(cmd)
|
||||
subprocess.check_call(cmd)
|
||||
os.unlink(self.atomic_file)
|
||||
except Exception, e:
|
||||
self.eberror(wlan, "atomic-commit (%s)" % cmd, e)
|
||||
|
||||
except OSError:
|
||||
logger.exception("atomic-commit (%s)", cmd)
|
||||
|
||||
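The ebcommit() flow above follows the standard ebtables atomic-file pattern: snapshot the kernel tables to a file, apply the queued edits against the file, then load it back in a single commit. A hedged sketch, with a hypothetical chain name standing in for wlan.brname:

import subprocess

atomic_file = "/tmp/pycore.ebtables.atomic"
base = ["ebtables", "--atomic-file", atomic_file]
subprocess.check_call(base + ["--atomic-save"])    # snapshot kernel tables into the file
subprocess.check_call(base + ["-F", "b.1.abcd"])   # queued rule edits touch the file, not the kernel
subprocess.check_call(base + ["--atomic-commit"])  # apply all edits to the kernel in one step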
def ebchange(self, wlan):
|
||||
''' Flag a change to the given WLAN's _linked dict, so the ebtables
|
||||
"""
|
||||
Flag a change to the given WLAN's _linked dict, so the ebtables
|
||||
chain will be rebuilt at the next interval.
|
||||
'''
|
||||
"""
|
||||
self.updatelock.acquire()
|
||||
if wlan not in self.updates:
|
||||
self.updates.append(wlan)
|
||||
self.updatelock.release()
|
||||
|
||||
|
||||
def buildcmds(self, wlan):
|
||||
''' Inspect a _linked dict from a wlan, and rebuild the ebtables chain
|
||||
"""
|
||||
Inspect a _linked dict from a wlan, and rebuild the ebtables chain
|
||||
for that WLAN.
|
||||
'''
|
||||
"""
|
||||
wlan._linked_lock.acquire()
|
||||
# flush the chain
|
||||
self.cmds.extend([["-F", wlan.brname],])
|
||||
self.cmds.extend([["-F", wlan.brname], ])
|
||||
# rebuild the chain
|
||||
for (netif1, v) in wlan._linked.items():
|
||||
for (netif2, linked) in v.items():
|
||||
for netif1, v in wlan._linked.items():
|
||||
for netif2, linked in v.items():
|
||||
if wlan.policy == "DROP" and linked:
|
||||
self.cmds.extend([["-A", wlan.brname, "-i", netif1.localname,
|
||||
"-o", netif2.localname, "-j", "ACCEPT"],
|
||||
["-A", wlan.brname, "-o", netif1.localname,
|
||||
"-i", netif2.localname, "-j", "ACCEPT"]])
|
||||
"-o", netif2.localname, "-j", "ACCEPT"],
|
||||
["-A", wlan.brname, "-o", netif1.localname,
|
||||
"-i", netif2.localname, "-j", "ACCEPT"]])
|
||||
elif wlan.policy == "ACCEPT" and not linked:
|
||||
self.cmds.extend([["-A", wlan.brname, "-i", netif1.localname,
|
||||
"-o", netif2.localname, "-j", "DROP"],
|
||||
["-A", wlan.brname, "-o", netif1.localname,
|
||||
"-i", netif2.localname, "-j", "DROP"]])
|
||||
"-o", netif2.localname, "-j", "DROP"],
|
||||
["-A", wlan.brname, "-o", netif1.localname,
|
||||
"-i", netif2.localname, "-j", "DROP"]])
|
||||
wlan._linked_lock.release()
|
||||
|
||||
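For a DROP-policy WLAN, buildcmds() above appends a symmetric pair of ACCEPT rules per linked interface pair. A sketch of the two rules queued for one link, with example chain and interface names:

# example names; the real ones come from the bridge and its attached interfaces
brname, if1, if2 = "b.1.abcd", "veth1.0.abcd", "veth2.0.abcd"
cmds = [
    ["-A", brname, "-i", if1, "-o", if2, "-j", "ACCEPT"],
    ["-A", brname, "-o", if1, "-i", if2, "-j", "ACCEPT"],
]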
def eberror(self, wlan, source, error):
|
||||
''' Log an ebtables command error and send an exception.
|
||||
'''
|
||||
if not wlan:
|
||||
return
|
||||
wlan.exception(coreapi.CORE_EXCP_LEVEL_ERROR, wlan.brname,
|
||||
"ebtables command error: %s\n%s\n" % (source, error))
|
||||
|
||||
|
||||
|
||||
# a global object because all WLANs share the same queue
|
||||
# cannot have multiple threads invoking the ebtables command
|
||||
ebq = EbtablesQueue()
|
||||
|
||||
|
||||
def ebtablescmds(call, cmds):
|
||||
ebtables_lock.acquire()
|
||||
try:
|
||||
|
@@ -210,19 +220,18 @@ def ebtablescmds(call, cmds):
|
|||
finally:
|
||||
ebtables_lock.release()
|
||||
|
||||
class LxBrNet(PyCoreNet):
|
||||
|
||||
class LxBrNet(PyCoreNet):
|
||||
policy = "DROP"
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True, policy = None):
|
||||
PyCoreNet.__init__(self, session, objid, name, verbose, start)
|
||||
def __init__(self, session, objid=None, name=None, start=True, policy=None):
|
||||
PyCoreNet.__init__(self, session, objid, name, start)
|
||||
if name is None:
|
||||
name = str(self.objid)
|
||||
if policy is not None:
|
||||
self.policy = policy
|
||||
self.name = name
|
||||
sessionid = self.session.shortsessionid()
|
||||
sessionid = self.session.short_session_id()
|
||||
self.brname = "b.%s.%s" % (str(self.objid), sessionid)
|
||||
self.up = False
|
||||
if start:
|
||||
|
@@ -231,28 +240,26 @@ class LxBrNet(PyCoreNet):
|
|||
|
||||
def startup(self):
|
||||
try:
|
||||
check_call([BRCTL_BIN, "addbr", self.brname])
|
||||
except Exception, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_FATAL, self.brname,
|
||||
"Error adding bridge: %s" % e)
|
||||
subprocess.check_call([constants.BRCTL_BIN, "addbr", self.brname])
|
||||
except subprocess.CalledProcessError:
|
||||
logger.exception("Error adding bridge")
|
||||
|
||||
try:
|
||||
# turn off spanning tree protocol and forwarding delay
|
||||
check_call([BRCTL_BIN, "stp", self.brname, "off"])
|
||||
check_call([BRCTL_BIN, "setfd", self.brname, "0"])
|
||||
check_call([IP_BIN, "link", "set", self.brname, "up"])
|
||||
subprocess.check_call([constants.BRCTL_BIN, "stp", self.brname, "off"])
|
||||
subprocess.check_call([constants.BRCTL_BIN, "setfd", self.brname, "0"])
|
||||
subprocess.check_call([constants.IP_BIN, "link", "set", self.brname, "up"])
|
||||
# create a new ebtables chain for this bridge
|
||||
ebtablescmds(check_call, [
|
||||
[EBTABLES_BIN, "-N", self.brname, "-P", self.policy],
|
||||
[EBTABLES_BIN, "-A", "FORWARD",
|
||||
"--logical-in", self.brname, "-j", self.brname]])
|
||||
ebtablescmds(subprocess.check_call, [
|
||||
[constants.EBTABLES_BIN, "-N", self.brname, "-P", self.policy],
|
||||
[constants.EBTABLES_BIN, "-A", "FORWARD", "--logical-in", self.brname, "-j", self.brname]
|
||||
])
|
||||
# turn off multicast snooping so mcast forwarding occurs w/o IGMP joins
|
||||
snoop = "/sys/devices/virtual/net/%s/bridge/multicast_snooping" % \
|
||||
self.brname
|
||||
snoop = "/sys/devices/virtual/net/%s/bridge/multicast_snooping" % self.brname
|
||||
if os.path.exists(snoop):
|
||||
open(snoop, "w").write('0')
|
||||
except Exception, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_WARNING, self.brname,
|
||||
"Error setting bridge parameters: %s" % e)
|
||||
except subprocess.CalledProcessError:
|
||||
logger.exception("Error setting bridge parameters")
|
||||
|
||||
self.up = True
|
||||
|
||||
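Stripped of error handling, the bridge setup above issues the following commands; the bridge name here is an invented example and the policy would be the class default:

import subprocess

brname = "b.1.abcd"
subprocess.check_call(["brctl", "addbr", brname])
subprocess.check_call(["brctl", "stp", brname, "off"])   # no spanning tree protocol
subprocess.check_call(["brctl", "setfd", brname, "0"])   # no forwarding delay
subprocess.check_call(["ip", "link", "set", brname, "up"])
# per-bridge ebtables chain that FORWARD traffic is jumped into
subprocess.check_call(["ebtables", "-N", brname, "-P", "DROP"])
subprocess.check_call(["ebtables", "-A", "FORWARD", "--logical-in", brname, "-j", brname])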
|
@@ -260,12 +267,12 @@ class LxBrNet(PyCoreNet):
|
|||
if not self.up:
|
||||
return
|
||||
ebq.stopupdateloop(self)
|
||||
mutecall([IP_BIN, "link", "set", self.brname, "down"])
|
||||
mutecall([BRCTL_BIN, "delbr", self.brname])
|
||||
ebtablescmds(mutecall, [
|
||||
[EBTABLES_BIN, "-D", "FORWARD",
|
||||
utils.mutecall([constants.IP_BIN, "link", "set", self.brname, "down"])
|
||||
utils.mutecall([constants.BRCTL_BIN, "delbr", self.brname])
|
||||
ebtablescmds(utils.mutecall, [
|
||||
[constants.EBTABLES_BIN, "-D", "FORWARD",
|
||||
"--logical-in", self.brname, "-j", self.brname],
|
||||
[EBTABLES_BIN, "-X", self.brname]])
|
||||
[constants.EBTABLES_BIN, "-X", self.brname]])
|
||||
for netif in self.netifs():
|
||||
# removes veth pairs used for bridge-to-bridge connections
|
||||
netif.shutdown()
|
||||
|
@@ -277,23 +284,19 @@ class LxBrNet(PyCoreNet):
|
|||
def attach(self, netif):
|
||||
if self.up:
|
||||
try:
|
||||
check_call([BRCTL_BIN, "addif", self.brname, netif.localname])
|
||||
check_call([IP_BIN, "link", "set", netif.localname, "up"])
|
||||
except Exception, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.brname,
|
||||
"Error joining interface %s to bridge %s: %s" % \
|
||||
(netif.localname, self.brname, e))
|
||||
subprocess.check_call([constants.BRCTL_BIN, "addif", self.brname, netif.localname])
|
||||
subprocess.check_call([constants.IP_BIN, "link", "set", netif.localname, "up"])
|
||||
except subprocess.CalledProcessError:
|
||||
logger.exception("Error joining interface %s to bridge %s", netif.localname, self.brname)
|
||||
return
|
||||
PyCoreNet.attach(self, netif)
|
||||
|
||||
def detach(self, netif):
|
||||
if self.up:
|
||||
try:
|
||||
check_call([BRCTL_BIN, "delif", self.brname, netif.localname])
|
||||
except Exception, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.brname,
|
||||
"Error removing interface %s from bridge %s: %s" % \
|
||||
(netif.localname, self.brname, e))
|
||||
subprocess.check_call([constants.BRCTL_BIN, "delif", self.brname, netif.localname])
|
||||
except subprocess.CalledProcessError:
|
||||
logger.exception("Error removing interface %s from bridge %s", netif.localname, self.brname)
|
||||
return
|
||||
PyCoreNet.detach(self, netif)
|
||||
|
||||
|
@@ -316,9 +319,10 @@ class LxBrNet(PyCoreNet):
|
|||
return linked
|
||||
|
||||
def unlink(self, netif1, netif2):
|
||||
''' Unlink two PyCoreNetIfs, resulting in adding or removing ebtables
|
||||
"""
|
||||
Unlink two PyCoreNetIfs, resulting in adding or removing ebtables
|
||||
filtering rules.
|
||||
'''
|
||||
"""
|
||||
self._linked_lock.acquire()
|
||||
if not self.linked(netif1, netif2):
|
||||
self._linked_lock.release()
|
||||
|
@@ -328,9 +332,10 @@ class LxBrNet(PyCoreNet):
|
|||
ebq.ebchange(self)
|
||||
|
||||
def link(self, netif1, netif2):
|
||||
''' Link two PyCoreNetIfs together, resulting in adding or removing
|
||||
"""
|
||||
Link two PyCoreNetIfs together, resulting in adding or removing
|
||||
ebtables filtering rules.
|
||||
'''
|
||||
"""
|
||||
self._linked_lock.acquire()
|
||||
if self.linked(netif1, netif2):
|
||||
self._linked_lock.release()
|
||||
|
@@ -339,37 +344,35 @@ class LxBrNet(PyCoreNet):
|
|||
self._linked_lock.release()
|
||||
ebq.ebchange(self)
|
||||
|
||||
def linkconfig(self, netif, bw = None, delay = None,
|
||||
loss = None, duplicate = None, jitter = None, netif2 = None,
|
||||
devname = None):
|
||||
''' Configure link parameters by applying tc queuing disciplines on the
|
||||
interface.
|
||||
'''
|
||||
def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None,
|
||||
jitter=None, netif2=None, devname=None):
|
||||
"""
|
||||
Configure link parameters by applying tc queuing disciplines on the
|
||||
interface.
|
||||
"""
|
||||
if devname is None:
|
||||
devname = netif.localname
|
||||
tc = [TC_BIN, "qdisc", "replace", "dev", devname]
|
||||
tc = [constants.TC_BIN, "qdisc", "replace", "dev", devname]
|
||||
parent = ["root"]
|
||||
changed = False
|
||||
if netif.setparam('bw', bw):
|
||||
# from tc-tbf(8): minimum value for burst is rate / kernel_hz
|
||||
if bw is not None:
|
||||
burst = max(2 * netif.mtu, bw / 1000)
|
||||
limit = 0xffff # max IP payload
|
||||
limit = 0xffff # max IP payload
|
||||
tbf = ["tbf", "rate", str(bw),
|
||||
"burst", str(burst), "limit", str(limit)]
|
||||
if bw > 0:
|
||||
if self.up:
|
||||
if (self.verbose):
|
||||
self.info("linkconfig: %s" % \
|
||||
([tc + parent + ["handle", "1:"] + tbf],))
|
||||
check_call(tc + parent + ["handle", "1:"] + tbf)
|
||||
logger.info("linkconfig: %s" % ([tc + parent + ["handle", "1:"] + tbf],))
|
||||
subprocess.check_call(tc + parent + ["handle", "1:"] + tbf)
|
||||
netif.setparam('has_tbf', True)
|
||||
changed = True
|
||||
elif netif.getparam('has_tbf') and bw <= 0:
|
||||
tcd = [] + tc
|
||||
tcd[2] = "delete"
|
||||
if self.up:
|
||||
check_call(tcd + parent)
|
||||
subprocess.check_call(tcd + parent)
|
||||
netif.setparam('has_tbf', False)
|
||||
# removing the parent removes the child
|
||||
netif.setparam('has_netem', False)
|
||||
|
@ -395,7 +398,7 @@ class LxBrNet(PyCoreNet):
|
|||
netem += ["delay", "0us", "%sus" % jitter, "25%"]
|
||||
else:
|
||||
netem += ["%sus" % jitter, "25%"]
|
||||
|
||||
|
||||
if loss is not None:
|
||||
netem += ["loss", "%s%%" % min(loss, 100)]
|
||||
if duplicate is not None:
|
||||
|
@ -406,24 +409,21 @@ class LxBrNet(PyCoreNet):
|
|||
return
|
||||
tc[2] = "delete"
|
||||
if self.up:
|
||||
if self.verbose:
|
||||
self.info("linkconfig: %s" % \
|
||||
([tc + parent + ["handle", "10:"]],))
|
||||
check_call(tc + parent + ["handle", "10:"])
|
||||
logger.info("linkconfig: %s" % ([tc + parent + ["handle", "10:"]],))
|
||||
subprocess.check_call(tc + parent + ["handle", "10:"])
|
||||
netif.setparam('has_netem', False)
|
||||
elif len(netem) > 1:
|
||||
if self.up:
|
||||
if self.verbose:
|
||||
self.info("linkconfig: %s" % \
|
||||
([tc + parent + ["handle", "10:"] + netem],))
|
||||
check_call(tc + parent + ["handle", "10:"] + netem)
|
||||
logger.info("linkconfig: %s" % ([tc + parent + ["handle", "10:"] + netem],))
|
||||
subprocess.check_call(tc + parent + ["handle", "10:"] + netem)
|
||||
netif.setparam('has_netem', True)
|
||||
|
||||
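The bandwidth and delay parameters above translate into a tbf root qdisc with a netem child on the bridge-side device. A hedged sketch of the generated commands for one interface; the device name and numbers are invented for illustration (burst = max(2 * MTU, bw / 1000), limit = 0xffff):

import subprocess

dev = "veth1.0.abcd"
# token bucket filter enforcing the configured rate in bps
subprocess.check_call(["tc", "qdisc", "replace", "dev", dev, "root", "handle", "1:",
                       "tbf", "rate", "512000", "burst", "3000", "limit", "65535"])
# netem under the tbf handle adds delay/jitter (microseconds) and loss (percent)
subprocess.check_call(["tc", "qdisc", "replace", "dev", dev, "parent", "1:", "handle", "10:",
                       "netem", "delay", "20000us", "5000us", "25%", "loss", "1%"])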
def linknet(self, net):
|
||||
''' Link this bridge with another by creating a veth pair and installing
|
||||
each device into each bridge.
|
||||
'''
|
||||
sessionid = self.session.shortsessionid()
|
||||
"""
|
||||
Link this bridge with another by creating a veth pair and installing
|
||||
each device into each bridge.
|
||||
"""
|
||||
sessionid = self.session.short_session_id()
|
||||
try:
|
||||
self_objid = '%x' % self.objid
|
||||
except TypeError:
|
||||
|
@@ -434,19 +434,18 @@ class LxBrNet(PyCoreNet):
|
|||
net_objid = '%s' % net.objid
|
||||
localname = 'veth%s.%s.%s' % (self_objid, net_objid, sessionid)
|
||||
if len(localname) >= 16:
|
||||
raise ValueError, "interface local name '%s' too long" % \
|
||||
localname
|
||||
raise ValueError("interface local name '%s' too long" % localname)
|
||||
name = 'veth%s.%s.%s' % (net_objid, self_objid, sessionid)
|
||||
if len(name) >= 16:
|
||||
raise ValueError, "interface name '%s' too long" % name
|
||||
netif = VEth(node = None, name = name, localname = localname,
|
||||
mtu = 1500, net = self, start = self.up)
|
||||
raise ValueError("interface name '%s' too long" % name)
|
||||
netif = VEth(node=None, name=name, localname=localname,
|
||||
mtu=1500, net=self, start=self.up)
|
||||
self.attach(netif)
|
||||
if net.up:
|
||||
# this is similar to net.attach() but uses netif.name instead
|
||||
# this is similar to net.attach() but uses netif.name instead
|
||||
# of localname
|
||||
check_call([BRCTL_BIN, "addif", net.brname, netif.name])
|
||||
check_call([IP_BIN, "link", "set", netif.name, "up"])
|
||||
subprocess.check_call([constants.BRCTL_BIN, "addif", net.brname, netif.name])
|
||||
subprocess.check_call([constants.IP_BIN, "link", "set", netif.name, "up"])
|
||||
i = net.newifindex()
|
||||
net._netif[i] = netif
|
||||
with net._linked_lock:
|
||||
|
@@ -454,38 +453,39 @@ class LxBrNet(PyCoreNet):
|
|||
netif.net = self
|
||||
netif.othernet = net
|
||||
return netif
|
||||
|
||||
|
||||
def getlinknetif(self, net):
|
||||
''' Return the interface of that links this net with another net
|
||||
"""
|
||||
Return the interface of that links this net with another net
|
||||
(that were linked using linknet()).
|
||||
'''
|
||||
"""
|
||||
for netif in self.netifs():
|
||||
if hasattr(netif, 'othernet') and netif.othernet == net:
|
||||
return netif
|
||||
return None
|
||||
|
||||
def addrconfig(self, addrlist):
|
||||
''' Set addresses on the bridge.
|
||||
'''
|
||||
"""
|
||||
Set addresses on the bridge.
|
||||
"""
|
||||
if not self.up:
|
||||
return
|
||||
for addr in addrlist:
|
||||
try:
|
||||
check_call([IP_BIN, "addr", "add", str(addr), "dev", self.brname])
|
||||
except Exception, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.brname,
|
||||
"Error adding IP address: %s" % e)
|
||||
subprocess.check_call([constants.IP_BIN, "addr", "add", str(addr), "dev", self.brname])
|
||||
except subprocess.CalledProcessError:
|
||||
logger.exception("Error adding IP address")
|
||||
|
||||
|
||||
class GreTapBridge(LxBrNet):
|
||||
''' A network consisting of a bridge with a gretap device for tunneling to
|
||||
another system.
|
||||
'''
|
||||
def __init__(self, session, remoteip = None, objid = None, name = None,
|
||||
policy = "ACCEPT", localip = None, ttl = 255, key = None,
|
||||
verbose = False, start = True):
|
||||
LxBrNet.__init__(self, session = session, objid = objid,
|
||||
name = name, verbose = verbose, policy = policy,
|
||||
start = False)
|
||||
"""
|
||||
A network consisting of a bridge with a gretap device for tunneling to
|
||||
another system.
|
||||
"""
|
||||
|
||||
def __init__(self, session, remoteip=None, objid=None, name=None,
|
||||
policy="ACCEPT", localip=None, ttl=255, key=None, start=True):
|
||||
LxBrNet.__init__(self, session=session, objid=objid, name=name, policy=policy, start=False)
|
||||
self.grekey = key
|
||||
if self.grekey is None:
|
||||
self.grekey = self.session.sessionid ^ self.objid
|
||||
|
@ -497,47 +497,51 @@ class GreTapBridge(LxBrNet):
|
|||
if remoteip is None:
|
||||
self.gretap = None
|
||||
else:
|
||||
self.gretap = GreTap(node = self, name = None, session = session,
|
||||
remoteip = remoteip, objid = None, localip = localip, ttl = ttl,
|
||||
key = self.grekey)
|
||||
self.gretap = GreTap(node=self, name=None, session=session,
|
||||
remoteip=remoteip, objid=None, localip=localip, ttl=ttl,
|
||||
key=self.grekey)
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def startup(self):
|
||||
''' Creates a bridge and adds the gretap device to it.
|
||||
'''
|
||||
"""
|
||||
Creates a bridge and adds the gretap device to it.
|
||||
"""
|
||||
LxBrNet.startup(self)
|
||||
if self.gretap:
|
||||
self.attach(self.gretap)
|
||||
|
||||
def shutdown(self):
|
||||
''' Detach the gretap device and remove the bridge.
|
||||
'''
|
||||
"""
|
||||
Detach the gretap device and remove the bridge.
|
||||
"""
|
||||
if self.gretap:
|
||||
self.detach(self.gretap)
|
||||
self.gretap.shutdown()
|
||||
self.gretap = None
|
||||
LxBrNet.shutdown(self)
|
||||
|
||||
|
||||
def addrconfig(self, addrlist):
|
||||
''' Set the remote tunnel endpoint. This is a one-time method for
|
||||
creating the GreTap device, which requires the remoteip at startup.
|
||||
The 1st address in the provided list is remoteip, 2nd optionally
|
||||
specifies localip.
|
||||
'''
|
||||
"""
|
||||
Set the remote tunnel endpoint. This is a one-time method for
|
||||
creating the GreTap device, which requires the remoteip at startup.
|
||||
The 1st address in the provided list is remoteip, 2nd optionally
|
||||
specifies localip.
|
||||
"""
|
||||
if self.gretap:
|
||||
raise ValueError, "gretap already exists for %s" % self.name
|
||||
remoteip = addrlist[0].split('/')[0]
|
||||
localip = None
|
||||
if len(addrlist) > 1:
|
||||
localip = addrlist[1].split('/')[0]
|
||||
self.gretap = GreTap(session = self.session, remoteip = remoteip,
|
||||
objid = None, name = None,
|
||||
localip = localip, ttl = self.ttl, key = self.grekey)
|
||||
self.gretap = GreTap(session=self.session, remoteip=remoteip,
|
||||
objid=None, name=None,
|
||||
localip=localip, ttl=self.ttl, key=self.grekey)
|
||||
self.attach(self.gretap)
|
||||
|
||||
def setkey(self, key):
|
||||
''' Set the GRE key used for the GreTap device. This needs to be set
|
||||
prior to instantiating the GreTap device (before addrconfig).
|
||||
'''
|
||||
"""
|
||||
Set the GRE key used for the GreTap device. This needs to be set
|
||||
prior to instantiating the GreTap device (before addrconfig).
|
||||
"""
|
||||
self.grekey = key
|
||||
|
|
|
@@ -1,35 +1,37 @@
#
# CORE
# Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# authors: Tom Goff <thomas.goff@boeing.com>
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
"""
vnode.py: PyCoreNode and LxcNode classes that implement the network namespace
virtual node.
'''
"""

import os, signal, sys, subprocess, vnodeclient, threading, string, shutil
import random, time
from core.api import coreapi
from core.misc.utils import *
from core.constants import *
from core.coreobj import PyCoreObj, PyCoreNode, PyCoreNetIf, Position
from core.netns.vif import VEth, TunTap
from core.emane.nodes import EmaneNode
import os
import random
import shutil
import signal
import string
import subprocess
import threading

from core import constants
from core.coreobj import PyCoreNetIf
from core.coreobj import PyCoreNode
from core.enumerations import NodeTypes
from core.misc import log
from core.misc import nodeutils
from core.misc import utils
from core.netns import vnodeclient
from core.netns.vif import TunTap
from core.netns.vif import VEth

logger = log.get_logger(__name__)

utils.check_executables([constants.IP_BIN])

checkexec([IP_BIN])

class SimpleLxcNode(PyCoreNode):
def __init__(self, session, objid = None, name = None, nodedir = None,
verbose = False, start = True):
PyCoreNode.__init__(self, session, objid, name, verbose=verbose,
start=start)
def __init__(self, session, objid=None, name=None, nodedir=None, start=True):
PyCoreNode.__init__(self, session, objid, name, start=start)
self.nodedir = nodedir
self.ctrlchnlname = \
os.path.abspath(os.path.join(self.session.sessiondir, self.name))
self.ctrlchnlname = os.path.abspath(os.path.join(self.session.session_dir, self.name))
self.vnodeclient = None
self.pid = None
self.up = False
|
@@ -44,68 +46,74 @@ class SimpleLxcNode(PyCoreNode):
|
|||
return True
|
||||
|
||||
def startup(self):
|
||||
''' Start a new namespace node by invoking the vnoded process that
|
||||
allocates a new namespace. Bring up the loopback device and set
|
||||
the hostname.
|
||||
'''
|
||||
"""
|
||||
Start a new namespace node by invoking the vnoded process that
|
||||
allocates a new namespace. Bring up the loopback device and set
|
||||
the hostname.
|
||||
"""
|
||||
if self.up:
|
||||
raise Exception, "already up"
|
||||
vnoded = ["%s/vnoded" % CORE_SBIN_DIR, "-v", "-c", self.ctrlchnlname,
|
||||
raise Exception("already up")
|
||||
vnoded = ["%s/vnoded" % constants.CORE_SBIN_DIR, "-v", "-c", self.ctrlchnlname,
|
||||
"-l", self.ctrlchnlname + ".log",
|
||||
"-p", self.ctrlchnlname + ".pid"]
|
||||
if self.nodedir:
|
||||
vnoded += ["-C", self.nodedir]
|
||||
env = self.session.getenviron(state=False)
|
||||
env = self.session.get_environment(state=False)
|
||||
env['NODE_NUMBER'] = str(self.objid)
|
||||
env['NODE_NAME'] = str(self.name)
|
||||
|
||||
try:
|
||||
tmp = subprocess.Popen(vnoded, stdout = subprocess.PIPE, env = env)
|
||||
except OSError, e:
|
||||
msg = "error running vnoded command: %s (%s)" % (vnoded, e)
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_FATAL,
|
||||
"SimpleLxcNode.startup()", msg)
|
||||
raise Exception, msg
|
||||
tmp = subprocess.Popen(vnoded, stdout=subprocess.PIPE, env=env)
|
||||
except OSError:
|
||||
msg = "error running vnoded command: %s" % vnoded
|
||||
logger.exception("SimpleLxcNode.startup(): %s", msg)
|
||||
raise Exception(msg)
|
||||
|
||||
try:
|
||||
self.pid = int(tmp.stdout.read())
|
||||
tmp.stdout.close()
|
||||
except Exception:
|
||||
except ValueError:
|
||||
msg = "vnoded failed to create a namespace; "
|
||||
msg += "check kernel support and user priveleges"
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_FATAL,
|
||||
"SimpleLxcNode.startup()", msg)
|
||||
logger.exception("SimpleLxcNode.startup(): %s", msg)
|
||||
|
||||
if tmp.wait():
|
||||
raise Exception, ("command failed: %s" % vnoded)
|
||||
self.vnodeclient = vnodeclient.VnodeClient(self.name,
|
||||
self.ctrlchnlname)
|
||||
self.info("bringing up loopback interface")
|
||||
self.cmd([IP_BIN, "link", "set", "lo", "up"])
|
||||
self.info("setting hostname: %s" % self.name)
|
||||
raise Exception("command failed: %s" % vnoded)
|
||||
|
||||
self.vnodeclient = vnodeclient.VnodeClient(self.name, self.ctrlchnlname)
|
||||
logger.info("bringing up loopback interface")
|
||||
self.cmd([constants.IP_BIN, "link", "set", "lo", "up"])
|
||||
logger.info("setting hostname: %s" % self.name)
|
||||
self.cmd(["hostname", self.name])
|
||||
self.up = True
|
||||
|
||||
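The namespace handshake above can be read as: spawn vnoded, which prints the PID of the new namespace on stdout, then talk to it over the control channel socket. A standalone sketch with example paths (the real binary path comes from constants.CORE_SBIN_DIR and the channel lives in the session directory):

import subprocess

ctrlchnlname = "/tmp/pycore.12345/n1"   # example control channel path
vnoded = ["/usr/local/sbin/vnoded", "-v", "-c", ctrlchnlname,
          "-l", ctrlchnlname + ".log", "-p", ctrlchnlname + ".pid"]
p = subprocess.Popen(vnoded, stdout=subprocess.PIPE)
pid = int(p.stdout.read())              # vnoded reports the namespace PID on stdout
p.stdout.close()
if p.wait():
    raise Exception("command failed: %s" % vnoded)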
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
|
||||
while self._mounts:
|
||||
source, target = self._mounts.pop(-1)
|
||||
self.umount(target)
|
||||
|
||||
for netif in self.netifs():
|
||||
netif.shutdown()
|
||||
|
||||
try:
|
||||
os.kill(self.pid, signal.SIGTERM)
|
||||
os.waitpid(self.pid, 0)
|
||||
except OSError:
|
||||
pass
|
||||
logger.exception("error killing process")
|
||||
|
||||
try:
|
||||
os.unlink(self.ctrlchnlname)
|
||||
except OSError:
|
||||
pass
|
||||
logger.exception("error removing file")
|
||||
|
||||
self._netif.clear()
|
||||
self.vnodeclient.close()
|
||||
self.up = False
|
||||
|
||||
def cmd(self, args, wait = True):
|
||||
def cmd(self, args, wait=True):
|
||||
return self.vnodeclient.cmd(args, wait)
|
||||
|
||||
def cmdresult(self, args):
|
||||
|
@@ -117,93 +125,107 @@ class SimpleLxcNode(PyCoreNode):
|
|||
def icmd(self, args):
|
||||
return self.vnodeclient.icmd(args)
|
||||
|
||||
def redircmd(self, infd, outfd, errfd, args, wait = True):
|
||||
def redircmd(self, infd, outfd, errfd, args, wait=True):
|
||||
return self.vnodeclient.redircmd(infd, outfd, errfd, args, wait)
|
||||
|
||||
def term(self, sh = "/bin/sh"):
|
||||
return self.vnodeclient.term(sh = sh)
|
||||
def term(self, sh="/bin/sh"):
|
||||
return self.vnodeclient.term(sh=sh)
|
||||
|
||||
def termcmdstring(self, sh = "/bin/sh"):
|
||||
return self.vnodeclient.termcmdstring(sh = sh)
|
||||
def termcmdstring(self, sh="/bin/sh"):
|
||||
return self.vnodeclient.termcmdstring(sh=sh)
|
||||
|
||||
def shcmd(self, cmdstr, sh = "/bin/sh"):
|
||||
return self.vnodeclient.shcmd(cmdstr, sh = sh)
|
||||
def shcmd(self, cmdstr, sh="/bin/sh"):
|
||||
return self.vnodeclient.shcmd(cmdstr, sh=sh)
|
||||
|
||||
def boot(self):
|
||||
pass
|
||||
|
||||
def mount(self, source, target):
|
||||
source = os.path.abspath(source)
|
||||
self.info("mounting %s at %s" % (source, target))
|
||||
logger.info("mounting %s at %s" % (source, target))
|
||||
try:
|
||||
shcmd = "mkdir -p '%s' && %s -n --bind '%s' '%s'" % \
|
||||
(target, MOUNT_BIN, source, target)
|
||||
(target, constants.MOUNT_BIN, source, target)
|
||||
self.shcmd(shcmd)
|
||||
self._mounts.append((source, target))
|
||||
except:
|
||||
self.warn("mounting failed for %s at %s" % (source, target))
|
||||
logger.exception("mounting failed for %s at %s", source, target)
|
||||
|
||||
def umount(self, target):
|
||||
self.info("unmounting '%s'" % target)
|
||||
logger.info("unmounting '%s'" % target)
|
||||
try:
|
||||
self.cmd([UMOUNT_BIN, "-n", "-l", target])
|
||||
self.cmd([constants.UMOUNT_BIN, "-n", "-l", target])
|
||||
except:
|
||||
self.warn("unmounting failed for %s" % target)
|
||||
logger.exception("unmounting failed for %s" % target)
|
||||
|
||||
def newifindex(self):
|
||||
with self.lock:
|
||||
return PyCoreNode.newifindex(self)
|
||||
|
||||
def newveth(self, ifindex = None, ifname = None, net = None):
|
||||
def newveth(self, ifindex=None, ifname=None, net=None):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if ifindex is None:
|
||||
ifindex = self.newifindex()
|
||||
|
||||
if ifname is None:
|
||||
ifname = "eth%d" % ifindex
|
||||
sessionid = self.session.shortsessionid()
|
||||
|
||||
sessionid = self.session.short_session_id()
|
||||
|
||||
try:
|
||||
suffix = '%x.%s.%s' % (self.objid, ifindex, sessionid)
|
||||
except TypeError:
|
||||
suffix = '%s.%s.%s' % (self.objid, ifindex, sessionid)
|
||||
|
||||
localname = 'veth' + suffix
|
||||
if len(localname) >= 16:
|
||||
raise ValueError, "interface local name '%s' too long" % \
|
||||
localname
|
||||
localname
|
||||
name = localname + 'p'
|
||||
if len(name) >= 16:
|
||||
raise ValueError, "interface name '%s' too long" % name
|
||||
ifclass = VEth
|
||||
veth = ifclass(node = self, name = name, localname = localname,
|
||||
mtu = 1500, net = net, start = self.up)
|
||||
veth = VEth(node=self, name=name, localname=localname, mtu=1500, net=net, start=self.up)
|
||||
|
||||
if self.up:
|
||||
check_call([IP_BIN, "link", "set", veth.name,
|
||||
"netns", str(self.pid)])
|
||||
self.cmd([IP_BIN, "link", "set", veth.name, "name", ifname])
|
||||
subprocess.check_call([constants.IP_BIN, "link", "set", veth.name, "netns", str(self.pid)])
|
||||
self.cmd([constants.IP_BIN, "link", "set", veth.name, "name", ifname])
|
||||
|
||||
veth.name = ifname
|
||||
|
||||
# retrieve interface information
|
||||
result, output = self.cmdresult(["ip", "link", "show", veth.name])
|
||||
logger.info("interface command output: %s", output)
|
||||
output = output.split("\n")
|
||||
veth.flow_id = int(output[0].strip().split(":")[0]) + 1
|
||||
logger.info("interface flow index: %s - %s", veth.name, veth.flow_id)
|
||||
veth.hwaddr = output[1].strip().split()[1]
|
||||
logger.info("interface mac: %s - %s", veth.name, veth.hwaddr)
|
||||
|
||||
try:
|
||||
self.addnetif(veth, ifindex)
|
||||
except:
|
||||
veth.shutdown()
|
||||
del veth
|
||||
raise
|
||||
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
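The naming logic above packs the node id, interface index and short session id into the veth device names, and both the bridge-side and node-side names must stay under the 16-character Linux interface name limit, hence the length checks. A small sketch with example values:

objid, ifindex, sessionid = 0x3e8, 0, "abcd"               # example values
localname = "veth%x.%s.%s" % (objid, ifindex, sessionid)   # e.g. veth3e8.0.abcd
name = localname + "p"                                     # peer end, later moved into the namespace
if len(localname) >= 16 or len(name) >= 16:
    raise ValueError("interface name too long: %s" % name)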
def newtuntap(self, ifindex = None, ifname = None, net = None):
|
||||
def newtuntap(self, ifindex=None, ifname=None, net=None):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if ifindex is None:
|
||||
ifindex = self.newifindex()
|
||||
if ifname is None:
|
||||
ifname = "eth%d" % ifindex
|
||||
sessionid = self.session.shortsessionid()
|
||||
sessionid = self.session.short_session_id()
|
||||
localname = "tap%s.%s.%s" % (self.objid, ifindex, sessionid)
|
||||
name = ifname
|
||||
ifclass = TunTap
|
||||
tuntap = ifclass(node = self, name = name, localname = localname,
|
||||
mtu = 1500, net = net, start = self.up)
|
||||
tuntap = ifclass(node=self, name=name, localname=localname,
|
||||
mtu=1500, net=net, start=self.up)
|
||||
try:
|
||||
self.addnetif(tuntap, ifindex)
|
||||
except:
|
||||
|
@@ -217,50 +239,49 @@ class SimpleLxcNode(PyCoreNode):
|
|||
def sethwaddr(self, ifindex, addr):
|
||||
self._netif[ifindex].sethwaddr(addr)
|
||||
if self.up:
|
||||
(status, result) = self.cmdresult([IP_BIN, "link", "set", "dev",
|
||||
self.ifname(ifindex), "address", str(addr)])
|
||||
(status, result) = self.cmdresult([constants.IP_BIN, "link", "set", "dev",
|
||||
self.ifname(ifindex), "address", str(addr)])
|
||||
if status:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
|
||||
"SimpleLxcNode.sethwaddr()",
|
||||
"error setting MAC address %s" % str(addr))
|
||||
logger.error("error setting MAC address %s", str(addr))
|
||||
|
||||
def addaddr(self, ifindex, addr):
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "addr", "add", str(addr),
|
||||
"dev", self.ifname(ifindex)])
|
||||
self.cmd([constants.IP_BIN, "addr", "add", str(addr),
|
||||
"dev", self.ifname(ifindex)])
|
||||
self._netif[ifindex].addaddr(addr)
|
||||
|
||||
def deladdr(self, ifindex, addr):
|
||||
try:
|
||||
self._netif[ifindex].deladdr(addr)
|
||||
except ValueError:
|
||||
self.warn("trying to delete unknown address: %s" % addr)
|
||||
logger.exception("trying to delete unknown address: %s" % addr)
|
||||
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "addr", "del", str(addr),
|
||||
"dev", self.ifname(ifindex)])
|
||||
self.cmd([constants.IP_BIN, "addr", "del", str(addr), "dev", self.ifname(ifindex)])
|
||||
|
||||
valid_deladdrtype = ("inet", "inet6", "inet6link")
|
||||
def delalladdr(self, ifindex, addrtypes = valid_deladdrtype):
|
||||
addr = self.getaddr(self.ifname(ifindex), rescan = True)
|
||||
|
||||
def delalladdr(self, ifindex, addrtypes=valid_deladdrtype):
|
||||
addr = self.getaddr(self.ifname(ifindex), rescan=True)
|
||||
for t in addrtypes:
|
||||
if t not in self.valid_deladdrtype:
|
||||
raise ValueError, "addr type must be in: " + \
|
||||
" ".join(self.valid_deladdrtype)
|
||||
raise ValueError("addr type must be in: " + " ".join(self.valid_deladdrtype))
|
||||
for a in addr[t]:
|
||||
self.deladdr(ifindex, a)
|
||||
# update cached information
|
||||
self.getaddr(self.ifname(ifindex), rescan = True)
|
||||
self.getaddr(self.ifname(ifindex), rescan=True)
|
||||
|
||||
def ifup(self, ifindex):
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "link", "set", self.ifname(ifindex), "up"])
|
||||
self.cmd([constants.IP_BIN, "link", "set", self.ifname(ifindex), "up"])
|
||||
|
||||
def newnetif(self, net = None, addrlist = [], hwaddr = None,
|
||||
ifindex = None, ifname = None):
|
||||
def newnetif(self, net=None, addrlist=[], hwaddr=None,
|
||||
ifindex=None, ifname=None):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if isinstance(net, EmaneNode):
|
||||
ifindex = self.newtuntap(ifindex = ifindex, ifname = ifname,
|
||||
net = net)
|
||||
if nodeutils.is_node(net, NodeTypes.EMANE):
|
||||
ifindex = self.newtuntap(ifindex=ifindex, ifname=ifname,
|
||||
net=net)
|
||||
# TUN/TAP is not ready for addressing yet; the device may
|
||||
# take some time to appear, and installing it into a
|
||||
# namespace after it has been bound removes addressing;
|
||||
|
@@ -268,18 +289,21 @@ class SimpleLxcNode(PyCoreNode):
|
|||
self.attachnet(ifindex, net)
|
||||
netif = self.netif(ifindex)
|
||||
netif.sethwaddr(hwaddr)
|
||||
for addr in maketuple(addrlist):
|
||||
for addr in utils.maketuple(addrlist):
|
||||
netif.addaddr(addr)
|
||||
return ifindex
|
||||
else:
|
||||
ifindex = self.newveth(ifindex = ifindex, ifname = ifname,
|
||||
net = net)
|
||||
ifindex = self.newveth(ifindex=ifindex, ifname=ifname, net=net)
|
||||
|
||||
if net is not None:
|
||||
self.attachnet(ifindex, net)
|
||||
|
||||
if hwaddr:
|
||||
self.sethwaddr(ifindex, hwaddr)
|
||||
for addr in maketuple(addrlist):
|
||||
|
||||
for addr in utils.maketuple(addrlist):
|
||||
self.addaddr(ifindex, addr)
|
||||
|
||||
self.ifup(ifindex)
|
||||
return ifindex
|
||||
finally:
|
||||
|
@@ -291,44 +315,42 @@ class SimpleLxcNode(PyCoreNode):
|
|||
for x in xrange(tmplen)])
|
||||
tmp2 = "tmp." + "".join([random.choice(string.ascii_lowercase)
|
||||
for x in xrange(tmplen)])
|
||||
check_call([IP_BIN, "link", "add", "name", tmp1,
|
||||
"type", "veth", "peer", "name", tmp2])
|
||||
subprocess.check_call([constants.IP_BIN, "link", "add", "name", tmp1,
|
||||
"type", "veth", "peer", "name", tmp2])
|
||||
|
||||
check_call([IP_BIN, "link", "set", tmp1, "netns", str(self.pid)])
|
||||
self.cmd([IP_BIN, "link", "set", tmp1, "name", ifname])
|
||||
subprocess.call([constants.IP_BIN, "link", "set", tmp1, "netns", str(self.pid)])
|
||||
self.cmd([constants.IP_BIN, "link", "set", tmp1, "name", ifname])
|
||||
self.addnetif(PyCoreNetIf(self, ifname), self.newifindex())
|
||||
|
||||
check_call([IP_BIN, "link", "set", tmp2, "netns", str(othernode.pid)])
|
||||
othernode.cmd([IP_BIN, "link", "set", tmp2, "name", otherifname])
|
||||
subprocess.check_call([constants.IP_BIN, "link", "set", tmp2, "netns", str(othernode.pid)])
|
||||
othernode.cmd([constants.IP_BIN, "link", "set", tmp2, "name", otherifname])
|
||||
othernode.addnetif(PyCoreNetIf(othernode, otherifname),
|
||||
othernode.newifindex())
|
||||
|
||||
def addfile(self, srcname, filename):
|
||||
shcmd = "mkdir -p $(dirname '%s') && mv '%s' '%s' && sync" % \
|
||||
(filename, srcname, filename)
|
||||
(filename, srcname, filename)
|
||||
self.shcmd(shcmd)
|
||||
|
||||
def getaddr(self, ifname, rescan = False):
|
||||
return self.vnodeclient.getaddr(ifname = ifname, rescan = rescan)
|
||||
def getaddr(self, ifname, rescan=False):
|
||||
return self.vnodeclient.getaddr(ifname=ifname, rescan=rescan)
|
||||
|
||||
def netifstats(self, ifname = None):
|
||||
return self.vnodeclient.netifstats(ifname = ifname)
|
||||
def netifstats(self, ifname=None):
|
||||
return self.vnodeclient.netifstats(ifname=ifname)
|
||||
|
||||
|
||||
class LxcNode(SimpleLxcNode):
|
||||
def __init__(self, session, objid = None, name = None,
|
||||
nodedir = None, bootsh = "boot.sh", verbose = False,
|
||||
start = True):
|
||||
super(LxcNode, self).__init__(session = session, objid = objid,
|
||||
name = name, nodedir = nodedir,
|
||||
verbose = verbose, start = start)
|
||||
def __init__(self, session, objid=None, name=None,
|
||||
nodedir=None, bootsh="boot.sh", start=True):
|
||||
super(LxcNode, self).__init__(session=session, objid=objid,
|
||||
name=name, nodedir=nodedir, start=start)
|
||||
self.bootsh = bootsh
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def boot(self):
|
||||
self.session.services.bootnodeservices(self)
|
||||
|
||||
|
||||
def validate(self):
|
||||
self.session.services.validatenodeservices(self)
|
||||
|
||||
|
@@ -339,10 +361,8 @@ class LxcNode(SimpleLxcNode):
|
|||
super(LxcNode, self).startup()
|
||||
self.privatedir("/var/run")
|
||||
self.privatedir("/var/log")
|
||||
except OSError, e:
|
||||
self.warn("Error with LxcNode.startup(): %s" % e)
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
|
||||
"LxcNode.startup()", "%s" % e)
|
||||
except OSError:
|
||||
logger.exception("error during LxcNode.startup()")
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
|
@@ -351,7 +371,7 @@ class LxcNode(SimpleLxcNode):
|
|||
return
|
||||
self.lock.acquire()
|
||||
# services are instead stopped when session enters datacollect state
|
||||
#self.session.services.stopnodeservices(self)
|
||||
# self.session.services.stopnodeservices(self)
|
||||
try:
|
||||
super(LxcNode, self).shutdown()
|
||||
finally:
|
||||
|
@@ -360,20 +380,20 @@ class LxcNode(SimpleLxcNode):
|
|||
|
||||
def privatedir(self, path):
|
||||
if path[0] != "/":
|
||||
raise ValueError, "path not fully qualified: " + path
|
||||
hostpath = os.path.join(self.nodedir,
|
||||
os.path.normpath(path).strip('/').replace('/', '.'))
|
||||
raise ValueError("path not fully qualified: %s" % path)
|
||||
hostpath = os.path.join(self.nodedir, os.path.normpath(path).strip('/').replace('/', '.'))
|
||||
|
||||
try:
|
||||
os.mkdir(hostpath)
|
||||
except OSError:
|
||||
pass
|
||||
except Exception, e:
|
||||
raise Exception, e
|
||||
logger.exception("error creating directory: %s", hostpath)
|
||||
|
||||
self.mount(hostpath, path)
|
||||
|
||||
def hostfilename(self, filename):
|
||||
''' Return the name of a node's file on the host filesystem.
|
||||
'''
|
||||
"""
|
||||
Return the name of a node's file on the host filesystem.
|
||||
"""
|
||||
dirname, basename = os.path.split(filename)
|
||||
if not basename:
|
||||
raise ValueError, "no basename for filename: " + filename
|
||||
|
@@ -383,28 +403,27 @@ class LxcNode(SimpleLxcNode):
|
|||
dirname = os.path.join(self.nodedir, dirname)
|
||||
return os.path.join(dirname, basename)
|
||||
|
||||
def opennodefile(self, filename, mode = "w"):
|
||||
def opennodefile(self, filename, mode="w"):
|
||||
hostfilename = self.hostfilename(filename)
|
||||
dirname, basename = os.path.split(hostfilename)
|
||||
if not os.path.isdir(dirname):
|
||||
os.makedirs(dirname, mode = 0755)
|
||||
os.makedirs(dirname, mode=0755)
|
||||
return open(hostfilename, mode)
|
||||
|
||||
def nodefile(self, filename, contents, mode = 0644):
|
||||
def nodefile(self, filename, contents, mode=0644):
|
||||
f = self.opennodefile(filename, "w")
|
||||
f.write(contents)
|
||||
os.chmod(f.name, mode)
|
||||
f.close()
|
||||
self.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode))
|
||||
|
||||
def nodefilecopy(self, filename, srcfilename, mode = None):
|
||||
''' Copy a file to a node, following symlinks and preserving metadata.
|
||||
logger.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode))
|
||||
|
||||
def nodefilecopy(self, filename, srcfilename, mode=None):
|
||||
"""
|
||||
Copy a file to a node, following symlinks and preserving metadata.
|
||||
Change file mode if specified.
|
||||
'''
|
||||
"""
|
||||
hostfilename = self.hostfilename(filename)
|
||||
shutil.copy2(srcfilename, hostfilename)
|
||||
if mode is not None:
|
||||
os.chmod(hostfilename, mode)
|
||||
self.info("copied nodefile: '%s'; mode: %s" % (hostfilename, mode))
|
||||
|
||||
|
||||
logger.info("copied nodefile: '%s'; mode: %s" % (hostfilename, mode))
|
||||
|
|
|
@@ -1,19 +1,17 @@
#
# CORE
# Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Tom Goff <thomas.goff@boeing.com>
#
'''
"""
vnodeclient.py: implementation of the VnodeClient class for issuing commands
over a control channel to the vnoded process running in a network namespace.
The control channel can be accessed via calls to the vcmd Python module or
by invoking the vcmd shell command.
'''
"""

import os, stat, sys
from core.constants import *
import os
import stat

from core import constants
from core.misc import log

logger = log.get_logger(__name__)

USE_VCMD_MODULE = True

|
@@ -22,7 +20,8 @@ if USE_VCMD_MODULE:
|
|||
else:
|
||||
import subprocess
|
||||
|
||||
VCMD = os.path.join(CORE_SBIN_DIR, "vcmd")
|
||||
VCMD = os.path.join(constants.CORE_SBIN_DIR, "vcmd")
|
||||
|
||||
|
||||
class VnodeClient(object):
|
||||
def __init__(self, name, ctrlchnlname):
|
||||
|
@@ -34,9 +33,6 @@ class VnodeClient(object):
|
|||
self.cmdchnl = None
|
||||
self._addr = {}
|
||||
|
||||
def warn(self, msg):
|
||||
print >> sys.stderr, "%s: %s" % (self.name, msg)
|
||||
|
||||
def connected(self):
|
||||
if USE_VCMD_MODULE:
|
||||
return self.cmdchnl.connected()
|
||||
|
@@ -47,12 +43,13 @@ class VnodeClient(object):
|
|||
if USE_VCMD_MODULE:
|
||||
self.cmdchnl.close()
|
||||
|
||||
def cmd(self, args, wait = True):
|
||||
''' Execute a command on a node and return the status (return code).
|
||||
'''
|
||||
def cmd(self, args, wait=True):
|
||||
"""
|
||||
Execute a command on a node and return the status (return code).
|
||||
"""
|
||||
if USE_VCMD_MODULE:
|
||||
if not self.cmdchnl.connected():
|
||||
raise ValueError, "self.cmdchnl not connected"
|
||||
raise ValueError("self.cmdchnl not connected")
|
||||
tmp = self.cmdchnl.qcmd(args)
|
||||
if not wait:
|
||||
return tmp
|
||||
|
@@ -62,19 +59,19 @@ class VnodeClient(object):
|
|||
mode = os.P_WAIT
|
||||
else:
|
||||
mode = os.P_NOWAIT
|
||||
tmp = os.spawnlp(mode, VCMD, VCMD, "-c",
|
||||
self.ctrlchnlname, "-q", "--", *args)
|
||||
tmp = os.spawnlp(mode, VCMD, VCMD, "-c", self.ctrlchnlname, "-q", "--", *args)
|
||||
if not wait:
|
||||
return tmp
|
||||
if tmp:
|
||||
self.warn("cmd exited with status %s: %s" % (tmp, str(args)))
|
||||
logger.warn("cmd exited with status %s: %s" % (tmp, str(args)))
|
||||
return tmp
|
||||
|
||||
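A hedged usage sketch for the client above; the node name and control channel path are illustrative, and cmd() returns the exit status of the command run inside the node's namespace:

from core.netns.vnodeclient import VnodeClient

client = VnodeClient("n1", "/tmp/pycore.12345/n1")   # name, control channel path (examples)
status = client.cmd(["hostname", "n1"])              # run a command in the namespace and wait
if status:
    print("command failed with status %s" % status)
client.close()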
def cmdresult(self, args):
|
||||
''' Execute a command on a node and return a tuple containing the
|
||||
exit status and result string. stderr output
|
||||
is folded into the stdout result string.
|
||||
'''
|
||||
"""
|
||||
Execute a command on a node and return a tuple containing the
|
||||
exit status and result string. stderr output
|
||||
is folded into the stdout result string.
|
||||
"""
|
||||
cmdid, cmdin, cmdout, cmderr = self.popen(args)
|
||||
result = cmdout.read()
|
||||
result += cmderr.read()
|
||||
|
@@ -82,43 +79,40 @@ class VnodeClient(object):
|
|||
cmdout.close()
|
||||
cmderr.close()
|
||||
status = cmdid.wait()
|
||||
return (status, result)
|
||||
return status, result
|
||||
|
||||
def popen(self, args):
|
||||
if USE_VCMD_MODULE:
|
||||
if not self.cmdchnl.connected():
|
||||
raise ValueError, "self.cmdchnl not connected"
|
||||
raise ValueError("self.cmdchnl not connected")
|
||||
return self.cmdchnl.popen(args)
|
||||
else:
|
||||
cmd = [VCMD, "-c", self.ctrlchnlname, "--"]
|
||||
cmd.extend(args)
|
||||
tmp = subprocess.Popen(cmd, stdin = subprocess.PIPE,
|
||||
stdout = subprocess.PIPE,
|
||||
stderr = subprocess.PIPE)
|
||||
tmp = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
return tmp, tmp.stdin, tmp.stdout, tmp.stderr
|
||||
|
||||
def icmd(self, args):
|
||||
return os.spawnlp(os.P_WAIT, VCMD, VCMD, "-c", self.ctrlchnlname,
|
||||
"--", *args)
|
||||
return os.spawnlp(os.P_WAIT, VCMD, VCMD, "-c", self.ctrlchnlname, "--", *args)
|
||||
|
||||
def redircmd(self, infd, outfd, errfd, args, wait = True):
|
||||
'''
|
||||
def redircmd(self, infd, outfd, errfd, args, wait=True):
|
||||
"""
|
||||
Execute a command on a node with standard input, output, and
|
||||
error redirected according to the given file descriptors.
|
||||
'''
|
||||
"""
|
||||
if not USE_VCMD_MODULE:
|
||||
raise NotImplementedError
|
||||
if not self.cmdchnl.connected():
|
||||
raise ValueError, "self.cmdchnl not connected"
|
||||
raise ValueError("self.cmdchnl not connected")
|
||||
tmp = self.cmdchnl.redircmd(infd, outfd, errfd, args)
|
||||
if not wait:
|
||||
return tmp
|
||||
tmp = tmp.wait()
|
||||
if tmp:
|
||||
self.warn("cmd exited with status %s: %s" % (tmp, str(args)))
|
||||
logger.warn("cmd exited with status %s: %s" % (tmp, str(args)))
|
||||
return tmp
|
||||
|
||||
def term(self, sh = "/bin/sh"):
|
||||
def term(self, sh="/bin/sh"):
|
||||
cmd = ("xterm", "-ut", "-title", self.name, "-e",
|
||||
VCMD, "-c", self.ctrlchnlname, "--", sh)
|
||||
if "SUDO_USER" in os.environ:
|
||||
|
@@ -127,19 +121,20 @@ class VnodeClient(object):
|
|||
os.environ["SUDO_USER"])
|
||||
return os.spawnvp(os.P_NOWAIT, cmd[0], cmd)
|
||||
|
||||
def termcmdstring(self, sh = "/bin/sh"):
|
||||
def termcmdstring(self, sh="/bin/sh"):
|
||||
return "%s -c %s -- %s" % (VCMD, self.ctrlchnlname, sh)
|
||||
|
||||
def shcmd(self, cmdstr, sh = "/bin/sh"):
|
||||
def shcmd(self, cmdstr, sh="/bin/sh"):
|
||||
return self.cmd([sh, "-c", cmdstr])
|
||||
|
||||
def getaddr(self, ifname, rescan = False):
|
||||
def getaddr(self, ifname, rescan=False):
|
||||
if ifname in self._addr and not rescan:
|
||||
return self._addr[ifname]
|
||||
tmp = {"ether": [], "inet": [], "inet6": [], "inet6link": []}
|
||||
cmd = [IP_BIN, "addr", "show", "dev", ifname]
|
||||
cmd = [constants.IP_BIN, "addr", "show", "dev", ifname]
|
||||
cmdid, cmdin, cmdout, cmderr = self.popen(cmd)
|
||||
cmdin.close()
|
||||
|
||||
for line in cmdout:
|
||||
line = line.strip().split()
|
||||
if line[0] == "link/ether":
|
||||
|
@@ -152,21 +147,20 @@ class VnodeClient(object):
|
|||
elif line[3] == "link":
|
||||
tmp["inet6link"].append(line[1])
|
||||
else:
|
||||
self.warn("unknown scope: %s" % line[3])
|
||||
else:
|
||||
pass
|
||||
logger.warn("unknown scope: %s" % line[3])
|
||||
|
||||
err = cmderr.read()
|
||||
cmdout.close()
|
||||
cmderr.close()
|
||||
status = cmdid.wait()
|
||||
if status:
|
||||
self.warn("nonzero exist status (%s) for cmd: %s" % (status, cmd))
|
||||
logger.warn("nonzero exist status (%s) for cmd: %s" % (status, cmd))
|
||||
if err:
|
||||
self.warn("error output: %s" % err)
|
||||
logger.warn("error output: %s" % err)
|
||||
self._addr[ifname] = tmp
|
||||
return tmp
|
||||
|
||||
def netifstats(self, ifname = None):
|
||||
def netifstats(self, ifname=None):
|
||||
stats = {}
|
||||
cmd = ["cat", "/proc/net/dev"]
|
||||
cmdid, cmdin, cmdout, cmderr = self.popen(cmd)
|
||||
|
@@ -195,34 +189,32 @@ class VnodeClient(object):
|
|||
cmderr.close()
|
||||
status = cmdid.wait()
|
||||
if status:
|
||||
self.warn("nonzero exist status (%s) for cmd: %s" % (status, cmd))
|
||||
logger.warn("nonzero exist status (%s) for cmd: %s" % (status, cmd))
|
||||
if err:
|
||||
self.warn("error output: %s" % err)
|
||||
logger.warn("error output: %s" % err)
|
||||
if ifname is not None:
|
||||
return stats[ifname]
|
||||
else:
|
||||
return stats
|
||||
|
||||
def createclients(sessiondir, clientcls = VnodeClient,
|
||||
cmdchnlfilterfunc = None):
|
||||
direntries = map(lambda x: os.path.join(sessiondir, x),
|
||||
os.listdir(sessiondir))
|
||||
|
||||
def createclients(sessiondir, clientcls=VnodeClient, cmdchnlfilterfunc=None):
|
||||
direntries = map(lambda x: os.path.join(sessiondir, x), os.listdir(sessiondir))
|
||||
cmdchnls = filter(lambda x: stat.S_ISSOCK(os.stat(x).st_mode), direntries)
|
||||
if cmdchnlfilterfunc:
|
||||
cmdchnls = filter(cmdchnlfilterfunc, cmdchnls)
|
||||
cmdchnls.sort()
|
||||
return map(lambda x: clientcls(os.path.basename(x), x), cmdchnls)
|
||||
|
||||
def createremoteclients(sessiondir, clientcls = VnodeClient,
|
||||
filterfunc = None):
|
||||
''' Creates remote VnodeClients, for nodes emulated on other machines. The
|
||||
|
||||
def createremoteclients(sessiondir, clientcls=VnodeClient, filterfunc=None):
|
||||
"""
|
||||
Creates remote VnodeClients, for nodes emulated on other machines. The
|
||||
session.Broker writes a n1.conf/server file having the server's info.
|
||||
'''
|
||||
direntries = map(lambda x: os.path.join(sessiondir, x),
|
||||
os.listdir(sessiondir))
|
||||
"""
|
||||
direntries = map(lambda x: os.path.join(sessiondir, x), os.listdir(sessiondir))
|
||||
nodedirs = filter(lambda x: stat.S_ISDIR(os.stat(x).st_mode), direntries)
|
||||
nodedirs = filter(lambda x: os.path.exists(os.path.join(x, "server")),
|
||||
nodedirs)
|
||||
nodedirs = filter(lambda x: os.path.exists(os.path.join(x, "server")), nodedirs)
|
||||
if filterfunc:
|
||||
nodedirs = filter(filterfunc, nodedirs)
|
||||
nodedirs.sort()
|
||||
|
|
|
@@ -1,42 +1,34 @@
#
# CORE
# Copyright (c)2011-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
''' PhysicalNode class for including real systems in the emulated network.
'''
import os, threading, subprocess
"""
PhysicalNode class for including real systems in the emulated network.
"""

from core.misc.ipaddr import *
from core.misc.utils import *
from core.constants import *
from core.api import coreapi
from core.coreobj import PyCoreNode, PyCoreNetIf
from core.emane.nodes import EmaneNode
if os.uname()[0] == "Linux":
from core.netns.vnet import LxBrNet
from core.netns.vif import GreTap
elif os.uname()[0] == "FreeBSD":
from core.bsd.vnet import NetgraphNet
import os
import subprocess
import threading

from core import constants
from core.coreobj import PyCoreNode
from core.misc import log
from core.misc import utils
from core.netns.vnet import GreTap
from core.netns.vnet import LxBrNet

logger = log.get_logger(__name__)


class PhysicalNode(PyCoreNode):
def __init__(self, session, objid = None, name = None,
nodedir = None, verbose = False, start = True):
PyCoreNode.__init__(self, session, objid, name, verbose=verbose,
start=start)
def __init__(self, session, objid=None, name=None, nodedir=None, start=True):
PyCoreNode.__init__(self, session, objid, name, start=start)
self.nodedir = nodedir
self.up = start
self.lock = threading.RLock()
self._mounts = []
if start:
self.startup()


def boot(self):
self.session.services.bootnodeservices(self)


def validate(self):
self.session.services.validatenodeservices(self)

|
@ -44,11 +36,10 @@ class PhysicalNode(PyCoreNode):
|
|||
self.lock.acquire()
|
||||
try:
|
||||
self.makenodedir()
|
||||
#self.privatedir("/var/run")
|
||||
#self.privatedir("/var/log")
|
||||
except OSError, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
|
||||
"PhysicalNode.startup()", e)
|
||||
# self.privatedir("/var/run")
|
||||
# self.privatedir("/var/log")
|
||||
except OSError:
|
||||
logger.exception("PhysicalNode.startup()")
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
|
@ -64,16 +55,17 @@ class PhysicalNode(PyCoreNode):
|
|||
self.rmnodedir()
|
||||
self.lock.release()
|
||||
|
||||
|
||||
def termcmdstring(self, sh = "/bin/sh"):
|
||||
''' The broker will add the appropriate SSH command to open a terminal
|
||||
def termcmdstring(self, sh="/bin/sh"):
|
||||
"""
|
||||
The broker will add the appropriate SSH command to open a terminal
|
||||
on this physical node.
|
||||
'''
|
||||
"""
|
||||
return sh
|
||||
|
||||
def cmd(self, args, wait = True):
|
||||
''' run a command on the physical node
|
||||
'''
|
||||
|
||||
def cmd(self, args, wait=True):
|
||||
"""
|
||||
run a command on the physical node
|
||||
"""
|
||||
os.chdir(self.nodedir)
|
||||
try:
|
||||
if wait:
|
||||
|
@ -82,87 +74,88 @@ class PhysicalNode(PyCoreNode):
|
|||
else:
|
||||
# os.spawnlp(os.P_NOWAIT, args)
|
||||
subprocess.Popen(args)
|
||||
except CalledProcessError, e:
|
||||
self.warn("cmd exited with status %s: %s" % (e, str(args)))
|
||||
|
||||
except subprocess.CalledProcessError:
|
||||
logger.exception("cmd exited with status: %s", str(args))
|
||||
|
||||
def cmdresult(self, args):
|
||||
''' run a command on the physical node and get the result
|
||||
'''
|
||||
"""
|
||||
run a command on the physical node and get the result
|
||||
"""
|
||||
os.chdir(self.nodedir)
|
||||
# in Python 2.7 we can use subprocess.check_output() here
|
||||
tmp = subprocess.Popen(args, stdin = open(os.devnull, 'r'),
|
||||
stdout = subprocess.PIPE,
|
||||
stderr = subprocess.STDOUT)
|
||||
result, err = tmp.communicate() # err will always be None
|
||||
tmp = subprocess.Popen(args, stdin=open(os.devnull, 'r'),
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT)
|
||||
# err will always be None
|
||||
result, err = tmp.communicate()
|
||||
status = tmp.wait()
|
||||
return (status, result)
|
||||
|
||||
def shcmd(self, cmdstr, sh = "/bin/sh"):
|
||||
return status, result
|
||||
|
||||
def shcmd(self, cmdstr, sh="/bin/sh"):
|
||||
return self.cmd([sh, "-c", cmdstr])
|
||||
|
||||
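For reference, a standalone sketch of the cmdresult()/shcmd() pattern above: run a command, capture combined stdout and stderr, and return (status, output). Nothing here is CORE-specific; it is plain subprocess usage.

import os
import subprocess

def run_and_capture(args):
    # equivalent in spirit to subprocess.check_output(), but returns the exit
    # status instead of raising, matching cmdresult() above
    p = subprocess.Popen(args, stdin=open(os.devnull, 'r'),
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output, _ = p.communicate()
    return p.wait(), output

status, output = run_and_capture(["/bin/sh", "-c", "echo hello"])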
def sethwaddr(self, ifindex, addr):
|
||||
''' same as SimpleLxcNode.sethwaddr()
|
||||
'''
|
||||
"""
|
||||
same as SimpleLxcNode.sethwaddr()
|
||||
"""
|
||||
self._netif[ifindex].sethwaddr(addr)
|
||||
ifname = self.ifname(ifindex)
|
||||
if self.up:
|
||||
(status, result) = self.cmdresult([IP_BIN, "link", "set", "dev",
|
||||
ifname, "address", str(addr)])
|
||||
(status, result) = self.cmdresult(
|
||||
[constants.IP_BIN, "link", "set", "dev", ifname, "address", str(addr)])
|
||||
if status:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
|
||||
"PhysicalNode.sethwaddr()",
|
||||
"error setting MAC address %s" % str(addr))
|
||||
|
||||
logger.error("error setting MAC address %s", str(addr))
|
||||
|
||||
def addaddr(self, ifindex, addr):
|
||||
''' same as SimpleLxcNode.addaddr()
|
||||
'''
|
||||
"""
|
||||
same as SimpleLxcNode.addaddr()
|
||||
"""
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "addr", "add", str(addr),
|
||||
"dev", self.ifname(ifindex)])
|
||||
self.cmd([constants.IP_BIN, "addr", "add", str(addr), "dev", self.ifname(ifindex)])
|
||||
|
||||
self._netif[ifindex].addaddr(addr)
|
||||
|
||||
def deladdr(self, ifindex, addr):
|
||||
''' same as SimpleLxcNode.deladdr()
|
||||
'''
|
||||
"""
|
||||
same as SimpleLxcNode.deladdr()
|
||||
"""
|
||||
try:
|
||||
self._netif[ifindex].deladdr(addr)
|
||||
except ValueError:
|
||||
self.warn("trying to delete unknown address: %s" % addr)
|
||||
logger.exception("trying to delete unknown address: %s", addr)
|
||||
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "addr", "del", str(addr),
|
||||
"dev", self.ifname(ifindex)])
|
||||
self.cmd([constants.IP_BIN, "addr", "del", str(addr), "dev", self.ifname(ifindex)])
|
||||
|
||||
def adoptnetif(self, netif, ifindex, hwaddr, addrlist):
|
||||
''' The broker builds a GreTap tunnel device to this physical node.
|
||||
"""
|
||||
The broker builds a GreTap tunnel device to this physical node.
|
||||
When a link message is received linking this node to another part of
|
||||
the emulation, no new interface is created; instead, adopt the
|
||||
GreTap netif as the node interface.
|
||||
'''
|
||||
"""
|
||||
netif.name = "gt%d" % ifindex
|
||||
netif.node = self
|
||||
self.addnetif(netif, ifindex)
|
||||
# use a more reasonable name, e.g. "gt0" instead of "gt.56286.150"
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "link", "set", "dev", netif.localname, "down"])
|
||||
self.cmd([IP_BIN, "link", "set", netif.localname, "name", netif.name])
|
||||
self.cmd([constants.IP_BIN, "link", "set", "dev", netif.localname, "down"])
|
||||
self.cmd([constants.IP_BIN, "link", "set", netif.localname, "name", netif.name])
|
||||
netif.localname = netif.name
|
||||
if hwaddr:
|
||||
self.sethwaddr(ifindex, hwaddr)
|
||||
for addr in maketuple(addrlist):
|
||||
for addr in utils.maketuple(addrlist):
|
||||
self.addaddr(ifindex, addr)
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "link", "set", "dev", netif.localname, "up"])
|
||||
|
||||
def linkconfig(self, netif, bw = None, delay = None,
|
||||
loss = None, duplicate = None, jitter = None, netif2 = None):
|
||||
''' Apply tc queing disciplines using LxBrNet.linkconfig()
|
||||
'''
|
||||
if os.uname()[0] == "Linux":
|
||||
netcls = LxBrNet
|
||||
elif os.uname()[0] == "FreeBSD":
|
||||
netcls = NetgraphNet
|
||||
else:
|
||||
raise NotImplementedError, "unsupported platform"
|
||||
self.cmd([constants.IP_BIN, "link", "set", "dev", netif.localname, "up"])
|
||||
|
||||
def linkconfig(self, netif, bw=None, delay=None,
|
||||
loss=None, duplicate=None, jitter=None, netif2=None):
|
||||
"""
|
||||
Apply tc queuing disciplines using LxBrNet.linkconfig()
|
||||
"""
|
||||
netcls = LxBrNet
|
||||
|
||||
# borrow the tc qdisc commands from LxBrNet.linkconfig()
|
||||
tmp = netcls(session=self.session, start=False)
|
||||
tmp.up = True
|
||||
|
@ -181,8 +174,8 @@ class PhysicalNode(PyCoreNode):
|
|||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def newnetif(self, net = None, addrlist = [], hwaddr = None,
|
||||
ifindex = None, ifname = None):
|
||||
def newnetif(self, net=None, addrlist=[], hwaddr=None,
|
||||
ifindex=None, ifname=None):
|
||||
if self.up and net is None:
|
||||
raise NotImplementedError
|
||||
if ifindex is None:
|
||||
|
@ -193,22 +186,20 @@ class PhysicalNode(PyCoreNode):
|
|||
# tunnel to net not built yet, so build it now and adopt it
|
||||
gt = self.session.broker.addnettunnel(net.objid)
|
||||
if gt is None or len(gt) != 1:
|
||||
self.session.warn("Error building tunnel from PhysicalNode."
|
||||
"newnetif()")
|
||||
logger.warn("Error building tunnel from PhysicalNode.newnetif()")
|
||||
gt = gt[0]
|
||||
net.detach(gt)
|
||||
self.adoptnetif(gt, ifindex, hwaddr, addrlist)
|
||||
return ifindex
|
||||
|
||||
|
||||
# this is reached when configuring services (self.up=False)
|
||||
if ifname is None:
|
||||
ifname = "gt%d" % ifindex
|
||||
netif = GreTap(node = self, name = ifname, session = self.session,
|
||||
start = False)
|
||||
netif = GreTap(node=self, name=ifname, session=self.session,
|
||||
start=False)
|
||||
self.adoptnetif(netif, ifindex, hwaddr, addrlist)
|
||||
return ifindex
|
||||
|
||||
|
||||
|
||||
def privatedir(self, path):
|
||||
if path[0] != "/":
|
||||
raise ValueError, "path not fully qualified: " + path
|
||||
|
@ -217,32 +208,31 @@ class PhysicalNode(PyCoreNode):
|
|||
try:
|
||||
os.mkdir(hostpath)
|
||||
except OSError:
|
||||
pass
|
||||
except Exception, e:
|
||||
raise Exception, e
|
||||
logger.exception("error creating directory: %s", hostpath)
|
||||
|
||||
self.mount(hostpath, path)
|
||||
|
||||
def mount(self, source, target):
|
||||
source = os.path.abspath(source)
|
||||
self.info("mounting %s at %s" % (source, target))
|
||||
logger.info("mounting %s at %s" % (source, target))
|
||||
|
||||
try:
|
||||
os.makedirs(target)
|
||||
except OSError:
|
||||
pass
|
||||
try:
|
||||
self.cmd([MOUNT_BIN, "--bind", source, target])
|
||||
self.cmd([constants.MOUNT_BIN, "--bind", source, target])
|
||||
self._mounts.append((source, target))
|
||||
except OSError:
|
||||
logger.exception("error making directories")
|
||||
except:
|
||||
self.warn("mounting failed for %s at %s" % (source, target))
|
||||
logger.exception("mounting failed for %s at %s", source, target)
|
||||
|
||||
def umount(self, target):
|
||||
self.info("unmounting '%s'" % target)
|
||||
logger.info("unmounting '%s'" % target)
|
||||
try:
|
||||
self.cmd([UMOUNT_BIN, "-l", target])
|
||||
self.cmd([constants.UMOUNT_BIN, "-l", target])
|
||||
except:
|
||||
self.warn("unmounting failed for %s" % target)
|
||||
logger.exception("unmounting failed for %s", target)
|
||||
|
||||
def opennodefile(self, filename, mode = "w"):
|
||||
def opennodefile(self, filename, mode="w"):
|
||||
dirname, basename = os.path.split(filename)
|
||||
if not basename:
|
||||
raise ValueError, "no basename for filename: " + filename
|
||||
|
@ -251,15 +241,13 @@ class PhysicalNode(PyCoreNode):
|
|||
dirname = dirname.replace("/", ".")
|
||||
dirname = os.path.join(self.nodedir, dirname)
|
||||
if not os.path.isdir(dirname):
|
||||
os.makedirs(dirname, mode = 0755)
|
||||
os.makedirs(dirname, mode=0755)
|
||||
hostfilename = os.path.join(dirname, basename)
|
||||
return open(hostfilename, mode)
|
||||
|
||||
def nodefile(self, filename, contents, mode = 0644):
|
||||
def nodefile(self, filename, contents, mode=0644):
|
||||
f = self.opennodefile(filename, "w")
|
||||
f.write(contents)
|
||||
os.chmod(f.name, mode)
|
||||
f.close()
|
||||
self.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode))
|
||||
|
||||
|
||||
logger.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode))
|
||||
|
|
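A hedged sketch of the privatedir()/mount()/umount() pattern above, kept outside the PhysicalNode class; the mount and umount binary paths below are placeholders standing in for constants.MOUNT_BIN and constants.UMOUNT_BIN.

import os
import subprocess

MOUNT_BIN = "/bin/mount"      # placeholder; CORE uses constants.MOUNT_BIN
UMOUNT_BIN = "/bin/umount"    # placeholder; CORE uses constants.UMOUNT_BIN

def bind_mount(source, target):
    # make a per-node host directory appear at a private path such as /var/run
    source = os.path.abspath(source)
    if not os.path.isdir(target):
        os.makedirs(target)
    subprocess.check_call([MOUNT_BIN, "--bind", source, target])

def lazy_umount(target):
    # -l detaches immediately, even if the mount point is still busy
    subprocess.check_call([UMOUNT_BIN, "-l", target])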
|
@ -1,27 +0,0 @@
|
|||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
|
||||
"""
|
||||
This is a convenience module that imports a set of platform-dependent
|
||||
defaults.
|
||||
"""
|
||||
|
||||
from misc.utils import ensurepath
|
||||
ensurepath(["/sbin", "/bin", "/usr/sbin", "/usr/bin"])
|
||||
del ensurepath
|
||||
|
||||
from session import Session
|
||||
|
||||
import os
|
||||
|
||||
if os.uname()[0] == "Linux":
|
||||
from netns import nodes
|
||||
try:
|
||||
from xen import xen
|
||||
except ImportError:
|
||||
#print "Xen support disabled."
|
||||
pass
|
||||
elif os.uname()[0] == "FreeBSD":
|
||||
from bsd import nodes
|
||||
from phys import pnodes
|
||||
del os
|
|
@ -1,61 +1,78 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2012-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
"""
|
||||
sdt.py: Scripted Display Tool (SDT3D) helper
|
||||
'''
|
||||
"""
|
||||
|
||||
from core.constants import *
|
||||
from core.api import coreapi
|
||||
from coreobj import PyCoreNet, PyCoreObj
|
||||
from core.netns import nodes
|
||||
from urlparse import urlparse
|
||||
import socket
|
||||
from urlparse import urlparse
|
||||
|
||||
from core import constants
|
||||
from core.api import coreapi
|
||||
from core.coreobj import PyCoreNet
|
||||
from core.coreobj import PyCoreObj
|
||||
from core.enumerations import EventTypes
|
||||
from core.enumerations import LinkTlvs
|
||||
from core.enumerations import LinkTypes
|
||||
from core.enumerations import MessageFlags
|
||||
from core.enumerations import MessageTypes
|
||||
from core.enumerations import NodeTlvs
|
||||
from core.enumerations import NodeTypes
|
||||
from core.misc import log
|
||||
from core.misc import nodeutils
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
|
||||
class Bunch:
|
||||
"""
|
||||
Helper class for recording a collection of attributes.
|
||||
"""
|
||||
|
||||
def __init__(self, **kwds):
|
||||
self.__dict__.update(kwds)
|
||||
|
||||
|
||||
class Sdt(object):
|
||||
''' Helper class for exporting session objects to NRL's SDT3D.
|
||||
"""
|
||||
Helper class for exporting session objects to NRL's SDT3D.
|
||||
The connect() method initializes the display, and can be invoked
|
||||
when a node position or link has changed.
|
||||
'''
|
||||
"""
|
||||
DEFAULT_SDT_URL = "tcp://127.0.0.1:50000/"
|
||||
# default altitude (in meters) for flyto view
|
||||
DEFAULT_ALT = 2500
|
||||
# TODO: read in user's nodes.conf here; below are default node types
|
||||
# from the GUI
|
||||
DEFAULT_SPRITES = [('router', 'router.gif'), ('host', 'host.gif'),
|
||||
('PC', 'pc.gif'), ('mdr', 'mdr.gif'),
|
||||
('prouter', 'router_green.gif'), ('xen', 'xen.gif'),
|
||||
('hub', 'hub.gif'), ('lanswitch','lanswitch.gif'),
|
||||
('wlan', 'wlan.gif'), ('rj45','rj45.gif'),
|
||||
('tunnel','tunnel.gif'),
|
||||
]
|
||||
DEFAULT_SPRITES = [
|
||||
('router', 'router.gif'), ('host', 'host.gif'),
|
||||
('PC', 'pc.gif'), ('mdr', 'mdr.gif'),
|
||||
('prouter', 'router_green.gif'), ('xen', 'xen.gif'),
|
||||
('hub', 'hub.gif'), ('lanswitch', 'lanswitch.gif'),
|
||||
('wlan', 'wlan.gif'), ('rj45', 'rj45.gif'),
|
||||
('tunnel', 'tunnel.gif'),
|
||||
]
|
||||
|
||||
class Bunch:
|
||||
''' Helper class for recording a collection of attributes.
|
||||
'''
|
||||
def __init__(self, **kwds):
|
||||
self.__dict__.update(kwds)
|
||||
|
||||
def __init__(self, session):
|
||||
"""
|
||||
Creates a Sdt instance.
|
||||
|
||||
:param core.session.Session session: session this manager is tied to
|
||||
:return: nothing
|
||||
"""
|
||||
self.session = session
|
||||
self.sock = None
|
||||
self.connected = False
|
||||
self.showerror = True
|
||||
self.url = self.DEFAULT_SDT_URL
|
||||
self.verbose = self.session.getcfgitembool('verbose', False)
|
||||
# node information for remote nodes not in session._objs
|
||||
# local nodes also appear here since their obj may not exist yet
|
||||
self.remotes = {}
|
||||
session.broker.handlers.add(self.handledistributed)
|
||||
|
||||
session.broker.handlers += (self.handledistributed,)
|
||||
|
||||
def is_enabled(self):
|
||||
''' Check for 'enablesdt' session option. Return False by default if
|
||||
the option is missing.
|
||||
'''
|
||||
"""
|
||||
Check for 'enablesdt' session option. Return False by default if
|
||||
the option is missing.
|
||||
"""
|
||||
if not hasattr(self.session.options, 'enablesdt'):
|
||||
return False
|
||||
enabled = self.session.options.enablesdt
|
||||
|
@ -64,11 +81,12 @@ class Sdt(object):
|
|||
return False
|
||||
|
||||
def seturl(self):
|
||||
''' Read 'sdturl' from session options, or use the default value.
|
||||
Set self.url, self.address, self.protocol
|
||||
'''
|
||||
"""
|
||||
Read 'sdturl' from session options, or use the default value.
|
||||
Set self.url, self.address, self.protocol
|
||||
"""
|
||||
url = None
|
||||
if hasattr(self.session.options,'sdturl'):
|
||||
if hasattr(self.session.options, 'sdturl'):
|
||||
if self.session.options.sdturl != "":
|
||||
url = self.session.options.sdturl
|
||||
if url is None or url == "":
|
||||
|
@ -78,147 +96,151 @@ class Sdt(object):
|
|||
self.protocol = self.url.scheme
|
||||
|
||||
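A short sketch of what seturl() above does with an 'sdturl' value such as DEFAULT_SDT_URL; urlparse is the Python 2 module name already imported in this file.

from urlparse import urlparse

url = urlparse("tcp://127.0.0.1:50000/")
protocol = url.scheme                  # "tcp"
address = (url.hostname, url.port)     # ("127.0.0.1", 50000)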
def connect(self, flags=0):
|
||||
''' Connect to the SDT address/port if enabled.
|
||||
'''
|
||||
"""
|
||||
Connect to the SDT address/port if enabled.
|
||||
"""
|
||||
if not self.is_enabled():
|
||||
return False
|
||||
if self.connected:
|
||||
return True
|
||||
if self.session.getstate() == coreapi.CORE_EVENT_SHUTDOWN_STATE:
|
||||
if self.session.state == EventTypes.SHUTDOWN_STATE.value:
|
||||
return False
|
||||
|
||||
self.seturl()
|
||||
if self.showerror:
|
||||
self.session.info("connecting to SDT at %s://%s" \
|
||||
% (self.protocol, self.address))
|
||||
logger.info("connecting to SDT at %s://%s" % (self.protocol, self.address))
|
||||
if self.sock is None:
|
||||
try:
|
||||
if (self.protocol.lower() == 'udp'):
|
||||
if self.protocol.lower() == 'udp':
|
||||
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||
self.sock.connect(self.address)
|
||||
else:
|
||||
# Default to tcp
|
||||
self.sock = socket.create_connection(self.address, 5)
|
||||
except Exception, e:
|
||||
if self.showerror:
|
||||
self.session.warn("SDT socket connect error: %s" % e)
|
||||
self.showerror = False
|
||||
except IOError:
|
||||
logger.exception("SDT socket connect error")
|
||||
return False
|
||||
|
||||
if not self.initialize():
|
||||
return False
|
||||
|
||||
self.connected = True
|
||||
# refresh all objects in SDT3D when connecting after session start
|
||||
if not flags & coreapi.CORE_API_ADD_FLAG:
|
||||
if not self.sendobjs():
|
||||
return False
|
||||
if not flags & MessageFlags.ADD.value and not self.sendobjs():
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def initialize(self):
|
||||
''' Load icon sprites, and fly to the reference point location on
|
||||
the virtual globe.
|
||||
'''
|
||||
if not self.cmd('path "%s/icons/normal"' % CORE_DATA_DIR):
|
||||
"""
|
||||
Load icon sprites, and fly to the reference point location on
|
||||
the virtual globe.
|
||||
"""
|
||||
if not self.cmd('path "%s/icons/normal"' % constants.CORE_DATA_DIR):
|
||||
return False
|
||||
# send node type to icon mappings
|
||||
for (type, icon) in self.DEFAULT_SPRITES:
|
||||
for type, icon in self.DEFAULT_SPRITES:
|
||||
if not self.cmd('sprite %s image %s' % (type, icon)):
|
||||
return False
|
||||
(lat, long) = self.session.location.refgeo[:2]
|
||||
return self.cmd('flyto %.6f,%.6f,%d' % (long, lat, self.DEFAULT_ALT))
|
||||
|
||||
|
||||
def disconnect(self):
|
||||
try:
|
||||
self.sock.close()
|
||||
except:
|
||||
pass
|
||||
self.sock = None
|
||||
if self.sock:
|
||||
try:
|
||||
self.sock.close()
|
||||
except IOError:
|
||||
logger.error("error closing socket")
|
||||
finally:
|
||||
self.sock = None
|
||||
|
||||
self.connected = False
|
||||
|
||||
|
||||
def shutdown(self):
|
||||
''' Invoked from Session.shutdown() and Session.checkshutdown().
|
||||
'''
|
||||
"""
|
||||
Invoked from Session.shutdown() and Session.checkshutdown().
|
||||
"""
|
||||
self.cmd('clear all')
|
||||
self.disconnect()
|
||||
self.showerror = True
|
||||
|
||||
|
||||
def cmd(self, cmdstr):
|
||||
''' Send an SDT command over a UDP socket. socket.sendall() is used
|
||||
as opposed to socket.sendto() because an exception is raised when
|
||||
there is no socket listener.
|
||||
'''
|
||||
"""
|
||||
Send an SDT command over a UDP socket. socket.sendall() is used
|
||||
as opposed to socket.sendto() because an exception is raised when
|
||||
there is no socket listener.
|
||||
"""
|
||||
if self.sock is None:
|
||||
return False
|
||||
try:
|
||||
if self.verbose:
|
||||
self.session.info("sdt: %s" % cmdstr)
|
||||
logger.info("sdt: %s" % cmdstr)
|
||||
self.sock.sendall("%s\n" % cmdstr)
|
||||
return True
|
||||
except Exception, e:
|
||||
if self.showerror:
|
||||
self.session.warn("SDT connection error: %s" % e)
|
||||
self.showerror = False
|
||||
except IOError:
|
||||
logger.exception("SDT connection error")
|
||||
self.sock = None
|
||||
self.connected = False
|
||||
return False
|
||||
|
||||
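A hedged illustration of the connected-UDP behavior the cmd() docstring above relies on: connect() plus sendall() on a datagram socket surfaces an error when nothing is listening, where a bare sendto() would fail silently.

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(("127.0.0.1", 50000))   # host/port taken from DEFAULT_SDT_URL above
try:
    sock.sendall("flyto 0.0,0.0,2500\n")
except socket.error:
    # raised (e.g. ECONNREFUSED) once the OS learns there is no listener
    print "no SDT3D listener running"
finally:
    sock.close()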
def updatenode(self, nodenum, flags, x, y, z,
|
||||
name=None, type=None, icon=None):
|
||||
''' Node is updated from a Node Message or mobility script.
|
||||
'''
|
||||
|
||||
def updatenode(self, nodenum, flags, x, y, z, name=None, type=None, icon=None):
|
||||
"""
|
||||
Node is updated from a Node Message or mobility script.
|
||||
"""
|
||||
if not self.connect():
|
||||
return
|
||||
if flags & coreapi.CORE_API_DEL_FLAG:
|
||||
if flags & MessageFlags.DELETE.value:
|
||||
self.cmd('delete node,%d' % nodenum)
|
||||
return
|
||||
if x is None or y is None:
|
||||
return
|
||||
(lat, long, alt) = self.session.location.getgeo(x, y, z)
|
||||
pos = "pos %.6f,%.6f,%.6f" % (long, lat, alt)
|
||||
if flags & coreapi.CORE_API_ADD_FLAG:
|
||||
if flags & MessageFlags.ADD.value:
|
||||
if icon is not None:
|
||||
type = name
|
||||
icon = icon.replace("$CORE_DATA_DIR", CORE_DATA_DIR)
|
||||
icon = icon.replace("$CORE_CONF_DIR", CORE_CONF_DIR)
|
||||
icon = icon.replace("$CORE_DATA_DIR", constants.CORE_DATA_DIR)
|
||||
icon = icon.replace("$CORE_CONF_DIR", constants.CORE_CONF_DIR)
|
||||
self.cmd('sprite %s image %s' % (type, icon))
|
||||
self.cmd('node %d type %s label on,"%s" %s' % \
|
||||
(nodenum, type, name, pos))
|
||||
self.cmd('node %d type %s label on,"%s" %s' % (nodenum, type, name, pos))
|
||||
else:
|
||||
self.cmd('node %d %s' % (nodenum, pos))
|
||||
|
||||
def updatenodegeo(self, nodenum, lat, long, alt):
|
||||
''' Node is updated upon receiving an EMANE Location Event.
|
||||
TODO: received Node Message with lat/long/alt.
|
||||
'''
|
||||
"""
|
||||
Node is updated upon receiving an EMANE Location Event.
|
||||
TODO: received Node Message with lat/long/alt.
|
||||
"""
|
||||
if not self.connect():
|
||||
return
|
||||
pos = "pos %.6f,%.6f,%.6f" % (long, lat, alt)
|
||||
self.cmd('node %d %s' % (nodenum, pos))
|
||||
|
||||
|
||||
def updatelink(self, node1num, node2num, flags, wireless=False):
|
||||
''' Link is updated from a Link Message or by a wireless model.
|
||||
'''
|
||||
"""
|
||||
Link is updated from a Link Message or by a wireless model.
|
||||
"""
|
||||
if node1num is None or node2num is None:
|
||||
return
|
||||
if not self.connect():
|
||||
return
|
||||
if flags & coreapi.CORE_API_DEL_FLAG:
|
||||
if flags & MessageFlags.DELETE.value:
|
||||
self.cmd('delete link,%s,%s' % (node1num, node2num))
|
||||
elif flags & coreapi.CORE_API_ADD_FLAG:
|
||||
elif flags & MessageFlags.ADD.value:
|
||||
attr = ""
|
||||
if wireless:
|
||||
attr = " line green,2"
|
||||
else:
|
||||
attr = " line red,2"
|
||||
self.cmd('link %s,%s%s' % (node1num, node2num, attr))
|
||||
|
||||
|
||||
def sendobjs(self):
|
||||
''' Session has already started, and the SDT3D GUI later connects.
|
||||
Send all node and link objects for display. Otherwise, nodes and
|
||||
links will only be drawn when they have been updated (e.g. moved).
|
||||
'''
|
||||
"""
|
||||
Session has already started, and the SDT3D GUI later connects.
|
||||
Send all node and link objects for display. Otherwise, nodes and
|
||||
links will only be drawn when they have been updated (e.g. moved).
|
||||
"""
|
||||
nets = []
|
||||
with self.session._objslock:
|
||||
for obj in self.session.objs():
|
||||
with self.session._objects_lock:
|
||||
for obj in self.session.objects.itervalues():
|
||||
if isinstance(obj, PyCoreNet):
|
||||
nets.append(obj)
|
||||
if not isinstance(obj, PyCoreObj):
|
||||
|
@ -226,88 +248,88 @@ class Sdt(object):
|
|||
(x, y, z) = obj.getposition()
|
||||
if x is None or y is None:
|
||||
continue
|
||||
self.updatenode(obj.objid, coreapi.CORE_API_ADD_FLAG, x, y, z,
|
||||
self.updatenode(obj.objid, MessageFlags.ADD.value, x, y, z,
|
||||
obj.name, obj.type, obj.icon)
|
||||
for nodenum in sorted(self.remotes.keys()):
|
||||
r = self.remotes[nodenum]
|
||||
(x, y, z) = r.pos
|
||||
self.updatenode(nodenum, coreapi.CORE_API_ADD_FLAG, x, y, z,
|
||||
self.updatenode(nodenum, MessageFlags.ADD.value, x, y, z,
|
||||
r.name, r.type, r.icon)
|
||||
|
||||
for net in nets:
|
||||
# use tolinkmsgs() to handle various types of links
|
||||
msgs = net.tolinkmsgs(flags = coreapi.CORE_API_ADD_FLAG)
|
||||
for msg in msgs:
|
||||
msghdr = msg[:coreapi.CoreMessage.hdrsiz]
|
||||
flags = coreapi.CoreMessage.unpackhdr(msghdr)[1]
|
||||
m = coreapi.CoreLinkMessage(flags, msghdr,
|
||||
msg[coreapi.CoreMessage.hdrsiz:])
|
||||
n1num = m.gettlv(coreapi.CORE_TLV_LINK_N1NUMBER)
|
||||
n2num = m.gettlv(coreapi.CORE_TLV_LINK_N2NUMBER)
|
||||
link_msg_type = m.gettlv(coreapi.CORE_TLV_LINK_TYPE)
|
||||
if isinstance(net, nodes.WlanNode) or \
|
||||
isinstance(net, nodes.EmaneNode):
|
||||
if (n1num == net.objid):
|
||||
messages = net.all_link_data(flags=MessageFlags.ADD.value)
|
||||
for message in messages:
|
||||
msghdr = message[:coreapi.CoreMessage.header_len]
|
||||
flags = coreapi.CoreMessage.unpack_header(msghdr)[1]
|
||||
m = coreapi.CoreLinkMessage(flags, msghdr, message[coreapi.CoreMessage.header_len:])
|
||||
n1num = m.get_tlv(LinkTlvs.N1_NUMBER.value)
|
||||
n2num = m.get_tlv(LinkTlvs.N2_NUMBER.value)
|
||||
link_msg_type = m.get_tlv(LinkTlvs.TYPE.value)
|
||||
if nodeutils.is_node(net, (NodeTypes.WIRELESS_LAN, NodeTypes.EMANE)):
|
||||
if n1num == net.objid:
|
||||
continue
|
||||
wl = (link_msg_type == coreapi.CORE_LINK_WIRELESS)
|
||||
self.updatelink(n1num, n2num, coreapi.CORE_API_ADD_FLAG, wl)
|
||||
wl = link_msg_type == LinkTypes.WIRELESS.value
|
||||
self.updatelink(n1num, n2num, MessageFlags.ADD.value, wl)
|
||||
|
||||
for n1num in sorted(self.remotes.keys()):
|
||||
r = self.remotes[n1num]
|
||||
for (n2num, wl) in r.links:
|
||||
self.updatelink(n1num, n2num, coreapi.CORE_API_ADD_FLAG, wl)
|
||||
|
||||
def handledistributed(self, msg):
|
||||
''' Broker handler for processing CORE API messages as they are
|
||||
received. This is used to snoop the Node messages and update
|
||||
node positions.
|
||||
'''
|
||||
if msg.msgtype == coreapi.CORE_API_LINK_MSG:
|
||||
return self.handlelinkmsg(msg)
|
||||
elif msg.msgtype == coreapi.CORE_API_NODE_MSG:
|
||||
return self.handlenodemsg(msg)
|
||||
|
||||
for n2num, wl in r.links:
|
||||
self.updatelink(n1num, n2num, MessageFlags.ADD.value, wl)
|
||||
|
||||
def handledistributed(self, message):
|
||||
"""
|
||||
Broker handler for processing CORE API messages as they are
|
||||
received. This is used to snoop the Node messages and update
|
||||
node positions.
|
||||
"""
|
||||
if message.message_type == MessageTypes.LINK.value:
|
||||
return self.handlelinkmsg(message)
|
||||
elif message.message_type == MessageTypes.NODE.value:
|
||||
return self.handlenodemsg(message)
|
||||
|
||||
def handlenodemsg(self, msg):
|
||||
''' Process a Node Message to add/delete or move a node on
|
||||
the SDT display. Node properties are found in session._objs or
|
||||
self.remotes for remote nodes (or those not yet instantiated).
|
||||
'''
|
||||
"""
|
||||
Process a Node Message to add/delete or move a node on
|
||||
the SDT display. Node properties are found in session._objs or
|
||||
self.remotes for remote nodes (or those not yet instantiated).
|
||||
"""
|
||||
# for distributed sessions to work properly, the SDT option should be
|
||||
# enabled prior to starting the session
|
||||
if not self.is_enabled():
|
||||
return False
|
||||
# node.(objid, type, icon, name) are used.
|
||||
nodenum = msg.gettlv(coreapi.CORE_TLV_NODE_NUMBER)
|
||||
nodenum = msg.get_tlv(NodeTlvs.NUMBER.value)
|
||||
if not nodenum:
|
||||
return
|
||||
x = msg.gettlv(coreapi.CORE_TLV_NODE_XPOS)
|
||||
y = msg.gettlv(coreapi.CORE_TLV_NODE_YPOS)
|
||||
x = msg.get_tlv(NodeTlvs.X_POSITION.value)
|
||||
y = msg.get_tlv(NodeTlvs.Y_POSITION.value)
|
||||
z = None
|
||||
name = msg.gettlv(coreapi.CORE_TLV_NODE_NAME)
|
||||
|
||||
nodetype = msg.gettlv(coreapi.CORE_TLV_NODE_TYPE)
|
||||
model = msg.gettlv(coreapi.CORE_TLV_NODE_MODEL)
|
||||
icon = msg.gettlv(coreapi.CORE_TLV_NODE_ICON)
|
||||
name = msg.get_tlv(NodeTlvs.NAME.value)
|
||||
|
||||
nodetype = msg.get_tlv(NodeTlvs.TYPE.value)
|
||||
model = msg.get_tlv(NodeTlvs.MODEL.value)
|
||||
icon = msg.get_tlv(NodeTlvs.ICON.value)
|
||||
|
||||
net = False
|
||||
if nodetype == coreapi.CORE_NODE_DEF or \
|
||||
nodetype == coreapi.CORE_NODE_PHYS or \
|
||||
nodetype == coreapi.CORE_NODE_XEN:
|
||||
if nodetype == NodeTypes.DEFAULT.value or \
|
||||
nodetype == NodeTypes.PHYSICAL.value or \
|
||||
nodetype == NodeTypes.XEN.value:
|
||||
if model is None:
|
||||
model = "router"
|
||||
type = model
|
||||
elif nodetype != None:
|
||||
type = coreapi.node_class(nodetype).type
|
||||
elif nodetype is not None:
|
||||
type = nodeutils.get_node_class(NodeTypes(nodetype)).type
|
||||
net = True
|
||||
else:
|
||||
type = None
|
||||
|
||||
|
||||
try:
|
||||
node = self.session.obj(nodenum)
|
||||
node = self.session.get_object(nodenum)
|
||||
except KeyError:
|
||||
node = None
|
||||
if node:
|
||||
self.updatenode(node.objid, msg.flags, x, y, z,
|
||||
node.name, node.type, node.icon)
|
||||
self.updatenode(node.objid, msg.flags, x, y, z, node.name, node.type, node.icon)
|
||||
else:
|
||||
if nodenum in self.remotes:
|
||||
remote = self.remotes[nodenum]
|
||||
|
@ -318,29 +340,29 @@ class Sdt(object):
|
|||
if icon is None:
|
||||
icon = remote.icon
|
||||
else:
|
||||
remote = self.Bunch(objid=nodenum, type=type, icon=icon,
|
||||
name=name, net=net, links=set())
|
||||
remote = Bunch(objid=nodenum, type=type, icon=icon, name=name, net=net, links=set())
|
||||
self.remotes[nodenum] = remote
|
||||
remote.pos = (x, y, z)
|
||||
self.updatenode(nodenum, msg.flags, x, y, z, name, type, icon)
|
||||
|
||||
|
||||
def handlelinkmsg(self, msg):
|
||||
''' Process a Link Message to add/remove links on the SDT display.
|
||||
Links are recorded in the remotes[nodenum1].links set for updating
|
||||
the SDT display at a later time.
|
||||
'''
|
||||
"""
|
||||
Process a Link Message to add/remove links on the SDT display.
|
||||
Links are recorded in the remotes[nodenum1].links set for updating
|
||||
the SDT display at a later time.
|
||||
"""
|
||||
if not self.is_enabled():
|
||||
return False
|
||||
nodenum1 = msg.gettlv(coreapi.CORE_TLV_LINK_N1NUMBER)
|
||||
nodenum2 = msg.gettlv(coreapi.CORE_TLV_LINK_N2NUMBER)
|
||||
link_msg_type = msg.gettlv(coreapi.CORE_TLV_LINK_TYPE)
|
||||
nodenum1 = msg.get_tlv(LinkTlvs.N1_NUMBER.value)
|
||||
nodenum2 = msg.get_tlv(LinkTlvs.N2_NUMBER.value)
|
||||
link_msg_type = msg.get_tlv(LinkTlvs.TYPE.value)
|
||||
# this filters out links to WLAN and EMANE nodes which are not drawn
|
||||
if self.wlancheck(nodenum1):
|
||||
return
|
||||
wl = (link_msg_type == coreapi.CORE_LINK_WIRELESS)
|
||||
wl = link_msg_type == LinkTypes.WIRELESS.value
|
||||
if nodenum1 in self.remotes:
|
||||
r = self.remotes[nodenum1]
|
||||
if msg.flags & coreapi.CORE_API_DEL_FLAG:
|
||||
if msg.flags & MessageFlags.DELETE.value:
|
||||
if (nodenum2, wl) in r.links:
|
||||
r.links.remove((nodenum2, wl))
|
||||
else:
|
||||
|
@ -348,18 +370,19 @@ class Sdt(object):
|
|||
self.updatelink(nodenum1, nodenum2, msg.flags, wireless=wl)
|
||||
|
||||
def wlancheck(self, nodenum):
|
||||
''' Helper returns True if a node number corresponds to a WlanNode
|
||||
or EmaneNode.
|
||||
'''
|
||||
"""
|
||||
Helper returns True if a node number corresponds to a WlanNode
|
||||
or EmaneNode.
|
||||
"""
|
||||
if nodenum in self.remotes:
|
||||
type = self.remotes[nodenum].type
|
||||
if type in ("wlan", "emane"):
|
||||
return True
|
||||
else:
|
||||
try:
|
||||
n = self.session.obj(nodenum)
|
||||
n = self.session.get_object(nodenum)
|
||||
except KeyError:
|
||||
return False
|
||||
if isinstance(n, (nodes.WlanNode, nodes.EmaneNode)):
|
||||
if nodeutils.is_node(n, (NodeTypes.WIRELESS_LAN, NodeTypes.EMANE)):
|
||||
return True
|
||||
return False
|
||||
|
|
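The flag tests used throughout Sdt above (flags & MessageFlags.ADD.value, flags & MessageFlags.DELETE.value) treat message flags as a bitmask; a minimal sketch, assuming the core.enumerations module introduced in this commit:

from core.enumerations import MessageFlags

flags = MessageFlags.ADD.value
if flags & MessageFlags.ADD.value:
    print "node/link is being added"
if not flags & MessageFlags.DELETE.value:
    print "not a delete message"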
File diff suppressed because it is too large
|
@ -1,6 +1,6 @@
|
|||
"""Services
|
||||
"""
|
||||
Services
|
||||
|
||||
Services available to nodes can be put in this directory. Everything listed in
|
||||
__all__ is automatically loaded by the main core module.
|
||||
"""
|
||||
__all__ = ["quagga", "nrl", "xorp", "bird", "utility", "security", "ucarp", "dockersvc", 'startup']
|
||||
|
|
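A hedged sketch of how a package-level __all__ like the one above can be walked to import every listed service module; the real loader lives elsewhere in the core package and is not part of this diff.

import importlib

import core.services

for name in core.services.__all__:
    importlib.import_module("core.services." + name)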
|
@ -1,24 +1,15 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2012 Jean-Tiare Le Bigot.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Jean-Tiare Le Bigot <admin@jtlebi.fr>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
"""
|
||||
bird.py: defines routing services provided by the BIRD Internet Routing Daemon.
|
||||
'''
|
||||
"""
|
||||
|
||||
import os
|
||||
from core.service import CoreService
|
||||
from core.service import ServiceManager
|
||||
|
||||
from core.service import CoreService, addservice
|
||||
from core.misc.ipaddr import IPv4Prefix
|
||||
from core.constants import *
|
||||
|
||||
class Bird(CoreService):
|
||||
''' Bird router support
|
||||
'''
|
||||
"""
|
||||
Bird router support
|
||||
"""
|
||||
_name = "bird"
|
||||
_group = "BIRD"
|
||||
_depends = ()
|
||||
|
@ -26,13 +17,14 @@ class Bird(CoreService):
|
|||
_configs = ("/etc/bird/bird.conf",)
|
||||
_startindex = 35
|
||||
_startup = ("bird -c %s" % (_configs[0]),)
|
||||
_shutdown = ("killall bird", )
|
||||
_validate = ("pidof bird", )
|
||||
_shutdown = ("killall bird",)
|
||||
_validate = ("pidof bird",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the bird.conf file contents.
|
||||
'''
|
||||
"""
|
||||
Return the bird.conf file contents.
|
||||
"""
|
||||
if filename == cls._configs[0]:
|
||||
return cls.generateBirdConf(node, services)
|
||||
else:
|
||||
|
@ -40,28 +32,30 @@ class Bird(CoreService):
|
|||
|
||||
@staticmethod
|
||||
def routerid(node):
|
||||
''' Helper to return the first IPv4 address of a node as its router ID.
|
||||
'''
|
||||
"""
|
||||
Helper to return the first IPv4 address of a node as its router ID.
|
||||
"""
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") >= 0:
|
||||
return a .split('/') [0]
|
||||
#raise ValueError, "no IPv4 address found for router ID"
|
||||
return a.split('/')[0]
|
||||
# raise ValueError, "no IPv4 address found for router ID"
|
||||
return "0.0.0.0"
|
||||
|
||||
@classmethod
|
||||
def generateBirdConf(cls, node, services):
|
||||
''' Returns configuration file text. Other services that depend on bird
|
||||
will have generatebirdifcconfig() and generatebirdconfig()
|
||||
hooks that are invoked here.
|
||||
'''
|
||||
cfg = """\
|
||||
"""
|
||||
Returns configuration file text. Other services that depend on bird
|
||||
will have generatebirdifcconfig() and generatebirdconfig()
|
||||
hooks that are invoked here.
|
||||
"""
|
||||
cfg = """\
|
||||
/* Main configuration file for BIRD. This is ony a template,
|
||||
* you will *need* to customize it according to your needs
|
||||
* Beware that only double quotes \'"\' are valid. No singles. */
|
||||
|
||||
|
||||
|
||||
log "/var/log/%s.log" all;
|
||||
#debug protocols all;
|
||||
|
@ -90,14 +84,16 @@ protocol device {
|
|||
|
||||
return cfg
|
||||
|
||||
|
||||
class BirdService(CoreService):
|
||||
''' Parent class for Bird services. Defines properties and methods
|
||||
"""
|
||||
Parent class for Bird services. Defines properties and methods
|
||||
common to Bird's routing daemons.
|
||||
'''
|
||||
"""
|
||||
|
||||
_name = "BirdDaemon"
|
||||
_group = "BIRD"
|
||||
_depends = ("bird", )
|
||||
_depends = ("bird",)
|
||||
_dirs = ()
|
||||
_configs = ()
|
||||
_startindex = 40
|
||||
|
@ -106,7 +102,7 @@ class BirdService(CoreService):
|
|||
_meta = "The config file for this service can be found in the bird service."
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
def generatebirdconfig(cls, node):
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
|
@ -118,20 +114,23 @@ class BirdService(CoreService):
|
|||
cfg = ""
|
||||
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True: continue
|
||||
cfg += ' interface "%s";\n'% ifc.name
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += ' interface "%s";\n' % ifc.name
|
||||
|
||||
return cfg
|
||||
|
||||
|
||||
class BirdBgp(BirdService):
|
||||
'''BGP BIRD Service (configuration generation)'''
|
||||
"""
|
||||
BGP BIRD Service (configuration generation)
|
||||
"""
|
||||
|
||||
_name = "BIRD_BGP"
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
def generatebirdconfig(cls, node):
|
||||
return """
|
||||
/* This is a sample config that should be customized with appropriate AS numbers
|
||||
* and peers; add one section like this for each neighbor */
|
||||
|
@ -152,13 +151,16 @@ protocol bgp {
|
|||
|
||||
"""
|
||||
|
||||
|
||||
class BirdOspf(BirdService):
|
||||
'''OSPF BIRD Service (configuration generation)'''
|
||||
"""
|
||||
OSPF BIRD Service (configuration generation)
|
||||
"""
|
||||
|
||||
_name = "BIRD_OSPFv2"
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
def generatebirdconfig(cls, node):
|
||||
cfg = 'protocol ospf {\n'
|
||||
cfg += ' export filter {\n'
|
||||
cfg += ' if source = RTS_BGP then {\n'
|
||||
|
@ -168,7 +170,7 @@ class BirdOspf(BirdService):
|
|||
cfg += ' accept;\n'
|
||||
cfg += ' };\n'
|
||||
cfg += ' area 0.0.0.0 {\n'
|
||||
cfg += cls.generatebirdifcconfig(node)
|
||||
cfg += cls.generatebirdifcconfig(node)
|
||||
cfg += ' };\n'
|
||||
cfg += '}\n\n'
|
||||
|
||||
|
@ -176,17 +178,19 @@ class BirdOspf(BirdService):
|
|||
|
||||
|
||||
class BirdRadv(BirdService):
|
||||
'''RADV BIRD Service (configuration generation)'''
|
||||
"""
|
||||
RADV BIRD Service (configuration generation)
|
||||
"""
|
||||
|
||||
_name = "BIRD_RADV"
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
cfg = '/* This is a sample config that must be customized */\n'
|
||||
def generatebirdconfig(cls, node):
|
||||
cfg = '/* This is a sample config that must be customized */\n'
|
||||
|
||||
cfg += 'protocol radv {\n'
|
||||
cfg += ' # auto configuration on all interfaces\n'
|
||||
cfg += cls.generatebirdifcconfig(node)
|
||||
cfg += cls.generatebirdifcconfig(node)
|
||||
cfg += ' # Advertise DNS\n'
|
||||
cfg += ' rdnss {\n'
|
||||
cfg += '# lifetime mult 10;\n'
|
||||
|
@ -202,16 +206,18 @@ class BirdRadv(BirdService):
|
|||
|
||||
|
||||
class BirdRip(BirdService):
|
||||
'''RIP BIRD Service (configuration generation)'''
|
||||
"""
|
||||
RIP BIRD Service (configuration generation)
|
||||
"""
|
||||
|
||||
_name = "BIRD_RIP"
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
def generatebirdconfig(cls, node):
|
||||
cfg = 'protocol rip {\n'
|
||||
cfg += ' period 10;\n'
|
||||
cfg += ' garbage time 60;\n'
|
||||
cfg += cls.generatebirdifcconfig(node)
|
||||
cfg += cls.generatebirdifcconfig(node)
|
||||
cfg += ' honor neighbor;\n'
|
||||
cfg += ' authentication none;\n'
|
||||
cfg += ' import all;\n'
|
||||
|
@ -222,13 +228,15 @@ class BirdRip(BirdService):
|
|||
|
||||
|
||||
class BirdStatic(BirdService):
|
||||
'''Static Bird Service (configuration generation)'''
|
||||
"""
|
||||
Static Bird Service (configuration generation)
|
||||
"""
|
||||
|
||||
_name = "BIRD_static"
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
def generatebirdconfig(cls, node):
|
||||
cfg = '/* This is a sample config that must be customized */\n'
|
||||
|
||||
cfg += 'protocol static {\n'
|
||||
|
@ -240,10 +248,11 @@ class BirdStatic(BirdService):
|
|||
return cfg
|
||||
|
||||
|
||||
# Register all protocols
|
||||
addservice(Bird)
|
||||
addservice(BirdOspf)
|
||||
addservice(BirdBgp)
|
||||
#addservice(BirdRadv) # untested
|
||||
addservice(BirdRip)
|
||||
addservice(BirdStatic)
|
||||
def load_services():
|
||||
# Register all protocols
|
||||
ServiceManager.add(Bird)
|
||||
ServiceManager.add(BirdOspf)
|
||||
ServiceManager.add(BirdBgp)
|
||||
# ServiceManager.add(BirdRadv) # untested
|
||||
ServiceManager.add(BirdRip)
|
||||
ServiceManager.add(BirdStatic)
|
||||
|
|
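A hedged sketch of a user-defined service following the new registration style above (ServiceManager.add() inside a load_services() hook); the class attributes mirror the Bird services, and the ping target is a placeholder.

from core.service import CoreService
from core.service import ServiceManager

class ExamplePing(CoreService):
    _name = "ExamplePing"
    _group = "Utility"
    _configs = ()
    _startindex = 50
    _startup = ("ping -c 5 10.0.0.1",)   # placeholder target
    _shutdown = ("killall ping",)

def load_services():
    ServiceManager.add(ExamplePing)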
|
@ -1,12 +1,5 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2014 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Stuart Marsden
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
''' Docker service allows running docker containers within CORE nodes.
|
||||
"""
|
||||
Docker service allows running docker containers within CORE nodes.
|
||||
|
||||
The running of Docker within a CORE node allows for additional extensibility to
|
||||
the CORE services. This allows network applications and protocols to be easily
|
||||
|
@ -20,7 +13,7 @@ service to the Docker group. The image will then be auto run if that service is
|
|||
selected.
|
||||
|
||||
This requires a recent version of Docker. This was tested using a PPA on Ubuntu
|
||||
with version 1.2.0. The version in the standard Ubuntu repo is to old for
|
||||
with version 1.2.0. The version in the standard Ubuntu repo is to old for
|
||||
this purpose (we need --net host).
|
||||
|
||||
It also requires docker-py (https://pypi.python.org/pypi/docker-py) which can be
|
||||
|
@ -47,13 +40,13 @@ The id will be different on your machine so use it in the following command:
|
|||
|
||||
sudo docker tag 4833487e66d2 stuartmarsden/multicastping:core
|
||||
|
||||
This image will be listed in the services after we restart the core-daemon:
|
||||
This image will be listed in the services after we restart the core-daemon:
|
||||
|
||||
sudo service core-daemon restart
|
||||
|
||||
You can set up a simple network with a number of PCs connected to a switch. Set
|
||||
the stuartmarsden/multicastping service for all the PCs. When started they will
|
||||
all begin sending Multicast pings.
|
||||
all begin sending Multicast pings.
|
||||
|
||||
In order to see what is happening you can go in to the terminal of a node and
|
||||
look at the docker log. Easy shorthand is:
|
||||
|
@ -89,11 +82,11 @@ Datagram 'Client: Ping' received from ('10.0.5.20', 8005)
|
|||
|
||||
Limitations:
|
||||
|
||||
1. Docker images must be downloaded on the host as usually a CORE node does not
|
||||
1. Docker images must be downloaded on the host, as a CORE node usually does not
|
||||
have access to the internet.
|
||||
2. Each node isolates running containers (keeps things simple)
|
||||
3. Recent version of docker needed so that --net host can be used. This does
|
||||
not further abstract the network within a node and allows multicast which
|
||||
3. Recent version of docker needed so that --net host can be used. This does
|
||||
not further abstract the network within a node and allows multicast which
|
||||
is not enabled within Docker containers at the moment.
|
||||
4. The core-daemon must be restarted for new images to show up.
|
||||
5. A Docker-daemon is run within each node but the images are shared. This
|
||||
|
@ -101,43 +94,46 @@ Limitations:
|
|||
host. At startup all the nodes will try to access this and it will be locked
|
||||
for most due to contention. The service just does a hackish wait for 1 second
|
||||
and retries. This means all the docker containers can take a while to come up
|
||||
depending on how many nodes you have.
|
||||
depending on how many nodes you have.
|
||||
"""
|
||||
|
||||
'''
|
||||
from core.misc import log
|
||||
from core.service import CoreService
|
||||
from core.service import ServiceManager
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
import os
|
||||
import sys
|
||||
try:
|
||||
from docker import Client
|
||||
except Exception:
|
||||
pass
|
||||
except ImportError:
|
||||
logger.error("failure to import docker")
|
||||
|
||||
from core.service import CoreService, addservice
|
||||
from core.misc.ipaddr import IPv4Prefix, IPv6Prefix
|
||||
|
||||
class DockerService(CoreService):
|
||||
''' This is a service which will allow running docker containers in a CORE
|
||||
node.
|
||||
'''
|
||||
"""
|
||||
This is a service which will allow running docker containers in a CORE
|
||||
node.
|
||||
"""
|
||||
_name = "Docker"
|
||||
_group = "Docker"
|
||||
_depends = ()
|
||||
_dirs = ('/var/lib/docker/containers/', '/run/shm', '/run/resolvconf',)
|
||||
_configs = ('docker.sh', )
|
||||
_configs = ('docker.sh',)
|
||||
_startindex = 50
|
||||
_startup = ('sh docker.sh',)
|
||||
_shutdown = ('service docker stop', )
|
||||
_shutdown = ('service docker stop',)
|
||||
# Container image to start
|
||||
_image = ""
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Returns a string having contents of a docker.sh script that
|
||||
can be modified to start a specific docker image.
|
||||
'''
|
||||
"""
|
||||
Returns a string having contents of a docker.sh script that
|
||||
can be modified to start a specific docker image.
|
||||
"""
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# auto-generated by Docker (docker.py)\n"
|
||||
# Docker likes to think it has DNS set up or it complains.
|
||||
# Docker likes to think it has DNS set up or it complains.
|
||||
# Unless your network was attached to the Internet this is
|
||||
# non-functional but hides error messages.
|
||||
cfg += 'echo "nameserver 8.8.8.8" > /run/resolvconf/resolv.conf\n'
|
||||
|
@ -156,27 +152,30 @@ until [ $result -eq 0 ]; do
|
|||
# this is to alleviate contention to docker's SQLite database
|
||||
sleep 0.3
|
||||
done
|
||||
""" % (cls._image, )
|
||||
""" % (cls._image,)
|
||||
return cfg
|
||||
|
||||
addservice(DockerService)
|
||||
|
||||
# This auto-loads Docker images having a :core tag, adding them to the list
|
||||
# of services under the "Docker" group.
|
||||
if 'Client' in globals():
|
||||
client = Client(version='1.10')
|
||||
images = client.images()
|
||||
del client
|
||||
else:
|
||||
images = []
|
||||
for image in images:
|
||||
if u'<none>' in image['RepoTags'][0]:
|
||||
continue
|
||||
for repo in image['RepoTags']:
|
||||
if u':core' not in repo:
|
||||
def load_services():
|
||||
ServiceManager.add(DockerService)
|
||||
|
||||
# This auto-loads Docker images having a :core tag, adding them to the list
|
||||
# of services under the "Docker" group.
|
||||
# TODO: change this logic, should be a proper configurable, or docker needs to be a required library
|
||||
# TODO: also should make this call possible real time for reloading removing "magic" auto loading on import
|
||||
if 'Client' in globals():
|
||||
client = Client(version='1.10')
|
||||
images = client.images()
|
||||
del client
|
||||
else:
|
||||
images = []
|
||||
for image in images:
|
||||
if u'<none>' in image['RepoTags'][0]:
|
||||
continue
|
||||
dockerid = repo.encode('ascii','ignore').split(':')[0]
|
||||
SubClass = type('SubClass', (DockerService,),
|
||||
{'_name': dockerid, '_image': dockerid})
|
||||
addservice(SubClass)
|
||||
del images
|
||||
for repo in image['RepoTags']:
|
||||
if u':core' not in repo:
|
||||
continue
|
||||
dockerid = repo.encode('ascii', 'ignore').split(':')[0]
|
||||
sub_class = type('SubClass', (DockerService,), {'_name': dockerid, '_image': dockerid})
|
||||
ServiceManager.add(sub_class)
|
||||
del images
|
||||
|
|
|
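A standalone sketch of the :core image discovery above, assuming docker-py is installed and using the same Client(version='1.10') API as this file:

from docker import Client

client = Client(version='1.10')
core_images = []
for image in client.images():
    for repo in image.get('RepoTags') or []:
        if repo.endswith(':core'):
            # repository name without the tag, e.g. stuartmarsden/multicastping
            core_images.append(repo.split(':')[0])
print core_images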
@ -1,24 +1,19 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
"""
|
||||
nrl.py: defines services provided by NRL protolib tools hosted here:
|
||||
http://www.nrl.navy.mil/itd/ncs/products
|
||||
'''
|
||||
"""
|
||||
|
||||
from core.misc import utils
|
||||
from core.misc.ipaddress import Ipv4Prefix
|
||||
from core.service import CoreService
|
||||
from core.service import ServiceManager
|
||||
|
||||
from core.service import CoreService, addservice
|
||||
from core.misc.ipaddr import IPv4Prefix, IPv6Prefix
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
|
||||
class NrlService(CoreService):
|
||||
''' Parent class for NRL services. Defines properties and methods
|
||||
common to NRL's routing daemons.
|
||||
'''
|
||||
"""
|
||||
Parent class for NRL services. Defines properties and methods
|
||||
common to NRL's routing daemons.
|
||||
"""""
|
||||
_name = "Protean"
|
||||
_group = "ProtoSvc"
|
||||
_depends = ()
|
||||
|
@ -29,108 +24,111 @@ class NrlService(CoreService):
|
|||
_shutdown = ()
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generateconfig(cls, node, filename, services):
|
||||
return ""
|
||||
|
||||
|
||||
@staticmethod
|
||||
def firstipv4prefix(node, prefixlen=24):
|
||||
''' Similar to QuaggaService.routerid(). Helper to return the first IPv4
|
||||
"""
|
||||
Similar to QuaggaService.routerid(). Helper to return the first IPv4
|
||||
prefix of a node, using the supplied prefix length. This ignores the
|
||||
interface's prefix length, so e.g. '/32' can turn into '/24'.
|
||||
'''
|
||||
"""
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") >= 0:
|
||||
addr = a.split('/')[0]
|
||||
pre = IPv4Prefix("%s/%s" % (addr, prefixlen))
|
||||
pre = Ipv4Prefix("%s/%s" % (addr, prefixlen))
|
||||
return str(pre)
|
||||
#raise ValueError, "no IPv4 address found"
|
||||
# raise ValueError, "no IPv4 address found"
|
||||
return "0.0.0.0/%s" % prefixlen
|
||||
|
||||
|
||||
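A small self-contained sketch of the firstipv4prefix() idea above, with the address handling spelled out and no dependency on Ipv4Prefix (which, in the real code, may additionally normalize the result to its network address); the sample addresses are made up.

def first_ipv4_prefix(addrlist, prefixlen=24):
    for a in addrlist:
        if "." in a:
            addr = a.split('/')[0]            # drop the interface's own mask
            return "%s/%s" % (addr, prefixlen)
    return "0.0.0.0/%s" % prefixlen

# first_ipv4_prefix(["fe80::1/64", "10.0.3.1/32"]) -> "10.0.3.1/24"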
class MgenSinkService(NrlService):
|
||||
_name = "MGEN_Sink"
|
||||
_configs = ("sink.mgen", )
|
||||
_configs = ("sink.mgen",)
|
||||
_startindex = 5
|
||||
_startup = ("mgen input sink.mgen", )
|
||||
_validate = ("pidof mgen", )
|
||||
_shutdown = ("killall mgen", )
|
||||
_startup = ("mgen input sink.mgen",)
|
||||
_validate = ("pidof mgen",)
|
||||
_shutdown = ("killall mgen",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
cfg = "0.0 LISTEN UDP 5000\n"
|
||||
for ifc in node.netifs():
|
||||
name = sysctldevname(ifc.name)
|
||||
name = utils.sysctldevname(ifc.name)
|
||||
cfg += "0.0 Join 224.225.1.2 INTERFACE %s\n" % name
|
||||
return cfg
|
||||
|
||||
@classmethod
|
||||
def getstartup(cls, node, services):
|
||||
cmd =cls._startup[0]
|
||||
def getstartup(cls, node, services):
|
||||
cmd = cls._startup[0]
|
||||
cmd += " output /tmp/mgen_%s.log" % node.name
|
||||
return (cmd, )
|
||||
return cmd,
|
||||
|
||||
addservice(MgenSinkService)
|
||||
|
||||
class NrlNhdp(NrlService):
|
||||
''' NeighborHood Discovery Protocol for MANET networks.
|
||||
'''
|
||||
"""
|
||||
NeighborHood Discovery Protocol for MANET networks.
|
||||
"""
|
||||
_name = "NHDP"
|
||||
_startup = ("nrlnhdp", )
|
||||
_shutdown = ("killall nrlnhdp", )
|
||||
_validate = ("pidof nrlnhdp", )
|
||||
_startup = ("nrlnhdp",)
|
||||
_shutdown = ("killall nrlnhdp",)
|
||||
_validate = ("pidof nrlnhdp",)
|
||||
|
||||
@classmethod
|
||||
def getstartup(cls, node, services):
|
||||
''' Generate the appropriate command-line based on node interfaces.
|
||||
'''
|
||||
def getstartup(cls, node, services):
|
||||
"""
|
||||
Generate the appropriate command-line based on node interfaces.
|
||||
"""
|
||||
cmd = cls._startup[0]
|
||||
cmd += " -l /var/log/nrlnhdp.log"
|
||||
cmd += " -rpipe %s_nhdp" % node.name
|
||||
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
if "SMF" in servicenames:
|
||||
cmd += " -flooding ecds-etx sticky"
|
||||
cmd += " -smfClient %s_smf" % node.name
|
||||
|
||||
|
||||
netifs = filter(lambda x: not getattr(x, 'control', False), \
|
||||
node.netifs())
|
||||
if len(netifs) > 0:
|
||||
interfacenames = map(lambda x: x.name, netifs)
|
||||
cmd += " -i "
|
||||
cmd += " -i ".join(interfacenames)
|
||||
|
||||
return (cmd, )
|
||||
|
||||
addservice(NrlNhdp)
|
||||
|
||||
return cmd,
|
||||
|
||||
|
||||
class NrlSmf(NrlService):
|
||||
''' Simplified Multicast Forwarding for MANET networks.
|
||||
'''
|
||||
"""
|
||||
Simplified Multicast Forwarding for MANET networks.
|
||||
"""
|
||||
_name = "SMF"
|
||||
_startup = ("sh startsmf.sh", )
|
||||
_shutdown = ("killall nrlsmf", )
|
||||
_validate = ("pidof nrlsmf", )
|
||||
_configs = ("startsmf.sh", )
|
||||
|
||||
_startup = ("sh startsmf.sh",)
|
||||
_shutdown = ("killall nrlsmf",)
|
||||
_validate = ("pidof nrlsmf",)
|
||||
_configs = ("startsmf.sh",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate a startup script for SMF. Because nrlsmf does not
|
||||
"""
|
||||
Generate a startup script for SMF. Because nrlsmf does not
|
||||
daemonize, it can cause problems in some situations when launched
|
||||
directly using vcmd.
|
||||
'''
|
||||
"""
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# auto-generated by nrl.py:NrlSmf.generateconfig()\n"
|
||||
comments = ""
|
||||
cmd = "nrlsmf instance %s_smf" % (node.name)
|
||||
cmd = "nrlsmf instance %s_smf" % node.name
|
||||
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
netifs = filter(lambda x: not getattr(x, 'control', False), \
|
||||
node.netifs())
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
netifs = filter(lambda x: not getattr(x, 'control', False), node.netifs())
|
||||
if len(netifs) == 0:
|
||||
return ()
|
||||
|
||||
|
||||
if "arouted" in servicenames:
|
||||
comments += "# arouted service is enabled\n"
|
||||
cmd += " tap %s_tap" % (node.name,)
|
||||
|
@ -145,29 +143,30 @@ class NrlSmf(NrlService):
|
|||
cmd += " smpr "
|
||||
else:
|
||||
cmd += " cf "
|
||||
interfacenames = map(lambda x: x.name, netifs)
|
||||
interfacenames = map(lambda x: x.name, netifs)
|
||||
cmd += ",".join(interfacenames)
|
||||
|
||||
|
||||
cmd += " hash MD5"
|
||||
cmd += " log /var/log/nrlsmf.log"
|
||||
|
||||
cfg += comments + cmd + " < /dev/null > /dev/null 2>&1 &\n\n"
|
||||
return cfg
|
||||
|
||||
addservice(NrlSmf)
|
||||
|
||||
|
||||
class NrlOlsr(NrlService):
|
||||
''' Optimized Link State Routing protocol for MANET networks.
|
||||
'''
|
||||
"""
|
||||
Optimized Link State Routing protocol for MANET networks.
|
||||
"""
|
||||
_name = "OLSR"
|
||||
_startup = ("nrlolsrd", )
|
||||
_shutdown = ("killall nrlolsrd", )
|
||||
_validate = ("pidof nrlolsrd", )
|
||||
|
||||
_startup = ("nrlolsrd",)
|
||||
_shutdown = ("killall nrlolsrd",)
|
||||
_validate = ("pidof nrlolsrd",)
|
||||
|
||||
@classmethod
|
||||
def getstartup(cls, node, services):
|
||||
''' Generate the appropriate command-line based on node interfaces.
|
||||
'''
|
||||
def getstartup(cls, node, services):
|
||||
"""
|
||||
Generate the appropriate command-line based on node interfaces.
|
||||
"""
|
||||
cmd = cls._startup[0]
|
||||
# are multiple interfaces supported? No.
|
||||
netifs = list(node.netifs())
|
||||
|
@ -177,78 +176,80 @@ class NrlOlsr(NrlService):
|
|||
cmd += " -l /var/log/nrlolsrd.log"
|
||||
cmd += " -rpipe %s_olsr" % node.name
|
||||
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
if "SMF" in servicenames and not "NHDP" in servicenames:
|
||||
cmd += " -flooding s-mpr"
|
||||
cmd += " -smfClient %s_smf" % node.name
|
||||
if "zebra" in servicenames:
|
||||
cmd += " -z"
|
||||
|
||||
return (cmd, )
|
||||
|
||||
addservice(NrlOlsr)
|
||||
return cmd,
|
||||
|
||||
|
||||
class NrlOlsrv2(NrlService):
|
||||
''' Optimized Link State Routing protocol version 2 for MANET networks.
|
||||
'''
|
||||
"""
|
||||
Optimized Link State Routing protocol version 2 for MANET networks.
|
||||
"""
|
||||
_name = "OLSRv2"
|
||||
_startup = ("nrlolsrv2", )
|
||||
_shutdown = ("killall nrlolsrv2", )
|
||||
_validate = ("pidof nrlolsrv2", )
|
||||
_startup = ("nrlolsrv2",)
|
||||
_shutdown = ("killall nrlolsrv2",)
|
||||
_validate = ("pidof nrlolsrv2",)
|
||||
|
||||
@classmethod
|
||||
def getstartup(cls, node, services):
|
||||
''' Generate the appropriate command-line based on node interfaces.
|
||||
'''
|
||||
def getstartup(cls, node, services):
|
||||
"""
|
||||
Generate the appropriate command-line based on node interfaces.
|
||||
"""
|
||||
cmd = cls._startup[0]
|
||||
cmd += " -l /var/log/nrlolsrv2.log"
|
||||
cmd += " -rpipe %s_olsrv2" % node.name
|
||||
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
if "SMF" in servicenames:
|
||||
cmd += " -flooding ecds"
|
||||
cmd += " -smfClient %s_smf" % node.name
|
||||
|
||||
cmd += " -p olsr"
|
||||
|
||||
netifs = filter(lambda x: not getattr(x, 'control', False), \
|
||||
node.netifs())
|
||||
netifs = filter(lambda x: not getattr(x, 'control', False), node.netifs())
|
||||
if len(netifs) > 0:
|
||||
interfacenames = map(lambda x: x.name, netifs)
|
||||
cmd += " -i "
|
||||
cmd += " -i ".join(interfacenames)
|
||||
|
||||
return (cmd, )
|
||||
|
||||
addservice(NrlOlsrv2)
|
||||
|
||||
return cmd,
|
||||
|
||||
|
||||
class OlsrOrg(NrlService):
|
||||
''' Optimized Link State Routing protocol from olsr.org for MANET networks.
|
||||
'''
|
||||
"""
|
||||
Optimized Link State Routing protocol from olsr.org for MANET networks.
|
||||
"""
|
||||
_name = "OLSRORG"
|
||||
_configs = ("/etc/olsrd/olsrd.conf",)
|
||||
_dirs = ("/etc/olsrd",)
|
||||
_startup = ("olsrd", )
|
||||
_shutdown = ("killall olsrd", )
|
||||
_validate = ("pidof olsrd", )
|
||||
_startup = ("olsrd",)
|
||||
_shutdown = ("killall olsrd",)
|
||||
_validate = ("pidof olsrd",)
|
||||
|
||||
@classmethod
|
||||
def getstartup(cls, node, services):
|
||||
''' Generate the appropriate command-line based on node interfaces.
|
||||
'''
|
||||
def getstartup(cls, node, services):
|
||||
"""
|
||||
Generate the appropriate command-line based on node interfaces.
|
||||
"""
|
||||
cmd = cls._startup[0]
|
||||
netifs = filter(lambda x: not getattr(x, 'control', False), \
|
||||
node.netifs())
|
||||
netifs = filter(lambda x: not getattr(x, 'control', False), node.netifs())
|
||||
if len(netifs) > 0:
|
||||
interfacenames = map(lambda x: x.name, netifs)
|
||||
cmd += " -i "
|
||||
cmd += " -i ".join(interfacenames)
|
||||
|
||||
return (cmd, )
|
||||
return cmd,
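Both OLSR variants above use the same idiom to build their interface arguments: drop control interfaces, then repeat "-i" once per remaining interface name. A small self-contained sketch, with interfaces modelled as plain dicts for illustration:

# Sketch of the "-i <ifname>" repetition used by the OLSR startup generators.
def interface_flags(netifs):
    names = [ifc["name"] for ifc in netifs if not ifc.get("control", False)]
    if not names:
        return ""
    return " -i " + " -i ".join(names)

# interface_flags([{"name": "eth0"}, {"name": "ctrl0", "control": True}, {"name": "eth1"}])
# -> " -i eth0 -i eth1"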
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate a default olsrd config file to use the broadcast address of 255.255.255.255.
|
||||
'''
|
||||
"""
|
||||
Generate a default olsrd config file to use the broadcast address of 255.255.255.255.
|
||||
"""
|
||||
cfg = """\
|
||||
#
|
||||
# OLSR.org routing daemon config file
|
||||
|
@ -314,7 +315,7 @@ class OlsrOrg(NrlService):
|
|||
# 1 gets remapped by olsrd to 0 UNSPECIFIED (1 is reserved for ICMP redirects)
|
||||
# 2 KERNEL routes (not very wise to use)
|
||||
# 3 BOOT (should in fact not be used by routing daemons)
|
||||
# 4 STATIC
|
||||
# 4 STATIC
|
||||
# 8 .. 15 various routing daemons (gated, zebra, bird, & co)
|
||||
# (defaults to 0 which gets replaced by an OS-specific default value
|
||||
# under linux 3 (BOOT) (for backward compatibility)
|
||||
|
@ -510,7 +511,7 @@ LinkQualityFishEye 0
|
|||
# Olsrd plugins to load
|
||||
# This must be the absolute path to the file
|
||||
# or the loader will use the following scheme:
|
||||
# - Try the paths in the LD_LIBRARY_PATH
|
||||
# - Try the paths in the LD_LIBRARY_PATH
|
||||
# environment variable.
|
||||
# - The list of libraries cached in /etc/ld.so.cache
|
||||
# - /lib, followed by /usr/lib
|
||||
|
@ -566,11 +567,11 @@ InterfaceDefaults {
|
|||
"""
|
||||
return cfg
|
||||
|
||||
addservice(OlsrOrg)
|
||||
|
||||
class MgenActor(NrlService):
|
||||
''' ZpcMgenActor.
|
||||
'''
|
||||
"""
|
||||
ZpcMgenActor.
|
||||
"""
|
||||
|
||||
# a unique name is required, without spaces
|
||||
_name = "MgenActor"
|
||||
|
@ -582,53 +583,53 @@ class MgenActor(NrlService):
|
|||
_dirs = ()
|
||||
# generated files (without a full path this file goes in the node's dir,
|
||||
# e.g. /tmp/pycore.12345/n1.conf/)
|
||||
_configs = ('start_mgen_actor.sh', )
|
||||
_configs = ('start_mgen_actor.sh',)
|
||||
# this controls the starting order vs other enabled services
|
||||
_startindex = 50
|
||||
# list of startup commands, also may be generated during startup
|
||||
_startup = ("sh start_mgen_actor.sh", )
|
||||
_startup = ("sh start_mgen_actor.sh",)
|
||||
# list of validation commands
|
||||
_validate = ("pidof mgen", )
|
||||
_validate = ("pidof mgen",)
|
||||
# list of shutdown commands
|
||||
_shutdown = ("killall mgen", )
|
||||
_shutdown = ("killall mgen",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate a startup script for MgenActor. Because mgenActor does not
|
||||
"""
|
||||
Generate a startup script for MgenActor. Because mgenActor does not
|
||||
daemonize, it can cause problems in some situations when launched
|
||||
directly using vcmd.
|
||||
'''
|
||||
"""
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# auto-generated by nrl.py:MgenActor.generateconfig()\n"
|
||||
comments = ""
|
||||
cmd = "mgenBasicActor.py -n %s -a 0.0.0.0" % (node.name)
|
||||
cmd = "mgenBasicActor.py -n %s -a 0.0.0.0" % node.name
|
||||
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
netifs = filter(lambda x: not getattr(x, 'control', False), \
|
||||
node.netifs())
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
netifs = filter(lambda x: not getattr(x, 'control', False), node.netifs())
|
||||
if len(netifs) == 0:
|
||||
return ()
|
||||
|
||||
cfg += comments + cmd + " < /dev/null > /dev/null 2>&1 &\n\n"
|
||||
return cfg
|
||||
|
||||
# this line is required to add the above class to the list of available services
|
||||
addservice(MgenActor)
|
||||
|
||||
class Arouted(NrlService):
|
||||
''' Adaptive Routing
|
||||
'''
|
||||
"""
|
||||
Adaptive Routing
|
||||
"""
|
||||
_name = "arouted"
|
||||
_configs = ("startarouted.sh", )
|
||||
_configs = ("startarouted.sh",)
|
||||
_startindex = NrlService._startindex + 10
|
||||
_startup = ("sh startarouted.sh", )
|
||||
_shutdown = ("pkill arouted", )
|
||||
_validate = ("pidof arouted", )
|
||||
|
||||
_startup = ("sh startarouted.sh",)
|
||||
_shutdown = ("pkill arouted",)
|
||||
_validate = ("pidof arouted",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the Quagga.conf or quaggaboot.sh file contents.
|
||||
'''
|
||||
"""
|
||||
Return the Quagga.conf or quaggaboot.sh file contents.
|
||||
"""
|
||||
cfg = """
|
||||
#!/bin/sh
|
||||
for f in "/tmp/%s_smf"; do
|
||||
|
@ -643,12 +644,23 @@ for f in "/tmp/%s_smf"; do
|
|||
done
|
||||
done
|
||||
|
||||
""" % (node.name)
|
||||
""" % node.name
|
||||
cfg += "ip route add %s dev lo\n" % cls.firstipv4prefix(node, 24)
|
||||
cfg += "arouted instance %s_smf tap %s_tap" % (node.name, node.name)
|
||||
cfg += " stability 10" # seconds to consider a new route valid
|
||||
# seconds to consider a new route valid
|
||||
cfg += " stability 10"
|
||||
cfg += " 2>&1 > /var/log/arouted.log &\n\n"
|
||||
return cfg
|
||||
|
||||
# experimental
|
||||
#addservice(Arouted)
|
||||
|
||||
def load_services():
|
||||
ServiceManager.add(MgenSinkService)
|
||||
ServiceManager.add(NrlNhdp)
|
||||
ServiceManager.add(NrlSmf)
|
||||
ServiceManager.add(NrlOlsr)
|
||||
ServiceManager.add(NrlOlsrv2)
|
||||
ServiceManager.add(OlsrOrg)
|
||||
# this line is required to add the above class to the list of available services
|
||||
ServiceManager.add(MgenActor)
|
||||
# experimental
|
||||
# ServiceManager.add(Arouted)
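load_services() above registers each service class with ServiceManager.add(). The toy registry below only illustrates that call pattern; the real ServiceManager lives in core.service and its internals are not shown in this diff.

# Toy stand-in for the registration pattern; not the real core.service API.
class ToyServiceManager(object):
    services = {}

    @classmethod
    def add(cls, service):
        # index by the service's unique _name so it can be looked up later
        cls.services[service._name] = service

class DummyService(object):
    _name = "SMF"

ToyServiceManager.add(DummyService)
assert ToyServiceManager.services["SMF"] is DummyService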
|
||||
|
|
|
@ -1,48 +1,39 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
quagga.py: defines routing services provided by Quagga.
|
||||
'''
|
||||
"""
|
||||
quagga.py: defines routing services provided by Quagga.
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
if os.uname()[0] == "Linux":
|
||||
from core.netns import nodes
|
||||
elif os.uname()[0] == "FreeBSD":
|
||||
from core.bsd import nodes
|
||||
from core.service import CoreService, addservice
|
||||
from core.misc.ipaddr import IPv4Prefix, isIPv4Address, isIPv6Address
|
||||
from core.api import coreapi
|
||||
from core.constants import *
|
||||
from core import constants
|
||||
from core.enumerations import LinkTypes, NodeTypes
|
||||
from core.misc import ipaddress
|
||||
from core.misc import nodeutils
|
||||
from core.service import CoreService
|
||||
from core.service import ServiceManager
|
||||
|
||||
QUAGGA_USER="root"
|
||||
QUAGGA_GROUP="root"
|
||||
QUAGGA_USER = "root"
|
||||
QUAGGA_GROUP = "root"
|
||||
if os.uname()[0] == "FreeBSD":
|
||||
QUAGGA_GROUP="wheel"
|
||||
QUAGGA_GROUP = "wheel"
|
||||
|
||||
|
||||
class Zebra(CoreService):
|
||||
'''
|
||||
'''
|
||||
_name = "zebra"
|
||||
_group = "Quagga"
|
||||
_depends = ("vtysh", )
|
||||
_dirs = ("/usr/local/etc/quagga", "/var/run/quagga")
|
||||
_depends = ("vtysh",)
|
||||
_dirs = ("/usr/local/etc/quagga", "/var/run/quagga")
|
||||
_configs = ("/usr/local/etc/quagga/Quagga.conf",
|
||||
"quaggaboot.sh","/usr/local/etc/quagga/vtysh.conf")
|
||||
"quaggaboot.sh", "/usr/local/etc/quagga/vtysh.conf")
|
||||
_startindex = 35
|
||||
_startup = ("sh quaggaboot.sh zebra",)
|
||||
_shutdown = ("killall zebra", )
|
||||
_validate = ("pidof zebra", )
|
||||
_shutdown = ("killall zebra",)
|
||||
_validate = ("pidof zebra",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the Quagga.conf or quaggaboot.sh file contents.
|
||||
'''
|
||||
"""
|
||||
Return the Quagga.conf or quaggaboot.sh file contents.
|
||||
"""
|
||||
if filename == cls._configs[0]:
|
||||
return cls.generateQuaggaConf(node, services)
|
||||
elif filename == cls._configs[1]:
|
||||
|
@ -51,19 +42,21 @@ class Zebra(CoreService):
|
|||
return cls.generateVtyshConf(node, services)
|
||||
else:
|
||||
raise ValueError
|
||||
|
||||
|
||||
@classmethod
|
||||
def generateVtyshConf(cls, node, services):
|
||||
''' Returns configuration file text.
|
||||
'''
|
||||
"""
|
||||
Returns configuration file text.
|
||||
"""
|
||||
return "service integrated-vtysh-config\n"
|
||||
|
||||
@classmethod
|
||||
def generateQuaggaConf(cls, node, services):
|
||||
''' Returns configuration file text. Other services that depend on zebra
|
||||
will have generatequaggaifcconfig() and generatequaggaconfig()
|
||||
hooks that are invoked here.
|
||||
'''
|
||||
"""
|
||||
Returns configuration file text. Other services that depend on zebra
|
||||
will have generatequaggaifcconfig() and generatequaggaconfig()
|
||||
hooks that are invoked here.
|
||||
"""
|
||||
# we could verify here that filename == Quagga.conf
|
||||
cfg = ""
|
||||
for ifc in node.netifs():
|
||||
|
@ -81,7 +74,7 @@ class Zebra(CoreService):
|
|||
for s in services:
|
||||
if cls._name not in s._depends:
|
||||
continue
|
||||
ifccfg = s.generatequaggaifcconfig(node, ifc)
|
||||
ifccfg = s.generatequaggaifcconfig(node, ifc)
|
||||
if s._ipv4_routing:
|
||||
want_ipv4 = True
|
||||
if s._ipv6_routing:
|
||||
|
@ -89,47 +82,47 @@ class Zebra(CoreService):
|
|||
cfgv6 += ifccfg
|
||||
else:
|
||||
cfgv4 += ifccfg
|
||||
|
||||
|
||||
if want_ipv4:
|
||||
ipv4list = filter(lambda x: isIPv4Address(x.split('/')[0]),
|
||||
ifc.addrlist)
|
||||
ipv4list = filter(lambda x: ipaddress.is_ipv4_address(x.split('/')[0]), ifc.addrlist)
|
||||
cfg += " "
|
||||
cfg += "\n ".join(map(cls.addrstr, ipv4list))
|
||||
cfg += "\n"
|
||||
cfg += cfgv4
|
||||
if want_ipv6:
|
||||
ipv6list = filter(lambda x: isIPv6Address(x.split('/')[0]),
|
||||
ifc.addrlist)
|
||||
ipv6list = filter(lambda x: ipaddress.is_ipv6_address(x.split('/')[0]), ifc.addrlist)
|
||||
cfg += " "
|
||||
cfg += "\n ".join(map(cls.addrstr, ipv6list))
|
||||
cfg += "\n"
|
||||
cfg += cfgv6
|
||||
cfg += "!\n"
|
||||
|
||||
|
||||
for s in services:
|
||||
if cls._name not in s._depends:
|
||||
continue
|
||||
cfg += s.generatequaggaconfig(node)
|
||||
return cfg
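generateQuaggaConf() above splits each interface's address list by family before emitting the per-interface stanzas. The real code uses ipaddress.is_ipv4_address()/is_ipv6_address(); the sketch below substitutes a simple character test on the part before the prefix length, purely for illustration.

# Illustrative address-family split over "addr/prefixlen" strings.
def split_by_family(addrlist):
    v4 = [a for a in addrlist if "." in a.split("/")[0]]
    v6 = [a for a in addrlist if ":" in a.split("/")[0]]
    return v4, v6

v4, v6 = split_by_family(["10.0.0.1/24", "2001:db8::1/64"])
# v4 == ["10.0.0.1/24"], v6 == ["2001:db8::1/64"]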
|
||||
|
||||
|
||||
@staticmethod
|
||||
def addrstr(x):
|
||||
''' helper for mapping IP addresses to zebra config statements
|
||||
'''
|
||||
"""
|
||||
helper for mapping IP addresses to zebra config statements
|
||||
"""
|
||||
if x.find(".") >= 0:
|
||||
return "ip address %s" % x
|
||||
elif x.find(":") >= 0:
|
||||
return "ipv6 address %s" % x
|
||||
else:
|
||||
raise Value, "invalid address: %s", x
|
||||
|
||||
raise ValueError("invalid address: %s", x)
|
||||
|
||||
@classmethod
|
||||
def generateQuaggaBoot(cls, node, services):
|
||||
''' Generate a shell script used to boot the Quagga daemons.
|
||||
'''
|
||||
"""
|
||||
Generate a shell script used to boot the Quagga daemons.
|
||||
"""
|
||||
try:
|
||||
quagga_bin_search = node.session.cfg['quagga_bin_search']
|
||||
quagga_sbin_search = node.session.cfg['quagga_sbin_search']
|
||||
quagga_bin_search = node.session.config['quagga_bin_search']
|
||||
quagga_sbin_search = node.session.config['quagga_sbin_search']
|
||||
except KeyError:
|
||||
quagga_bin_search = '"/usr/local/bin /usr/bin /usr/lib/quagga"'
|
||||
quagga_sbin_search = '"/usr/local/sbin /usr/sbin /usr/lib/quagga"'
|
||||
|
@ -182,7 +175,7 @@ waitforvtyfiles()
|
|||
sleep 0.1
|
||||
count=$(($count + 1))
|
||||
done
|
||||
done
|
||||
done
|
||||
}
|
||||
|
||||
bootdaemon()
|
||||
|
@ -243,85 +236,87 @@ elif [ "$1" = "vtysh" ]; then
|
|||
else
|
||||
bootdaemon $1
|
||||
fi
|
||||
""" % (cls._configs[0], quagga_sbin_search, quagga_bin_search, \
|
||||
QUAGGA_STATE_DIR, QUAGGA_USER, QUAGGA_GROUP)
|
||||
""" % (cls._configs[0], quagga_sbin_search, quagga_bin_search, constants.QUAGGA_STATE_DIR, QUAGGA_USER, QUAGGA_GROUP)
|
||||
|
||||
addservice(Zebra)
|
||||
|
||||
class QuaggaService(CoreService):
|
||||
''' Parent class for Quagga services. Defines properties and methods
|
||||
common to Quagga's routing daemons.
|
||||
'''
|
||||
"""
|
||||
Parent class for Quagga services. Defines properties and methods
|
||||
common to Quagga's routing daemons.
|
||||
"""
|
||||
_name = "QuaggaDaemon"
|
||||
_group = "Quagga"
|
||||
_depends = ("zebra", )
|
||||
_depends = ("zebra",)
|
||||
_dirs = ()
|
||||
_configs = ()
|
||||
_startindex = 40
|
||||
_startup = ()
|
||||
_shutdown = ()
|
||||
_meta = "The config file for this service can be found in the Zebra service."
|
||||
|
||||
|
||||
_ipv4_routing = False
|
||||
_ipv6_routing = False
|
||||
|
||||
@staticmethod
|
||||
def routerid(node):
|
||||
''' Helper to return the first IPv4 address of a node as its router ID.
|
||||
'''
|
||||
"""
|
||||
Helper to return the first IPv4 address of a node as its router ID.
|
||||
"""
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") >= 0:
|
||||
return a .split('/') [0]
|
||||
#raise ValueError, "no IPv4 address found for router ID"
|
||||
return a.split('/')[0]
|
||||
# raise ValueError, "no IPv4 address found for router ID"
|
||||
return "0.0.0.0"
|
||||
|
||||
|
||||
@staticmethod
|
||||
def rj45check(ifc):
|
||||
''' Helper to detect whether interface is connected an external RJ45
|
||||
"""
|
||||
Helper to detect whether the interface is connected to an external RJ45
|
||||
link.
|
||||
'''
|
||||
"""
|
||||
if ifc.net:
|
||||
for peerifc in ifc.net.netifs():
|
||||
if peerifc == ifc:
|
||||
continue
|
||||
if isinstance(peerifc, nodes.RJ45Node):
|
||||
if nodeutils.is_node(peerifc, NodeTypes.RJ45):
|
||||
return True
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generateconfig(cls, node, filename, services):
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
def generatequaggaconfig(cls, node):
|
||||
return ""
|
||||
|
||||
|
||||
|
||||
class Ospfv2(QuaggaService):
|
||||
''' The OSPFv2 service provides IPv4 routing for wired networks. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified Quagga.conf file.
|
||||
'''
|
||||
"""
|
||||
The OSPFv2 service provides IPv4 routing for wired networks. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified Quagga.conf file.
|
||||
"""
|
||||
_name = "OSPFv2"
|
||||
_startup = ("sh quaggaboot.sh ospfd",)
|
||||
_shutdown = ("killall ospfd", )
|
||||
_validate = ("pidof ospfd", )
|
||||
_shutdown = ("killall ospfd",)
|
||||
_validate = ("pidof ospfd",)
|
||||
_ipv4_routing = True
|
||||
|
||||
@staticmethod
|
||||
def mtucheck(ifc):
|
||||
''' Helper to detect MTU mismatch and add the appropriate OSPF
|
||||
"""
|
||||
Helper to detect MTU mismatch and add the appropriate OSPF
|
||||
mtu-ignore command. This is needed when e.g. a node is linked via a
|
||||
GreTap device.
|
||||
'''
|
||||
"""
|
||||
if ifc.mtu != 1500:
|
||||
# a workaround for PhysicalNode GreTap, which has no knowledge of
|
||||
# the other nodes/nets
|
||||
|
@ -335,64 +330,68 @@ class Ospfv2(QuaggaService):
|
|||
|
||||
@staticmethod
|
||||
def ptpcheck(ifc):
|
||||
''' Helper to detect whether interface is connected to a notional
|
||||
"""
|
||||
Helper to detect whether interface is connected to a notional
|
||||
point-to-point link.
|
||||
'''
|
||||
if isinstance(ifc.net, nodes.PtpNet):
|
||||
"""
|
||||
if nodeutils.is_node(ifc.net, NodeTypes.PEER_TO_PEER):
|
||||
return " ip ospf network point-to-point\n"
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
def generatequaggaconfig(cls, node):
|
||||
cfg = "router ospf\n"
|
||||
rtrid = cls.routerid(node)
|
||||
cfg += " router-id %s\n" % rtrid
|
||||
# network 10.0.0.0/24 area 0
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") < 0:
|
||||
continue
|
||||
net = IPv4Prefix(a)
|
||||
net = ipaddress.Ipv4Prefix(a)
|
||||
cfg += " network %s area 0\n" % net
|
||||
cfg += "!\n"
|
||||
cfg += "!\n"
|
||||
return cfg
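Ospfv2.generatequaggaconfig() above turns each interface's IPv4 addresses into "network <prefix> area 0" statements via Ipv4Prefix. The sketch below reproduces the idea with Python 3's standard-library ipaddress module, so it is an approximation rather than the code CORE ships.

# Approximate "network <prefix> area 0" generation using the stdlib ipaddress module.
import ipaddress

def ospf_networks(addrlist):
    lines = []
    for a in addrlist:
        if "." not in a:
            continue                                   # skip IPv6 entries
        net = ipaddress.ip_interface(a).network        # 10.0.0.1/24 -> 10.0.0.0/24
        lines.append(" network %s area 0\n" % net)
    return "".join(lines)

# ospf_networks(["10.0.0.1/24"]) -> " network 10.0.0.0/24 area 0\n"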
|
||||
|
||||
@classmethod
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
return cls.mtucheck(ifc)
|
||||
#cfg = cls.mtucheck(ifc)
|
||||
# external RJ45 connections will use default OSPF timers
|
||||
#if cls.rj45check(ifc):
|
||||
# return cfg
|
||||
#cfg += cls.ptpcheck(ifc)
|
||||
|
||||
#return cfg + """\
|
||||
# ip ospf hello-interval 2
|
||||
@classmethod
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
return cls.mtucheck(ifc)
|
||||
# cfg = cls.mtucheck(ifc)
|
||||
# external RJ45 connections will use default OSPF timers
|
||||
# if cls.rj45check(ifc):
|
||||
# return cfg
|
||||
# cfg += cls.ptpcheck(ifc)
|
||||
|
||||
# return cfg + """\
|
||||
|
||||
|
||||
# ip ospf hello-interval 2
|
||||
# ip ospf dead-interval 6
|
||||
# ip ospf retransmit-interval 5
|
||||
#"""
|
||||
|
||||
addservice(Ospfv2)
|
||||
# """
|
||||
|
||||
|
||||
class Ospfv3(QuaggaService):
|
||||
''' The OSPFv3 service provides IPv6 routing for wired networks. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified Quagga.conf file.
|
||||
'''
|
||||
"""
|
||||
The OSPFv3 service provides IPv6 routing for wired networks. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified Quagga.conf file.
|
||||
"""
|
||||
_name = "OSPFv3"
|
||||
_startup = ("sh quaggaboot.sh ospf6d",)
|
||||
_shutdown = ("killall ospf6d", )
|
||||
_validate = ("pidof ospf6d", )
|
||||
_shutdown = ("killall ospf6d",)
|
||||
_validate = ("pidof ospf6d",)
|
||||
_ipv4_routing = True
|
||||
_ipv6_routing = True
|
||||
|
||||
@staticmethod
|
||||
def minmtu(ifc):
|
||||
''' Helper to discover the minimum MTU of interfaces linked with the
|
||||
"""
|
||||
Helper to discover the minimum MTU of interfaces linked with the
|
||||
given interface.
|
||||
'''
|
||||
"""
|
||||
mtu = ifc.mtu
|
||||
if not ifc.net:
|
||||
return mtu
|
||||
|
@ -400,13 +399,14 @@ class Ospfv3(QuaggaService):
|
|||
if i.mtu < mtu:
|
||||
mtu = i.mtu
|
||||
return mtu
|
||||
|
||||
|
||||
@classmethod
|
||||
def mtucheck(cls, ifc):
|
||||
''' Helper to detect MTU mismatch and add the appropriate OSPFv3
|
||||
"""
|
||||
Helper to detect MTU mismatch and add the appropriate OSPFv3
|
||||
ifmtu command. This is needed when e.g. a node is linked via a
|
||||
GreTap device.
|
||||
'''
|
||||
"""
|
||||
minmtu = cls.minmtu(ifc)
|
||||
if minmtu < ifc.mtu:
|
||||
return " ipv6 ospf6 ifmtu %d\n" % minmtu
|
||||
|
@ -415,57 +415,59 @@ class Ospfv3(QuaggaService):
|
|||
|
||||
@staticmethod
|
||||
def ptpcheck(ifc):
|
||||
''' Helper to detect whether interface is connected to a notional
|
||||
"""
|
||||
Helper to detect whether interface is connected to a notional
|
||||
point-to-point link.
|
||||
'''
|
||||
if isinstance(ifc.net, nodes.PtpNet):
|
||||
"""
|
||||
if nodeutils.is_node(ifc.net, NodeTypes.PEER_TO_PEER):
|
||||
return " ipv6 ospf6 network point-to-point\n"
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
def generatequaggaconfig(cls, node):
|
||||
cfg = "router ospf6\n"
|
||||
rtrid = cls.routerid(node)
|
||||
cfg += " router-id %s\n" % rtrid
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
cfg += " interface %s area 0.0.0.0\n" % ifc.name
|
||||
cfg += "!\n"
|
||||
return cfg
|
||||
|
||||
@classmethod
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
return cls.mtucheck(ifc)
|
||||
#cfg = cls.mtucheck(ifc)
|
||||
# external RJ45 connections will use default OSPF timers
|
||||
#if cls.rj45check(ifc):
|
||||
# return cfg
|
||||
#cfg += cls.ptpcheck(ifc)
|
||||
|
||||
#return cfg + """\
|
||||
# ipv6 ospf6 hello-interval 2
|
||||
@classmethod
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
return cls.mtucheck(ifc)
|
||||
# cfg = cls.mtucheck(ifc)
|
||||
# external RJ45 connections will use default OSPF timers
|
||||
# if cls.rj45check(ifc):
|
||||
# return cfg
|
||||
# cfg += cls.ptpcheck(ifc)
|
||||
|
||||
# return cfg + """\
|
||||
|
||||
|
||||
# ipv6 ospf6 hello-interval 2
|
||||
# ipv6 ospf6 dead-interval 6
|
||||
# ipv6 ospf6 retransmit-interval 5
|
||||
#"""
|
||||
# """
|
||||
|
||||
addservice(Ospfv3)
|
||||
|
||||
class Ospfv3mdr(Ospfv3):
|
||||
''' The OSPFv3 MANET Designated Router (MDR) service provides IPv6
|
||||
routing for wireless networks. It does not build its own
|
||||
configuration file but has hooks for adding to the
|
||||
unified Quagga.conf file.
|
||||
'''
|
||||
"""
|
||||
The OSPFv3 MANET Designated Router (MDR) service provides IPv6
|
||||
routing for wireless networks. It does not build its own
|
||||
configuration file but has hooks for adding to the
|
||||
unified Quagga.conf file.
|
||||
"""
|
||||
_name = "OSPFv3MDR"
|
||||
_ipv4_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
cfg = cls.mtucheck(ifc)
|
||||
cfg += " ipv6 ospf6 instance-id 65\n"
|
||||
if ifc.net is not None and \
|
||||
isinstance(ifc.net, (nodes.WlanNode, nodes.EmaneNode)):
|
||||
if ifc.net is not None and nodeutils.is_node(ifc.net, (NodeTypes.WIRELESS_LAN, NodeTypes.EMANE)):
|
||||
return cfg + """\
|
||||
ipv6 ospf6 hello-interval 2
|
||||
ipv6 ospf6 dead-interval 6
|
||||
|
@ -478,23 +480,23 @@ class Ospfv3mdr(Ospfv3):
|
|||
else:
|
||||
return cfg
|
||||
|
||||
addservice(Ospfv3mdr)
|
||||
|
||||
class Bgp(QuaggaService):
|
||||
'''' The BGP service provides interdomain routing.
|
||||
Peers must be manually configured, with a full mesh for those
|
||||
having the same AS number.
|
||||
'''
|
||||
"""
|
||||
The BGP service provides interdomain routing.
|
||||
Peers must be manually configured, with a full mesh for those
|
||||
having the same AS number.
|
||||
"""
|
||||
_name = "BGP"
|
||||
_startup = ("sh quaggaboot.sh bgpd",)
|
||||
_shutdown = ("killall bgpd", )
|
||||
_validate = ("pidof bgpd", )
|
||||
_shutdown = ("killall bgpd",)
|
||||
_validate = ("pidof bgpd",)
|
||||
_custom_needed = True
|
||||
_ipv4_routing = True
|
||||
_ipv6_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
def generatequaggaconfig(cls, node):
|
||||
cfg = "!\n! BGP configuration\n!\n"
|
||||
cfg += "! You should configure the AS number below,\n"
|
||||
cfg += "! along with this router's peers.\n!\n"
|
||||
|
@ -505,19 +507,19 @@ class Bgp(QuaggaService):
|
|||
cfg += "! neighbor 1.2.3.4 remote-as 555\n!\n"
|
||||
return cfg
|
||||
|
||||
addservice(Bgp)
|
||||
|
||||
class Rip(QuaggaService):
|
||||
''' The RIP service provides IPv4 routing for wired networks.
|
||||
'''
|
||||
"""
|
||||
The RIP service provides IPv4 routing for wired networks.
|
||||
"""
|
||||
_name = "RIP"
|
||||
_startup = ("sh quaggaboot.sh ripd",)
|
||||
_shutdown = ("killall ripd", )
|
||||
_validate = ("pidof ripd", )
|
||||
_shutdown = ("killall ripd",)
|
||||
_validate = ("pidof ripd",)
|
||||
_ipv4_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
def generatequaggaconfig(cls, node):
|
||||
cfg = """\
|
||||
router rip
|
||||
redistribute static
|
||||
|
@ -528,19 +530,19 @@ router rip
|
|||
"""
|
||||
return cfg
|
||||
|
||||
addservice(Rip)
|
||||
|
||||
class Ripng(QuaggaService):
|
||||
''' The RIP NG service provides IPv6 routing for wired networks.
|
||||
'''
|
||||
"""
|
||||
The RIP NG service provides IPv6 routing for wired networks.
|
||||
"""
|
||||
_name = "RIPNG"
|
||||
_startup = ("sh quaggaboot.sh ripngd",)
|
||||
_shutdown = ("killall ripngd", )
|
||||
_validate = ("pidof ripngd", )
|
||||
_shutdown = ("killall ripngd",)
|
||||
_validate = ("pidof ripngd",)
|
||||
_ipv6_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
def generatequaggaconfig(cls, node):
|
||||
cfg = """\
|
||||
router ripng
|
||||
redistribute static
|
||||
|
@ -551,50 +553,49 @@ router ripng
|
|||
"""
|
||||
return cfg
|
||||
|
||||
addservice(Ripng)
|
||||
|
||||
class Babel(QuaggaService):
|
||||
''' The Babel service provides a loop-avoiding distance-vector routing
|
||||
"""
|
||||
The Babel service provides a loop-avoiding distance-vector routing
|
||||
protocol for IPv6 and IPv4 with fast convergence properties.
|
||||
'''
|
||||
"""
|
||||
_name = "Babel"
|
||||
_startup = ("sh quaggaboot.sh babeld",)
|
||||
_shutdown = ("killall babeld", )
|
||||
_validate = ("pidof babeld", )
|
||||
_shutdown = ("killall babeld",)
|
||||
_validate = ("pidof babeld",)
|
||||
_ipv6_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
def generatequaggaconfig(cls, node):
|
||||
cfg = "router babel\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
cfg += " network %s\n" % ifc.name
|
||||
cfg += " redistribute static\n redistribute connected\n"
|
||||
return cfg
|
||||
|
||||
|
||||
@classmethod
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
type = "wired"
|
||||
if ifc.net and ifc.net.linktype == coreapi.CORE_LINK_WIRELESS:
|
||||
if ifc.net and ifc.net.linktype == LinkTypes.WIRELESS.value:
|
||||
return " babel wireless\n no babel split-horizon\n"
|
||||
else:
|
||||
return " babel wired\n babel split-horizon\n"
|
||||
|
||||
addservice(Babel)
|
||||
|
||||
class Xpimd(QuaggaService):
|
||||
'''\
|
||||
"""
|
||||
PIM multicast routing based on XORP.
|
||||
'''
|
||||
"""
|
||||
_name = 'Xpimd'
|
||||
_startup = ('sh quaggaboot.sh xpimd',)
|
||||
_shutdown = ('killall xpimd', )
|
||||
_validate = ('pidof xpimd', )
|
||||
_shutdown = ('killall xpimd',)
|
||||
_validate = ('pidof xpimd',)
|
||||
_ipv4_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
def generatequaggaconfig(cls, node):
|
||||
ifname = 'eth0'
|
||||
for ifc in node.netifs():
|
||||
if ifc.name != 'lo':
|
||||
|
@ -610,15 +611,15 @@ class Xpimd(QuaggaService):
|
|||
return cfg
|
||||
|
||||
@classmethod
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
return ' ip mfea\n ip igmp\n ip pim\n'
|
||||
|
||||
addservice(Xpimd)
|
||||
|
||||
class Vtysh(CoreService):
|
||||
''' Simple service to run vtysh -b (boot) after all Quagga daemons have
|
||||
started.
|
||||
'''
|
||||
"""
|
||||
Simple service to run vtysh -b (boot) after all Quagga daemons have
|
||||
started.
|
||||
"""
|
||||
_name = "vtysh"
|
||||
_group = "Quagga"
|
||||
_startindex = 45
|
||||
|
@ -629,6 +630,15 @@ class Vtysh(CoreService):
|
|||
def generateconfig(cls, node, filename, services):
|
||||
return ""
|
||||
|
||||
addservice(Vtysh)
|
||||
|
||||
|
||||
def load_services():
|
||||
ServiceManager.add(Zebra)
|
||||
ServiceManager.add(Ospfv2)
|
||||
ServiceManager.add(Ospfv3)
|
||||
ServiceManager.add(Ospfv3mdr)
|
||||
ServiceManager.add(Bgp)
|
||||
ServiceManager.add(Rip)
|
||||
ServiceManager.add(Ripng)
|
||||
ServiceManager.add(Babel)
|
||||
ServiceManager.add(Xpimd)
|
||||
ServiceManager.add(Vtysh)
|
||||
|
|
|
@ -1,83 +1,75 @@
|
|||
#
|
||||
# CORE - define security services : vpnclient, vpnserver, ipsec and firewall
|
||||
#
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
'''
|
||||
security.py: defines security services (vpnclient, vpnserver, ipsec and
|
||||
"""
|
||||
security.py: defines security services (vpnclient, vpnserver, ipsec and
|
||||
firewall)
|
||||
'''
|
||||
"""
|
||||
|
||||
import os
|
||||
from core import constants
|
||||
from core.misc import log
|
||||
from core.service import CoreService
|
||||
from core.service import ServiceManager
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
from core.service import CoreService, addservice
|
||||
from core.constants import *
|
||||
|
||||
class VPNClient(CoreService):
|
||||
'''
|
||||
'''
|
||||
_name = "VPNClient"
|
||||
_group = "Security"
|
||||
_configs = ('vpnclient.sh', )
|
||||
_configs = ('vpnclient.sh',)
|
||||
_startindex = 60
|
||||
_startup = ('sh vpnclient.sh',)
|
||||
_shutdown = ("killall openvpn",)
|
||||
_validate = ("pidof openvpn", )
|
||||
_validate = ("pidof openvpn",)
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the client.conf and vpnclient.sh file contents to
|
||||
'''
|
||||
"""
|
||||
Return the client.conf and vpnclient.sh file contents to
|
||||
"""
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# custom VPN Client configuration for service (security.py)\n"
|
||||
fname = "%s/examples/services/sampleVPNClient" % CORE_DATA_DIR
|
||||
fname = "%s/examples/services/sampleVPNClient" % constants.CORE_DATA_DIR
|
||||
|
||||
try:
|
||||
cfg += open(fname, "rb").read()
|
||||
except e:
|
||||
print "Error opening VPN client configuration template (%s): %s" % \
|
||||
(fname, e)
|
||||
except IOError:
|
||||
logger.exception("Error opening VPN client configuration template (%s)", fname)
|
||||
|
||||
return cfg
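The security services above now catch IOError and log via logger.exception() instead of printing when the sample template cannot be read. A self-contained sketch of that pattern is below; the template path is hypothetical.

# Sketch of the template-append pattern: read an optional sample file, log on failure.
import logging

logger = logging.getLogger(__name__)

def append_template(cfg, fname):
    try:
        with open(fname, "rb") as f:
            cfg += f.read().decode("utf-8", "replace")
    except IOError:
        logger.exception("error opening configuration template (%s)", fname)
    return cfg

cfg = append_template("#!/bin/sh\n", "/tmp/sampleVPNClient")   # hypothetical path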
|
||||
|
||||
# this line is required to add the above class to the list of available services
|
||||
addservice(VPNClient)
|
||||
|
||||
class VPNServer(CoreService):
|
||||
'''
|
||||
'''
|
||||
_name = "VPNServer"
|
||||
_group = "Security"
|
||||
_configs = ('vpnserver.sh', )
|
||||
_configs = ('vpnserver.sh',)
|
||||
_startindex = 50
|
||||
_startup = ('sh vpnserver.sh',)
|
||||
_shutdown = ("killall openvpn",)
|
||||
_validate = ("pidof openvpn", )
|
||||
_validate = ("pidof openvpn",)
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the sample server.conf and vpnserver.sh file contents to
|
||||
GUI for user customization.
|
||||
'''
|
||||
"""
|
||||
Return the sample server.conf and vpnserver.sh file contents to
|
||||
GUI for user customization.
|
||||
"""
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# custom VPN Server Configuration for service (security.py)\n"
|
||||
fname = "%s/examples/services/sampleVPNServer" % CORE_DATA_DIR
|
||||
fname = "%s/examples/services/sampleVPNServer" % constants.CORE_DATA_DIR
|
||||
|
||||
try:
|
||||
cfg += open(fname, "rb").read()
|
||||
except e:
|
||||
print "Error opening VPN server configuration template (%s): %s" % \
|
||||
(fname, e)
|
||||
except IOError:
|
||||
logger.exception("Error opening VPN server configuration template (%s)", fname)
|
||||
|
||||
return cfg
|
||||
|
||||
addservice(VPNServer)
|
||||
|
||||
class IPsec(CoreService):
|
||||
'''
|
||||
'''
|
||||
_name = "IPsec"
|
||||
_group = "Security"
|
||||
_configs = ('ipsec.sh', )
|
||||
_configs = ('ipsec.sh',)
|
||||
_startindex = 60
|
||||
_startup = ('sh ipsec.sh',)
|
||||
_shutdown = ("killall racoon",)
|
||||
|
@ -85,45 +77,51 @@ class IPsec(CoreService):
|
|||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the ipsec.conf and racoon.conf file contents to
|
||||
GUI for user customization.
|
||||
'''
|
||||
"""
|
||||
Return the ipsec.conf and racoon.conf file contents to
|
||||
GUI for user customization.
|
||||
"""
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# set up static tunnel mode security assocation for service "
|
||||
cfg += "(security.py)\n"
|
||||
fname = "%s/examples/services/sampleIPsec" % CORE_DATA_DIR
|
||||
fname = "%s/examples/services/sampleIPsec" % constants.CORE_DATA_DIR
|
||||
|
||||
try:
|
||||
cfg += open(fname, "rb").read()
|
||||
except e:
|
||||
print "Error opening IPsec configuration template (%s): %s" % \
|
||||
(fname, e)
|
||||
except IOError:
|
||||
logger.exception("Error opening IPsec configuration template (%s)", fname)
|
||||
|
||||
return cfg
|
||||
|
||||
addservice(IPsec)
|
||||
|
||||
class Firewall(CoreService):
|
||||
'''
|
||||
'''
|
||||
_name = "Firewall"
|
||||
_group = "Security"
|
||||
_configs = ('firewall.sh', )
|
||||
_configs = ('firewall.sh',)
|
||||
_startindex = 20
|
||||
_startup = ('sh firewall.sh',)
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the firewall rule examples to GUI for user customization.
|
||||
'''
|
||||
"""
|
||||
Return the firewall rule examples to GUI for user customization.
|
||||
"""
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# custom node firewall rules for service (security.py)\n"
|
||||
fname = "%s/examples/services/sampleFirewall" % CORE_DATA_DIR
|
||||
fname = "%s/examples/services/sampleFirewall" % constants.CORE_DATA_DIR
|
||||
|
||||
try:
|
||||
cfg += open(fname, "rb").read()
|
||||
except e:
|
||||
print "Error opening Firewall configuration template (%s): %s" % \
|
||||
(fname, e)
|
||||
except IOError:
|
||||
logger.exception("Error opening Firewall configuration template (%s)", fname)
|
||||
|
||||
return cfg
|
||||
|
||||
addservice(Firewall)
|
||||
|
||||
def load_services():
|
||||
# this line is required to add the above class to the list of available services
|
||||
ServiceManager.add(VPNClient)
|
||||
ServiceManager.add(VPNServer)
|
||||
ServiceManager.add(IPsec)
|
||||
ServiceManager.add(Firewall)
|
||||
|
|
|
@ -1,23 +1,27 @@
|
|||
from core.service import CoreService, addservice
|
||||
from sys import maxint
|
||||
from inspect import isclass
|
||||
from sys import maxint
|
||||
|
||||
from core.service import CoreService
|
||||
from core.service import ServiceManager
|
||||
|
||||
|
||||
class Startup(CoreService):
|
||||
'A CORE service to start other services in order, serially'
|
||||
"""
|
||||
A CORE service to start other services in order, serially
|
||||
"""
|
||||
_name = 'startup'
|
||||
_group = 'Utility'
|
||||
_depends = ()
|
||||
_dirs = ()
|
||||
_configs = ('startup.sh', )
|
||||
_configs = ('startup.sh',)
|
||||
_startindex = maxint
|
||||
_startup = ('sh startup.sh', )
|
||||
_startup = ('sh startup.sh',)
|
||||
_shutdown = ()
|
||||
_validate = ()
|
||||
|
||||
@staticmethod
|
||||
def isStartupService(s):
|
||||
return isinstance(s, Startup) or \
|
||||
(isclass(s) and issubclass(s, Startup))
|
||||
def is_startup_service(s):
|
||||
return isinstance(s, Startup) or (isclass(s) and issubclass(s, Startup))
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
|
@ -26,12 +30,14 @@ class Startup(CoreService):
|
|||
script = '#!/bin/sh\n' \
|
||||
'# auto-generated by Startup (startup.py)\n\n' \
|
||||
'exec > startup.log 2>&1\n\n'
|
||||
for s in sorted(services, key = lambda x: x._startindex):
|
||||
if cls.isStartupService(s) or len(str(s._starttime)) > 0:
|
||||
for s in sorted(services, key=lambda x: x._startindex):
|
||||
if cls.is_startup_service(s) or len(str(s._starttime)) > 0:
|
||||
continue
|
||||
start = '\n'.join(s.getstartup(node, services))
|
||||
if start:
|
||||
script += start + '\n'
|
||||
return script
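Startup.generateconfig() above orders the other services by _startindex and concatenates their startup commands into one shell script. The sketch below keeps only that ordering step, modelling services as plain dicts and omitting the _starttime / is_startup_service filtering.

# Ordering sketch: sort by start index, then emit each service's startup lines.
def build_startup_script(services):
    script = "#!/bin/sh\n"
    for svc in sorted(services, key=lambda s: s["startindex"]):
        script += "\n".join(svc["startup"]) + "\n"
    return script

svcs = [{"startindex": 50, "startup": ["sh start_mgen_actor.sh"]},
        {"startindex": 35, "startup": ["sh quaggaboot.sh zebra"]}]
# zebra (index 35) is emitted before the mgen actor (index 50)
script = build_startup_script(svcs)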
|
||||
|
||||
addservice(Startup)
|
||||
|
||||
def load_services():
|
||||
ServiceManager.add(Startup)
|
||||
|
|
|
@ -1,189 +1,185 @@
|
|||
#
|
||||
# CORE configuration for UCARP
|
||||
# Copyright (c) 2012 Jonathan deBoer
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
#
|
||||
# author: Jonathan deBoer <jdccdevel@gmail.com>
|
||||
#
|
||||
'''
|
||||
ucarp.py: defines high-availability IP address controlled by ucarp
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
from core.service import CoreService, addservice
|
||||
from core.misc.ipaddr import IPv4Prefix
|
||||
from core.constants import *
|
||||
|
||||
|
||||
UCARP_ETC="/usr/local/etc/ucarp"
|
||||
|
||||
class Ucarp(CoreService):
|
||||
'''
|
||||
'''
|
||||
_name = "ucarp"
|
||||
_group = "Utility"
|
||||
_depends = ( )
|
||||
_dirs = (UCARP_ETC, )
|
||||
_configs = (UCARP_ETC + "/default.sh", UCARP_ETC + "/default-up.sh", UCARP_ETC + "/default-down.sh", "ucarpboot.sh",)
|
||||
_startindex = 65
|
||||
_startup = ("sh ucarpboot.sh",)
|
||||
_shutdown = ("killall ucarp", )
|
||||
_validate = ("pidof ucarp", )
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the default file contents
|
||||
'''
|
||||
if filename == cls._configs[0]:
|
||||
return cls.generateUcarpConf(node, services)
|
||||
elif filename == cls._configs[1]:
|
||||
return cls.generateVipUp(node, services)
|
||||
elif filename == cls._configs[2]:
|
||||
return cls.generateVipDown(node, services)
|
||||
elif filename == cls._configs[3]:
|
||||
return cls.generateUcarpBoot(node, services)
|
||||
else:
|
||||
raise ValueError
|
||||
|
||||
@classmethod
|
||||
def generateUcarpConf(cls, node, services):
|
||||
''' Returns configuration file text.
|
||||
'''
|
||||
try:
|
||||
ucarp_bin = node.session.cfg['ucarp_bin']
|
||||
except KeyError:
|
||||
ucarp_bin = "/usr/sbin/ucarp"
|
||||
return """\
|
||||
#!/bin/sh
|
||||
# Location of UCARP executable
|
||||
UCARP_EXEC=%s
|
||||
|
||||
# Location of the UCARP config directory
|
||||
UCARP_CFGDIR=%s
|
||||
|
||||
# Logging Facility
|
||||
FACILITY=daemon
|
||||
|
||||
# Instance ID
|
||||
# Any number from 1 to 255
|
||||
INSTANCE_ID=1
|
||||
|
||||
# Password
|
||||
# Master and Backup(s) need to be the same
|
||||
PASSWORD="changeme"
|
||||
|
||||
# The failover application address
|
||||
VIRTUAL_ADDRESS=127.0.0.254
|
||||
VIRTUAL_NET=8
|
||||
|
||||
# Interface for IP Address
|
||||
INTERFACE=lo
|
||||
|
||||
# Maintanence address of the local machine
|
||||
SOURCE_ADDRESS=127.0.0.1
|
||||
|
||||
# The ratio number to be considered before marking the node as dead
|
||||
DEAD_RATIO=3
|
||||
|
||||
# UCARP base, lower number will be preferred master
|
||||
# set to same to have master stay as long as possible
|
||||
UCARP_BASE=1
|
||||
SKEW=0
|
||||
|
||||
# UCARP options
|
||||
# -z run shutdown script on exit
|
||||
# -P force preferred master
|
||||
# -n don't run down script at start up when we are backup
|
||||
# -M use broadcast instead of multicast
|
||||
# -S ignore interface state
|
||||
OPTIONS="-z -n -M"
|
||||
|
||||
# Send extra parameter to down and up scripts
|
||||
#XPARAM="-x <enter param here>"
|
||||
XPARAM="-x ${VIRTUAL_NET}"
|
||||
|
||||
# The start and stop scripts
|
||||
START_SCRIPT=${UCARP_CFGDIR}/default-up.sh
|
||||
STOP_SCRIPT=${UCARP_CFGDIR}/default-down.sh
|
||||
|
||||
# These line should not need to be touched
|
||||
UCARP_OPTS="$OPTIONS -b $UCARP_BASE -k $SKEW -i $INTERFACE -v $INSTANCE_ID -p $PASSWORD -u $START_SCRIPT -d $STOP_SCRIPT -a $VIRTUAL_ADDRESS -s $SOURCE_ADDRESS -f $FACILITY $XPARAM"
|
||||
|
||||
${UCARP_EXEC} -B ${UCARP_OPTS}
|
||||
""" % (ucarp_bin, UCARP_ETC)
|
||||
|
||||
@classmethod
|
||||
def generateUcarpBoot(cls, node, services):
|
||||
''' Generate a shell script used to boot the Ucarp daemons.
|
||||
'''
|
||||
try:
|
||||
ucarp_bin = node.session.cfg['ucarp_bin']
|
||||
except KeyError:
|
||||
ucarp_bin = "/usr/sbin/ucarp"
|
||||
return """\
|
||||
#!/bin/sh
|
||||
# Location of the UCARP config directory
|
||||
UCARP_CFGDIR=%s
|
||||
|
||||
chmod a+x ${UCARP_CFGDIR}/*.sh
|
||||
|
||||
# Start the default ucarp daemon configuration
|
||||
${UCARP_CFGDIR}/default.sh
|
||||
|
||||
""" % (UCARP_ETC)
|
||||
|
||||
@classmethod
|
||||
def generateVipUp(cls, node, services):
|
||||
''' Generate a shell script used to start the virtual ip
|
||||
'''
|
||||
try:
|
||||
ucarp_bin = node.session.cfg['ucarp_bin']
|
||||
except KeyError:
|
||||
ucarp_bin = "/usr/sbin/ucarp"
|
||||
return """\
|
||||
#!/bin/bash
|
||||
|
||||
# Should be invoked as "default-up.sh <dev> <ip>"
|
||||
exec 2> /dev/null
|
||||
|
||||
IP="${2}"
|
||||
NET="${3}"
|
||||
if [ -z "$NET" ]; then
|
||||
NET="24"
|
||||
fi
|
||||
|
||||
/sbin/ip addr add ${IP}/${NET} dev "$1"
|
||||
|
||||
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def generateVipDown(cls, node, services):
|
||||
''' Generate a shell script used to stop the virtual ip
|
||||
'''
|
||||
try:
|
||||
ucarp_bin = node.session.cfg['ucarp_bin']
|
||||
except KeyError:
|
||||
ucarp_bin = "/usr/sbin/ucarp"
|
||||
return """\
|
||||
#!/bin/bash
|
||||
|
||||
# Should be invoked as "default-down.sh <dev> <ip>"
|
||||
exec 2> /dev/null
|
||||
|
||||
IP="${2}"
|
||||
NET="${3}"
|
||||
if [ -z "$NET" ]; then
|
||||
NET="24"
|
||||
fi
|
||||
|
||||
/sbin/ip addr del ${IP}/${NET} dev "$1"
|
||||
|
||||
|
||||
"""
|
||||
|
||||
|
||||
addservice(Ucarp)
|
||||
|
||||
"""
|
||||
ucarp.py: defines high-availability IP address controlled by ucarp
|
||||
"""
|
||||
|
||||
from core.service import CoreService
|
||||
from core.service import ServiceManager
|
||||
|
||||
UCARP_ETC = "/usr/local/etc/ucarp"
|
||||
|
||||
|
||||
class Ucarp(CoreService):
|
||||
_name = "ucarp"
|
||||
_group = "Utility"
|
||||
_depends = ( )
|
||||
_dirs = (UCARP_ETC,)
|
||||
_configs = (
|
||||
UCARP_ETC + "/default.sh", UCARP_ETC + "/default-up.sh", UCARP_ETC + "/default-down.sh", "ucarpboot.sh",)
|
||||
_startindex = 65
|
||||
_startup = ("sh ucarpboot.sh",)
|
||||
_shutdown = ("killall ucarp",)
|
||||
_validate = ("pidof ucarp",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
"""
|
||||
Return the default file contents
|
||||
"""
|
||||
if filename == cls._configs[0]:
|
||||
return cls.generateUcarpConf(node, services)
|
||||
elif filename == cls._configs[1]:
|
||||
return cls.generateVipUp(node, services)
|
||||
elif filename == cls._configs[2]:
|
||||
return cls.generateVipDown(node, services)
|
||||
elif filename == cls._configs[3]:
|
||||
return cls.generateUcarpBoot(node, services)
|
||||
else:
|
||||
raise ValueError
|
||||
|
||||
@classmethod
|
||||
def generateUcarpConf(cls, node, services):
|
||||
"""
|
||||
Returns configuration file text.
|
||||
"""
|
||||
try:
|
||||
ucarp_bin = node.session.cfg['ucarp_bin']
|
||||
except KeyError:
|
||||
ucarp_bin = "/usr/sbin/ucarp"
|
||||
|
||||
return """\
|
||||
#!/bin/sh
|
||||
# Location of UCARP executable
|
||||
UCARP_EXEC=%s
|
||||
|
||||
# Location of the UCARP config directory
|
||||
UCARP_CFGDIR=%s
|
||||
|
||||
# Logging Facility
|
||||
FACILITY=daemon
|
||||
|
||||
# Instance ID
|
||||
# Any number from 1 to 255
|
||||
INSTANCE_ID=1
|
||||
|
||||
# Password
|
||||
# Master and Backup(s) need to be the same
|
||||
PASSWORD="changeme"
|
||||
|
||||
# The failover application address
|
||||
VIRTUAL_ADDRESS=127.0.0.254
|
||||
VIRTUAL_NET=8
|
||||
|
||||
# Interface for IP Address
|
||||
INTERFACE=lo
|
||||
|
||||
# Maintenance address of the local machine
|
||||
SOURCE_ADDRESS=127.0.0.1
|
||||
|
||||
# The ratio number to be considered before marking the node as dead
|
||||
DEAD_RATIO=3
|
||||
|
||||
# UCARP base, lower number will be preferred master
|
||||
# set to the same value to have the master stay as long as possible
|
||||
UCARP_BASE=1
|
||||
SKEW=0
|
||||
|
||||
# UCARP options
|
||||
# -z run shutdown script on exit
|
||||
# -P force preferred master
|
||||
# -n don't run down script at start up when we are backup
|
||||
# -M use broadcast instead of multicast
|
||||
# -S ignore interface state
|
||||
OPTIONS="-z -n -M"
|
||||
|
||||
# Send extra parameter to down and up scripts
|
||||
#XPARAM="-x <enter param here>"
|
||||
XPARAM="-x ${VIRTUAL_NET}"
|
||||
|
||||
# The start and stop scripts
|
||||
START_SCRIPT=${UCARP_CFGDIR}/default-up.sh
|
||||
STOP_SCRIPT=${UCARP_CFGDIR}/default-down.sh
|
||||
|
||||
# These lines should not need to be touched
|
||||
UCARP_OPTS="$OPTIONS -b $UCARP_BASE -k $SKEW -i $INTERFACE -v $INSTANCE_ID -p $PASSWORD -u $START_SCRIPT -d $STOP_SCRIPT -a $VIRTUAL_ADDRESS -s $SOURCE_ADDRESS -f $FACILITY $XPARAM"
|
||||
|
||||
${UCARP_EXEC} -B ${UCARP_OPTS}
|
||||
""" % (ucarp_bin, UCARP_ETC)
|
||||
|
||||
@classmethod
|
||||
def generateUcarpBoot(cls, node, services):
|
||||
"""
|
||||
Generate a shell script used to boot the Ucarp daemons.
|
||||
"""
|
||||
|
||||
try:
|
||||
ucarp_bin = node.session.cfg['ucarp_bin']
|
||||
except KeyError:
|
||||
ucarp_bin = "/usr/sbin/ucarp"
|
||||
return """\
|
||||
#!/bin/sh
|
||||
# Location of the UCARP config directory
|
||||
UCARP_CFGDIR=%s
|
||||
|
||||
chmod a+x ${UCARP_CFGDIR}/*.sh
|
||||
|
||||
# Start the default ucarp daemon configuration
|
||||
${UCARP_CFGDIR}/default.sh
|
||||
|
||||
""" % UCARP_ETC
|
||||
|
||||
@classmethod
|
||||
def generateVipUp(cls, node, services):
|
||||
"""
|
||||
Generate a shell script used to start the virtual ip
|
||||
"""
|
||||
try:
|
||||
ucarp_bin = node.session.cfg['ucarp_bin']
|
||||
except KeyError:
|
||||
ucarp_bin = "/usr/sbin/ucarp"
|
||||
|
||||
return """\
|
||||
#!/bin/bash
|
||||
|
||||
# Should be invoked as "default-up.sh <dev> <ip>"
|
||||
exec 2> /dev/null
|
||||
|
||||
IP="${2}"
|
||||
NET="${3}"
|
||||
if [ -z "$NET" ]; then
|
||||
NET="24"
|
||||
fi
|
||||
|
||||
/sbin/ip addr add ${IP}/${NET} dev "$1"
|
||||
|
||||
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def generateVipDown(cls, node, services):
|
||||
"""
|
||||
Generate a shell script used to stop the virtual ip
|
||||
"""
|
||||
try:
|
||||
ucarp_bin = node.session.cfg['ucarp_bin']
|
||||
except KeyError:
|
||||
ucarp_bin = "/usr/sbin/ucarp"
|
||||
return """\
|
||||
#!/bin/bash
|
||||
|
||||
# Should be invoked as "default-down.sh <dev> <ip>"
|
||||
exec 2> /dev/null
|
||||
|
||||
IP="${2}"
|
||||
NET="${3}"
|
||||
if [ -z "$NET" ]; then
|
||||
NET="24"
|
||||
fi
|
||||
|
||||
/sbin/ip addr del ${IP}/${NET} dev "$1"
|
||||
|
||||
|
||||
"""
|
||||
|
||||
|
||||
def load_services():
|
||||
ServiceManager.add(Ucarp)
|
||||
|
|
|
@ -1,24 +1,22 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2014 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
utility.py: defines miscellaneous utility services.
|
||||
'''
|
||||
"""
|
||||
utility.py: defines miscellaneous utility services.
|
||||
"""
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
from core import constants
|
||||
from core.misc import utils
|
||||
from core.misc.ipaddress import Ipv4Prefix
|
||||
from core.misc.ipaddress import Ipv6Prefix
|
||||
from core.service import CoreService
|
||||
from core.service import ServiceManager
|
||||
|
||||
from core.service import CoreService, addservice
|
||||
from core.misc.ipaddr import IPv4Prefix, IPv6Prefix
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
|
||||
class UtilService(CoreService):
|
||||
''' Parent class for utility services.
|
||||
'''
|
||||
"""
|
||||
Parent class for utility services.
|
||||
"""
|
||||
_name = "UtilityProcess"
|
||||
_group = "Utility"
|
||||
_depends = ()
|
||||
|
@ -29,15 +27,16 @@ class UtilService(CoreService):
|
|||
_shutdown = ()
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generateconfig(cls, node, filename, services):
|
||||
return ""
|
||||
|
||||
|
||||
class IPForwardService(UtilService):
|
||||
_name = "IPForward"
|
||||
_configs = ("ipforward.sh", )
|
||||
_configs = ("ipforward.sh",)
|
||||
_startindex = 5
|
||||
_startup = ("sh ipforward.sh", )
|
||||
|
||||
_startup = ("sh ipforward.sh",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
if os.uname()[0] == "Linux":
|
||||
|
@ -60,13 +59,13 @@ class IPForwardService(UtilService):
|
|||
%(sysctl)s -w net.ipv4.conf.default.send_redirects=0
|
||||
%(sysctl)s -w net.ipv4.conf.all.rp_filter=0
|
||||
%(sysctl)s -w net.ipv4.conf.default.rp_filter=0
|
||||
""" % {'sysctl': SYSCTL_BIN}
|
||||
""" % {'sysctl': constants.SYSCTL_BIN}
|
||||
for ifc in node.netifs():
|
||||
name = sysctldevname(ifc.name)
|
||||
cfg += "%s -w net.ipv4.conf.%s.forwarding=1\n" % (SYSCTL_BIN, name)
|
||||
name = utils.sysctldevname(ifc.name)
|
||||
cfg += "%s -w net.ipv4.conf.%s.forwarding=1\n" % (constants.SYSCTL_BIN, name)
|
||||
cfg += "%s -w net.ipv4.conf.%s.send_redirects=0\n" % \
|
||||
(SYSCTL_BIN, name)
|
||||
cfg += "%s -w net.ipv4.conf.%s.rp_filter=0\n" % (SYSCTL_BIN, name)
|
||||
(constants.SYSCTL_BIN, name)
|
||||
cfg += "%s -w net.ipv4.conf.%s.rp_filter=0\n" % (constants.SYSCTL_BIN, name)
|
||||
return cfg
|
||||
|
||||
@classmethod
|
||||
|
@ -78,9 +77,8 @@ class IPForwardService(UtilService):
|
|||
%s -w net.inet6.ip6.forwarding=1
|
||||
%s -w net.inet.icmp.bmcastecho=1
|
||||
%s -w net.inet.icmp.icmplim=0
|
||||
""" % (SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN)
|
||||
""" % (constants.SYSCTL_BIN, constants.SYSCTL_BIN, constants.SYSCTL_BIN, constants.SYSCTL_BIN)
|
||||
|
||||
addservice(IPForwardService)
|
||||
|
||||
class DefaultRouteService(UtilService):
|
||||
_name = "DefaultRoute"
|
||||
|
@ -92,21 +90,21 @@ class DefaultRouteService(UtilService):
|
|||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# auto-generated by DefaultRoute service (utility.py)\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
cfg += "\n".join(map(cls.addrstr, ifc.addrlist))
|
||||
cfg += "\n"
|
||||
return cfg
|
||||
|
||||
|
||||
@staticmethod
|
||||
def addrstr(x):
|
||||
if x.find(":") >= 0:
|
||||
net = IPv6Prefix(x)
|
||||
net = Ipv6Prefix(x)
|
||||
fam = "inet6 ::"
|
||||
else:
|
||||
net = IPv4Prefix(x)
|
||||
net = Ipv4Prefix(x)
|
||||
fam = "inet 0.0.0.0"
|
||||
if net.maxaddr() == net.minaddr():
|
||||
if net.max_addr() == net.min_addr():
|
||||
return ""
|
||||
else:
|
||||
if os.uname()[0] == "Linux":
|
||||
|
@ -115,9 +113,8 @@ class DefaultRouteService(UtilService):
|
|||
rtcmd = "route add -%s" % fam
|
||||
else:
|
||||
raise Exception, "unknown platform"
|
||||
return "%s %s" % (rtcmd, net.minaddr())
|
||||
|
||||
addservice(DefaultRouteService)
|
||||
return "%s %s" % (rtcmd, net.min_addr())
|
||||
|
||||
|
||||
class DefaultMulticastRouteService(UtilService):
|
||||
_name = "DefaultMulticastRoute"
|
||||
|
@ -144,8 +141,7 @@ class DefaultMulticastRouteService(UtilService):
|
|||
cfg += "\n"
|
||||
break
|
||||
return cfg
|
||||
|
||||
addservice(DefaultMulticastRouteService)
|
||||
|
||||
|
||||
class StaticRouteService(UtilService):
|
||||
_name = "StaticRoute"
|
||||
|
@ -160,23 +156,23 @@ class StaticRouteService(UtilService):
|
|||
cfg += "# NOTE: this service must be customized to be of any use\n"
|
||||
cfg += "# Below are samples that you can uncomment and edit.\n#\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
cfg += "\n".join(map(cls.routestr, ifc.addrlist))
|
||||
cfg += "\n"
|
||||
return cfg
|
||||
|
||||
|
||||
@staticmethod
|
||||
def routestr(x):
|
||||
if x.find(":") >= 0:
|
||||
net = IPv6Prefix(x)
|
||||
net = Ipv6Prefix(x)
|
||||
fam = "inet6"
|
||||
dst = "3ffe:4::/64"
|
||||
else:
|
||||
net = IPv4Prefix(x)
|
||||
net = Ipv4Prefix(x)
|
||||
fam = "inet"
|
||||
dst = "10.9.8.0/24"
|
||||
if net.maxaddr() == net.minaddr():
|
||||
if net.max_addr() == net.min_addr():
|
||||
return ""
|
||||
else:
|
||||
if os.uname()[0] == "Linux":
|
||||
|
@ -185,9 +181,8 @@ class StaticRouteService(UtilService):
|
|||
rtcmd = "#/sbin/route add -%s %s" % (fam, dst)
|
||||
else:
|
||||
raise Exception, "unknown platform"
|
||||
return "%s %s" % (rtcmd, net.minaddr())
|
||||
return "%s %s" % (rtcmd, net.min_addr())
|
||||
|
||||
addservice(StaticRouteService)
|
||||
|
||||
class SshService(UtilService):
|
||||
_name = "SSH"
|
||||
|
@ -200,12 +195,13 @@ class SshService(UtilService):
|
|||
_startup = ("sh startsshd.sh",)
|
||||
_shutdown = ("killall sshd",)
|
||||
_validate = ()
|
||||
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Use a startup script for launching sshd in order to wait for host
|
||||
key generation.
|
||||
'''
|
||||
"""
|
||||
Use a startup script for launching sshd in order to wait for host
|
||||
key generation.
|
||||
"""
|
||||
if os.uname()[0] == "FreeBSD":
|
||||
sshcfgdir = node.nodedir
|
||||
sshstatedir = node.nodedir
|
||||
|
@ -264,7 +260,6 @@ UsePAM yes
|
|||
UseDNS no
|
||||
""" % (sshcfgdir, sshstatedir, sshlibdir)
|
||||
|
||||
addservice(SshService)
|
||||
|
||||
class DhcpService(UtilService):
|
||||
_name = "DHCP"
|
||||
|
@ -273,12 +268,13 @@ class DhcpService(UtilService):
|
|||
_startup = ("dhcpd",)
|
||||
_shutdown = ("killall dhcpd",)
|
||||
_validate = ("pidof dhcpd",)
|
||||
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate a dhcpd config file using the network address of
|
||||
each interface.
|
||||
'''
|
||||
"""
|
||||
Generate a dhcpd config file using the network address of
|
||||
each interface.
|
||||
"""
|
||||
cfg = """\
|
||||
# auto-generated by DHCP service (utility.py)
|
||||
# NOTE: move these option lines into the desired pool { } block(s) below
|
||||
|
@ -294,25 +290,26 @@ max-lease-time 7200;
|
|||
ddns-update-style none;
|
||||
"""
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
cfg += "\n".join(map(cls.subnetentry, ifc.addrlist))
|
||||
cfg += "\n"
|
||||
return cfg
|
||||
|
||||
|
||||
@staticmethod
|
||||
def subnetentry(x):
|
||||
''' Generate a subnet declaration block given an IPv4 prefix string
|
||||
for inclusion in the dhcpd3 config file.
|
||||
'''
|
||||
"""
|
||||
Generate a subnet declaration block given an IPv4 prefix string
|
||||
for inclusion in the dhcpd3 config file.
|
||||
"""
|
||||
if x.find(":") >= 0:
|
||||
return ""
|
||||
else:
|
||||
addr = x.split("/")[0]
|
||||
net = IPv4Prefix(x)
|
||||
net = Ipv4Prefix(x)
|
||||
# divide the address space in half
|
||||
rangelow = net.addr(net.numaddr() / 2)
|
||||
rangehigh = net.maxaddr()
|
||||
rangelow = net.addr(net.num_addr() / 2)
|
||||
rangehigh = net.max_addr()
|
||||
return """
|
||||
subnet %s netmask %s {
|
||||
pool {
|
||||
|
@ -321,23 +318,24 @@ subnet %s netmask %s {
|
|||
option routers %s;
|
||||
}
|
||||
}
|
||||
""" % (net.prefixstr(), net.netmaskstr(), rangelow, rangehigh, addr)
|
||||
""" % (net.prefix_str(), net.netmask_str(), rangelow, rangehigh, addr)
|
||||
|
||||
addservice(DhcpService)
|
||||
|
||||
class DhcpClientService(UtilService):
|
||||
''' Use a DHCP client for all interfaces for addressing.
|
||||
'''
|
||||
"""
|
||||
Use a DHCP client for all interfaces for addressing.
|
||||
"""
|
||||
_name = "DHCPClient"
|
||||
_configs = ("startdhcpclient.sh",)
|
||||
_startup = ("sh startdhcpclient.sh",)
|
||||
_shutdown = ("killall dhclient",)
|
||||
_validate = ("pidof dhclient",)
|
||||
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate a script to invoke dhclient on all interfaces.
|
||||
'''
|
||||
"""
|
||||
Generate a script to invoke dhclient on all interfaces.
|
||||
"""
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# auto-generated by DHCPClient service (utility.py)\n"
|
||||
cfg += "# uncomment this mkdir line and symlink line to enable client-"
|
||||
|
@ -350,25 +348,26 @@ class DhcpClientService(UtilService):
|
|||
cfg += "#ln -s /var/run/resolvconf/interface/%s.dhclient" % ifc.name
|
||||
cfg += " /var/run/resolvconf/resolv.conf\n"
|
||||
cfg += "/sbin/dhclient -nw -pf /var/run/dhclient-%s.pid" % ifc.name
|
||||
cfg += " -lf /var/run/dhclient-%s.lease %s\n" % (ifc.name, ifc.name)
|
||||
cfg += " -lf /var/run/dhclient-%s.lease %s\n" % (ifc.name, ifc.name)
|
||||
return cfg
|
||||
|
||||
addservice(DhcpClientService)
|
||||
|
||||
|
||||
class FtpService(UtilService):
|
||||
''' Start a vsftpd server.
|
||||
'''
|
||||
"""
|
||||
Start a vsftpd server.
|
||||
"""
|
||||
_name = "FTP"
|
||||
_configs = ("vsftpd.conf",)
|
||||
_dirs = ("/var/run/vsftpd/empty", "/var/ftp",)
|
||||
_startup = ("vsftpd ./vsftpd.conf",)
|
||||
_shutdown = ("killall vsftpd",)
|
||||
_validate = ("pidof vsftpd",)
|
||||
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate a vsftpd.conf configuration file.
|
||||
'''
|
||||
"""
|
||||
Generate a vsftpd.conf configuration file.
|
||||
"""
|
||||
return """\
|
||||
# vsftpd.conf auto-generated by FTP service (utility.py)
|
||||
listen=YES
|
||||
|
@ -384,26 +383,27 @@ secure_chroot_dir=/var/run/vsftpd/empty
|
|||
anon_root=/var/ftp
|
||||
"""
|
||||
|
||||
addservice(FtpService)
|
||||
|
||||
class HttpService(UtilService):
|
||||
''' Start an apache server.
|
||||
'''
|
||||
"""
|
||||
Start an apache server.
|
||||
"""
|
||||
_name = "HTTP"
|
||||
_configs = ("/etc/apache2/apache2.conf", "/etc/apache2/envvars",
|
||||
"/var/www/index.html",)
|
||||
_dirs = ("/etc/apache2", "/var/run/apache2", "/var/log/apache2",
|
||||
"/run/lock", "/var/lock/apache2", "/var/www", )
|
||||
"/run/lock", "/var/lock/apache2", "/var/www",)
|
||||
_startup = ("chown www-data /var/lock/apache2", "apache2ctl start",)
|
||||
_shutdown = ("apache2ctl stop",)
|
||||
_validate = ("pidof apache2",)
|
||||
|
||||
APACHEVER22, APACHEVER24 = (22, 24)
|
||||
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate an apache2.conf configuration file.
|
||||
'''
|
||||
"""
|
||||
Generate an apache2.conf configuration file.
|
||||
"""
|
||||
if filename == cls._configs[0]:
|
||||
return cls.generateapache2conf(node, filename, services)
|
||||
elif filename == cls._configs[1]:
|
||||
|
@ -415,43 +415,45 @@ class HttpService(UtilService):
|
|||
|
||||
@classmethod
|
||||
def detectversionfromcmd(cls):
|
||||
''' Detect the apache2 version using the 'a2query' command.
|
||||
'''
|
||||
"""
|
||||
Detect the apache2 version using the 'a2query' command.
|
||||
"""
|
||||
try:
|
||||
status, result = cmdresult(['a2query', '-v'])
|
||||
except Exception:
|
||||
status, result = utils.cmdresult(['a2query', '-v'])
|
||||
except subprocess.CalledProcessError:
|
||||
status = -1
|
||||
|
||||
if status == 0 and result[:3] == '2.4':
|
||||
return cls.APACHEVER24
|
||||
return cls.APACHEVER22
|
||||
|
||||
return cls.APACHEVER22
|
||||
|
||||
@classmethod
|
||||
def generateapache2conf(cls, node, filename, services):
|
||||
lockstr = { cls.APACHEVER22:
|
||||
'LockFile ${APACHE_LOCK_DIR}/accept.lock\n',
|
||||
cls.APACHEVER24:
|
||||
'Mutex file:${APACHE_LOCK_DIR} default\n', }
|
||||
mpmstr = { cls.APACHEVER22: '', cls.APACHEVER24:
|
||||
'LoadModule mpm_worker_module /usr/lib/apache2/modules/mod_mpm_worker.so\n', }
|
||||
lockstr = {cls.APACHEVER22:
|
||||
'LockFile ${APACHE_LOCK_DIR}/accept.lock\n',
|
||||
cls.APACHEVER24:
|
||||
'Mutex file:${APACHE_LOCK_DIR} default\n', }
|
||||
mpmstr = {cls.APACHEVER22: '', cls.APACHEVER24:
|
||||
'LoadModule mpm_worker_module /usr/lib/apache2/modules/mod_mpm_worker.so\n', }
|
||||
|
||||
permstr = { cls.APACHEVER22:
|
||||
' Order allow,deny\n Deny from all\n Satisfy all\n',
|
||||
cls.APACHEVER24:
|
||||
' Require all denied\n', }
|
||||
permstr = {cls.APACHEVER22:
|
||||
' Order allow,deny\n Deny from all\n Satisfy all\n',
|
||||
cls.APACHEVER24:
|
||||
' Require all denied\n', }
|
||||
|
||||
authstr = { cls.APACHEVER22:
|
||||
'LoadModule authz_default_module /usr/lib/apache2/modules/mod_authz_default.so\n',
|
||||
cls.APACHEVER24:
|
||||
'LoadModule authz_core_module /usr/lib/apache2/modules/mod_authz_core.so\n', }
|
||||
authstr = {cls.APACHEVER22:
|
||||
'LoadModule authz_default_module /usr/lib/apache2/modules/mod_authz_default.so\n',
|
||||
cls.APACHEVER24:
|
||||
'LoadModule authz_core_module /usr/lib/apache2/modules/mod_authz_core.so\n', }
|
||||
|
||||
permstr2 = { cls.APACHEVER22:
|
||||
'\t\tOrder allow,deny\n\t\tallow from all\n',
|
||||
permstr2 = {cls.APACHEVER22:
|
||||
'\t\tOrder allow,deny\n\t\tallow from all\n',
|
||||
cls.APACHEVER24:
|
||||
'\t\tRequire all granted\n', }
|
||||
'\t\tRequire all granted\n', }
|
||||
|
||||
version = cls.detectversionfromcmd()
|
||||
cfg ="# apache2.conf generated by utility.py:HttpService\n"
|
||||
cfg = "# apache2.conf generated by utility.py:HttpService\n"
|
||||
cfg += lockstr[version]
|
||||
cfg += """\
|
||||
PidFile ${APACHE_PID_FILE}
|
||||
|
@ -474,7 +476,7 @@ KeepAliveTimeout 5
|
|||
<IfModule mpm_worker_module>
|
||||
StartServers 2
|
||||
MinSpareThreads 25
|
||||
MaxSpareThreads 75
|
||||
MaxSpareThreads 75
|
||||
ThreadLimit 64
|
||||
ThreadsPerChild 25
|
||||
MaxClients 150
|
||||
|
@ -484,7 +486,7 @@ KeepAliveTimeout 5
|
|||
<IfModule mpm_event_module>
|
||||
StartServers 2
|
||||
MinSpareThreads 25
|
||||
MaxSpareThreads 75
|
||||
MaxSpareThreads 75
|
||||
ThreadLimit 64
|
||||
ThreadsPerChild 25
|
||||
MaxClients 150
|
||||
|
@ -590,16 +592,16 @@ export LANG
|
|||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
body += "<li>%s - %s</li>\n" % (ifc.name, ifc.addrlist)
|
||||
body += "<li>%s - %s</li>\n" % (ifc.name, ifc.addrlist)
|
||||
return "<html><body>%s</body></html>" % body
|
||||
|
||||
addservice(HttpService)
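# Illustrative sketch (not part of this diff): the version probe behind
# HttpService.detectversionfromcmd() above, using plain subprocess instead of
# the project's utils.cmdresult() helper; the fallback to 2.2 mirrors the code.
import subprocess
def _detect_apache_version():
    try:
        out = subprocess.check_output(['a2query', '-v'])
    except (OSError, subprocess.CalledProcessError):
        return 22                        # a2query missing or failed: assume 2.2 layout
    return 24 if out.strip()[:3] == '2.4' else 22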
|
||||
|
||||
class PcapService(UtilService):
|
||||
''' Pcap service for logging packets.
|
||||
'''
|
||||
"""
|
||||
Pcap service for logging packets.
|
||||
"""
|
||||
_name = "pcap"
|
||||
_configs = ("pcap.sh", )
|
||||
_configs = ("pcap.sh",)
|
||||
_dirs = ()
|
||||
_startindex = 1
|
||||
_startup = ("sh pcap.sh start",)
|
||||
|
@ -609,8 +611,9 @@ class PcapService(UtilService):
|
|||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate a startpcap.sh traffic logging script.
|
||||
'''
|
||||
"""
|
||||
Generate a startpcap.sh traffic logging script.
|
||||
"""
|
||||
cfg = """
|
||||
#!/bin/sh
|
||||
# set tcpdump options here (see 'man tcpdump' for help)
|
||||
|
@ -625,7 +628,7 @@ if [ "x$1" = "xstart" ]; then
|
|||
cfg += '# '
|
||||
redir = "< /dev/null"
|
||||
cfg += "tcpdump ${DUMPOPTS} -w %s.%s.pcap -i %s %s &\n" % \
|
||||
(node.name, ifc.name, ifc.name, redir)
|
||||
(node.name, ifc.name, ifc.name, redir)
|
||||
cfg += """
|
||||
|
||||
elif [ "x$1" = "xstop" ]; then
|
||||
|
@ -635,7 +638,6 @@ fi;
|
|||
"""
|
||||
return cfg
|
||||
|
||||
addservice(PcapService)
|
||||
|
||||
class RadvdService(UtilService):
|
||||
_name = "radvd"
|
||||
|
@ -644,12 +646,13 @@ class RadvdService(UtilService):
|
|||
_startup = ("radvd -C /etc/radvd/radvd.conf -m logfile -l /var/log/radvd.log",)
|
||||
_shutdown = ("pkill radvd",)
|
||||
_validate = ("pidof radvd",)
|
||||
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate a RADVD router advertisement daemon config file
|
||||
"""
|
||||
Generate a RADVD router advertisement daemon config file
|
||||
using the network address of each interface.
|
||||
'''
|
||||
"""
|
||||
cfg = "# auto-generated by RADVD service (utility.py)\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
|
@ -679,29 +682,30 @@ interface %s
|
|||
""" % prefix
|
||||
cfg += "};\n"
|
||||
return cfg
|
||||
|
||||
|
||||
@staticmethod
|
||||
def subnetentry(x):
|
||||
''' Generate a subnet declaration block given an IPv6 prefix string
|
||||
for inclusion in the RADVD config file.
|
||||
'''
|
||||
"""
|
||||
Generate a subnet declaration block given an IPv6 prefix string
|
||||
for inclusion in the RADVD config file.
|
||||
"""
|
||||
if x.find(":") >= 0:
|
||||
net = IPv6Prefix(x)
|
||||
net = Ipv6Prefix(x)
|
||||
return str(net)
|
||||
else:
|
||||
return ""
|
||||
|
||||
addservice(RadvdService)
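# Illustrative sketch (not part of this diff): RadvdService.subnetentry() above
# only emits prefix blocks for IPv6 addresses; this is the same ':' filter.
def _radvd_prefixes(addrlist):
    # e.g. keeps 'fc00::1/64', drops '10.0.0.1/24'
    return [a for a in addrlist if ':' in a]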
|
||||
|
||||
class AtdService(UtilService):
|
||||
''' Atd service for scheduling at jobs
|
||||
'''
|
||||
"""
|
||||
Atd service for scheduling at jobs
|
||||
"""
|
||||
_name = "atd"
|
||||
_configs = ("startatd.sh",)
|
||||
_dirs = ("/var/spool/cron/atjobs", "/var/spool/cron/atspool")
|
||||
_startup = ("sh startatd.sh", )
|
||||
_shutdown = ("pkill atd", )
|
||||
|
||||
_startup = ("sh startatd.sh",)
|
||||
_shutdown = ("pkill atd",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
return """
|
||||
|
@ -711,14 +715,28 @@ chown -R daemon /var/spool/cron/*
|
|||
chmod -R 700 /var/spool/cron/*
|
||||
atd
|
||||
"""
|
||||
|
||||
addservice(AtdService)
|
||||
|
||||
|
||||
class UserDefinedService(UtilService):
|
||||
''' Dummy service allowing customization of anything.
|
||||
'''
|
||||
"""
|
||||
Dummy service allowing customization of anything.
|
||||
"""
|
||||
_name = "UserDefined"
|
||||
_startindex = 50
|
||||
_meta = "Customize this service to do anything upon startup."
|
||||
|
||||
addservice(UserDefinedService)
|
||||
|
||||
def load_services():
|
||||
ServiceManager.add(IPForwardService)
|
||||
ServiceManager.add(DefaultRouteService)
|
||||
ServiceManager.add(DefaultMulticastRouteService)
|
||||
ServiceManager.add(StaticRouteService)
|
||||
ServiceManager.add(SshService)
|
||||
ServiceManager.add(DhcpService)
|
||||
ServiceManager.add(DhcpClientService)
|
||||
ServiceManager.add(FtpService)
|
||||
ServiceManager.add(HttpService)
|
||||
ServiceManager.add(PcapService)
|
||||
ServiceManager.add(RadvdService)
|
||||
ServiceManager.add(AtdService)
|
||||
ServiceManager.add(UserDefinedService)
|
||||
|
|
|
@ -1,24 +1,19 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
"""
|
||||
xorp.py: defines routing services provided by the XORP routing suite.
|
||||
'''
|
||||
"""
|
||||
|
||||
import os
|
||||
from core.misc import log
|
||||
from core.service import CoreService
|
||||
from core.service import ServiceManager
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
from core.service import CoreService, addservice
|
||||
from core.misc.ipaddr import IPv4Prefix
|
||||
from core.constants import *
|
||||
|
||||
class XorpRtrmgr(CoreService):
|
||||
''' XORP router manager service builds a config.boot file based on other
|
||||
"""
|
||||
XORP router manager service builds a config.boot file based on other
|
||||
enabled XORP services, and launches necessary daemons upon startup.
|
||||
'''
|
||||
"""
|
||||
_name = "xorp_rtrmgr"
|
||||
_group = "XORP"
|
||||
_depends = ()
|
||||
|
@ -26,15 +21,16 @@ class XorpRtrmgr(CoreService):
|
|||
_configs = ("/etc/xorp/config.boot",)
|
||||
_startindex = 35
|
||||
_startup = ("xorp_rtrmgr -d -b %s -l /var/log/%s.log -P /var/run/%s.pid" % (_configs[0], _name, _name),)
|
||||
_shutdown = ("killall xorp_rtrmgr", )
|
||||
_validate = ("pidof xorp_rtrmgr", )
|
||||
_shutdown = ("killall xorp_rtrmgr",)
|
||||
_validate = ("pidof xorp_rtrmgr",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Returns config.boot configuration file text. Other services that
|
||||
depend on this will have generatexorpconfig() hooks that are
|
||||
"""
|
||||
Returns config.boot configuration file text. Other services that
|
||||
depend on this will have generatexorpconfig() hooks that are
|
||||
invoked here. Filename currently ignored.
|
||||
'''
|
||||
"""
|
||||
cfg = "interfaces {\n"
|
||||
for ifc in node.netifs():
|
||||
cfg += " interface %s {\n" % ifc.name
|
||||
|
@ -50,40 +46,40 @@ class XorpRtrmgr(CoreService):
|
|||
s._depends.index(cls._name)
|
||||
cfg += s.generatexorpconfig(node)
|
||||
except ValueError:
|
||||
pass
|
||||
logger.exception("error getting value from service: %s", cls._name)
|
||||
|
||||
return cfg
|
||||
|
||||
|
||||
@staticmethod
|
||||
def addrstr(x):
|
||||
''' helper for mapping IP addresses to XORP config statements
|
||||
'''
|
||||
try:
|
||||
(addr, plen) = x.split("/")
|
||||
except Exception:
|
||||
raise ValueError, "invalid address"
|
||||
"""
|
||||
helper for mapping IP addresses to XORP config statements
|
||||
"""
|
||||
addr, plen = x.split("/")
|
||||
cfg = "\t address %s {\n" % addr
|
||||
cfg += "\t\tprefix-length: %s\n" % plen
|
||||
cfg +="\t }\n"
|
||||
cfg += "\t }\n"
|
||||
return cfg
|
||||
|
||||
|
||||
@staticmethod
|
||||
def lladdrstr(ifc):
|
||||
''' helper for adding link-local address entries (required by OSPFv3)
|
||||
'''
|
||||
"""
|
||||
helper for adding link-local address entries (required by OSPFv3)
|
||||
"""
|
||||
cfg = "\t address %s {\n" % ifc.hwaddr.tolinklocal()
|
||||
cfg += "\t\tprefix-length: 64\n"
|
||||
cfg += "\t }\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpRtrmgr)
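# Illustrative sketch (not part of this diff): the hook-dispatch pattern used by
# XorpRtrmgr.generateconfig() above -- only services declaring a dependency on
# xorp_rtrmgr contribute their generatexorpconfig() text to config.boot.
def _collect_xorp_config(node, services, rtrmgr_name="xorp_rtrmgr"):
    cfg = ""
    for s in services:
        if rtrmgr_name in getattr(s, "_depends", ()):
            cfg += s.generatexorpconfig(node)
    return cfg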
|
||||
|
||||
|
||||
class XorpService(CoreService):
|
||||
''' Parent class for XORP services. Defines properties and methods
|
||||
common to XORP's routing daemons.
|
||||
'''
|
||||
"""
|
||||
Parent class for XORP services. Defines properties and methods
|
||||
common to XORP's routing daemons.
|
||||
"""
|
||||
_name = "XorpDaemon"
|
||||
_group = "XORP"
|
||||
_depends = ("xorp_rtrmgr", )
|
||||
_depends = ("xorp_rtrmgr",)
|
||||
_dirs = ()
|
||||
_configs = ()
|
||||
_startindex = 40
|
||||
|
@ -93,22 +89,24 @@ class XorpService(CoreService):
|
|||
|
||||
@staticmethod
|
||||
def fea(forwarding):
|
||||
''' Helper to add a forwarding engine entry to the config file.
|
||||
'''
|
||||
"""
|
||||
Helper to add a forwarding engine entry to the config file.
|
||||
"""
|
||||
cfg = "fea {\n"
|
||||
cfg += " %s {\n" % forwarding
|
||||
cfg += "\tdisable:false\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
|
||||
@staticmethod
|
||||
def mfea(forwarding, ifcs):
|
||||
''' Helper to add a multicast forwarding engine entry to the config file.
|
||||
'''
|
||||
"""
|
||||
Helper to add a multicast forwarding engine entry to the config file.
|
||||
"""
|
||||
names = []
|
||||
for ifc in ifcs:
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
names.append(ifc.name)
|
||||
names.append("register_vif")
|
||||
|
@ -125,11 +123,11 @@ class XorpService(CoreService):
|
|||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
|
||||
@staticmethod
|
||||
def policyexportconnected():
|
||||
''' Helper to add a policy statement for exporting connected routes.
|
||||
'''
|
||||
"""
|
||||
Helper to add a policy statement for exporting connected routes.
|
||||
"""
|
||||
cfg = "policy {\n"
|
||||
cfg += " policy-statement export-connected {\n"
|
||||
cfg += "\tterm 100 {\n"
|
||||
|
@ -143,34 +141,37 @@ class XorpService(CoreService):
|
|||
|
||||
@staticmethod
|
||||
def routerid(node):
|
||||
''' Helper to return the first IPv4 address of a node as its router ID.
|
||||
'''
|
||||
"""
|
||||
Helper to return the first IPv4 address of a node as its router ID.
|
||||
"""
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") >= 0:
|
||||
return a.split('/')[0]
|
||||
#raise ValueError, "no IPv4 address found for router ID"
|
||||
return a.split('/')[0]
|
||||
# raise ValueError, "no IPv4 address found for router ID"
|
||||
return "0.0.0.0"
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generateconfig(cls, node, filename, services):
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
def generatexorpconfig(cls, node):
|
||||
return ""
|
||||
|
||||
|
||||
class XorpOspfv2(XorpService):
|
||||
''' The OSPFv2 service provides IPv4 routing for wired networks. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified XORP configuration file.
|
||||
'''
|
||||
"""
|
||||
The OSPFv2 service provides IPv4 routing for wired networks. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified XORP configuration file.
|
||||
"""
|
||||
_name = "XORP_OSPFv2"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.fea("unicast-forwarding4")
|
||||
rtrid = cls.routerid(node)
|
||||
cfg += "\nprotocols {\n"
|
||||
|
@ -178,7 +179,7 @@ class XorpOspfv2(XorpService):
|
|||
cfg += "\trouter-id: %s\n" % rtrid
|
||||
cfg += "\tarea 0.0.0.0 {\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
cfg += "\t interface %s {\n" % ifc.name
|
||||
cfg += "\t\tvif %s {\n" % ifc.name
|
||||
|
@ -194,18 +195,18 @@ class XorpOspfv2(XorpService):
|
|||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpOspfv2)
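# Illustrative sketch (not part of this diff): XorpService.routerid() above picks
# the first IPv4 address found on a non-control interface, else "0.0.0.0".
def _router_id(addr_lists):
    for addrs in addr_lists:             # one list of "addr/prefix" strings per interface
        for a in addrs:
            if "." in a:
                return a.split("/")[0]
    return "0.0.0.0"
# _router_id([["fc00::1/64"], ["10.0.0.1/24"]]) returns "10.0.0.1"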
|
||||
|
||||
|
||||
class XorpOspfv3(XorpService):
|
||||
''' The OSPFv3 service provides IPv6 routing. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified XORP configuration file.
|
||||
'''
|
||||
"""
|
||||
The OSPFv3 service provides IPv6 routing. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified XORP configuration file.
|
||||
"""
|
||||
_name = "XORP_OSPFv3"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.fea("unicast-forwarding6")
|
||||
rtrid = cls.routerid(node)
|
||||
cfg += "\nprotocols {\n"
|
||||
|
@ -213,7 +214,7 @@ class XorpOspfv3(XorpService):
|
|||
cfg += "\trouter-id: %s\n" % rtrid
|
||||
cfg += "\tarea 0.0.0.0 {\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
cfg += "\t interface %s {\n" % ifc.name
|
||||
cfg += "\t\tvif %s {\n" % ifc.name
|
||||
|
@ -223,15 +224,15 @@ class XorpOspfv3(XorpService):
|
|||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpOspfv3)
|
||||
|
||||
|
||||
class XorpBgp(XorpService):
|
||||
''' IPv4 inter-domain routing. AS numbers and peers must be customized.
|
||||
'''
|
||||
"""
|
||||
IPv4 inter-domain routing. AS numbers and peers must be customized.
|
||||
"""
|
||||
_name = "XORP_BGP"
|
||||
_custom_needed = True
|
||||
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = "/* This is a sample config that should be customized with\n"
|
||||
|
@ -253,22 +254,23 @@ class XorpBgp(XorpService):
|
|||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpBgp)
|
||||
|
||||
class XorpRip(XorpService):
|
||||
''' RIP IPv4 unicast routing.
|
||||
'''
|
||||
"""
|
||||
RIP IPv4 unicast routing.
|
||||
"""
|
||||
|
||||
_name = "XORP_RIP"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.fea("unicast-forwarding4")
|
||||
cfg += cls.policyexportconnected()
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " rip {\n"
|
||||
cfg += "\texport: \"export-connected\"\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
cfg += "\tinterface %s {\n" % ifc.name
|
||||
cfg += "\t vif %s {\n" % ifc.name
|
||||
|
@ -284,68 +286,68 @@ class XorpRip(XorpService):
|
|||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpRip)
|
||||
|
||||
|
||||
class XorpRipng(XorpService):
|
||||
''' RIP NG IPv6 unicast routing.
|
||||
'''
|
||||
"""
|
||||
RIP NG IPv6 unicast routing.
|
||||
"""
|
||||
_name = "XORP_RIPNG"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.fea("unicast-forwarding6")
|
||||
cfg += cls.policyexportconnected()
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " ripng {\n"
|
||||
cfg += "\texport: \"export-connected\"\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
cfg += "\tinterface %s {\n" % ifc.name
|
||||
cfg += "\t vif %s {\n" % ifc.name
|
||||
# for a in ifc.addrlist:
|
||||
# if a.find(":") < 0:
|
||||
# continue
|
||||
# addr = a.split("/")[0]
|
||||
# cfg += "\t\taddress %s {\n" % addr
|
||||
# cfg += "\t\t disable: false\n"
|
||||
# cfg += "\t\t}\n"
|
||||
# for a in ifc.addrlist:
|
||||
# if a.find(":") < 0:
|
||||
# continue
|
||||
# addr = a.split("/")[0]
|
||||
# cfg += "\t\taddress %s {\n" % addr
|
||||
# cfg += "\t\t disable: false\n"
|
||||
# cfg += "\t\t}\n"
|
||||
cfg += "\t\taddress %s {\n" % ifc.hwaddr.tolinklocal()
|
||||
cfg += "\t\t disable: false\n"
|
||||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpRipng)
|
||||
|
||||
|
||||
class XorpPimSm4(XorpService):
|
||||
''' PIM Sparse Mode IPv4 multicast routing.
|
||||
'''
|
||||
"""
|
||||
PIM Sparse Mode IPv4 multicast routing.
|
||||
"""
|
||||
_name = "XORP_PIMSM4"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.mfea("mfea4", node.netifs())
|
||||
|
||||
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " igmp {\n"
|
||||
names = []
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
names.append(ifc.name)
|
||||
cfg += "\tinterface %s {\n" % ifc.name
|
||||
cfg += "\t vif %s {\n" % ifc.name
|
||||
cfg += "\t\tdisable: false\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
|
||||
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " pimsm4 {\n"
|
||||
|
||||
|
@ -368,46 +370,46 @@ class XorpPimSm4(XorpService):
|
|||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
|
||||
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
|
||||
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " fib2mrib {\n"
|
||||
cfg += "\tdisable: false\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpPimSm4)
|
||||
|
||||
|
||||
class XorpPimSm6(XorpService):
|
||||
''' PIM Sparse Mode IPv6 multicast routing.
|
||||
'''
|
||||
"""
|
||||
PIM Sparse Mode IPv6 multicast routing.
|
||||
"""
|
||||
_name = "XORP_PIMSM6"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.mfea("mfea6", node.netifs())
|
||||
|
||||
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " mld {\n"
|
||||
names = []
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
names.append(ifc.name)
|
||||
cfg += "\tinterface %s {\n" % ifc.name
|
||||
cfg += "\t vif %s {\n" % ifc.name
|
||||
cfg += "\t\tdisable: false\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
|
||||
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " pimsm6 {\n"
|
||||
|
||||
|
||||
names.append("register_vif")
|
||||
for name in names:
|
||||
cfg += "\tinterface %s {\n" % name
|
||||
|
@ -427,33 +429,33 @@ class XorpPimSm6(XorpService):
|
|||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
|
||||
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
|
||||
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " fib2mrib {\n"
|
||||
cfg += "\tdisable: false\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpPimSm6)
|
||||
|
||||
|
||||
class XorpOlsr(XorpService):
|
||||
''' OLSR IPv4 unicast MANET routing.
|
||||
'''
|
||||
"""
|
||||
OLSR IPv4 unicast MANET routing.
|
||||
"""
|
||||
_name = "XORP_OLSR"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.fea("unicast-forwarding4")
|
||||
rtrid = cls.routerid(node)
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " olsr4 {\n"
|
||||
cfg += "\tmain-address: %s\n" % rtrid
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
cfg += "\tinterface %s {\n" % ifc.name
|
||||
cfg += "\t vif %s {\n" % ifc.name
|
||||
|
@ -468,5 +470,15 @@ class XorpOlsr(XorpService):
|
|||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpOlsr)
|
||||
|
||||
|
||||
def load_services():
|
||||
ServiceManager.add(XorpRtrmgr)
|
||||
ServiceManager.add(XorpOspfv2)
|
||||
ServiceManager.add(XorpOspfv3)
|
||||
ServiceManager.add(XorpBgp)
|
||||
ServiceManager.add(XorpRip)
|
||||
ServiceManager.add(XorpRipng)
|
||||
ServiceManager.add(XorpPimSm4)
|
||||
ServiceManager.add(XorpPimSm6)
|
||||
ServiceManager.add(XorpOlsr)
|
||||
|
|
File diff suppressed because it is too large
|
@ -1,51 +1,43 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
'''
|
||||
xen.py: implementation of the XenNode and XenVEth classes that support
|
||||
"""
|
||||
xen.py: implementation of the XenNode and XenVEth classes that support
|
||||
generating Xen domUs based on an ISO image and persistent configuration area
|
||||
'''
|
||||
"""
|
||||
|
||||
from core.netns.vnet import *
|
||||
import base64
|
||||
import os
|
||||
import shutil
|
||||
import string
|
||||
import subprocess
|
||||
import sys
|
||||
import threading
|
||||
|
||||
import crypt
|
||||
|
||||
from core import constants
|
||||
from core.coreobj import PyCoreNetIf
|
||||
from core.coreobj import PyCoreNode
|
||||
from core.enumerations import NodeTypes
|
||||
from core.misc import log
|
||||
from core.misc import nodeutils
|
||||
from core.misc import utils
|
||||
from core.netns.vnode import LxcNode
|
||||
from core.coreobj import PyCoreObj, PyCoreNode, PyCoreNetIf
|
||||
from core.misc.ipaddr import *
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
from core.api import coreapi
|
||||
from core.netns.vif import TunTap
|
||||
from core.emane.nodes import EmaneNode
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
try:
|
||||
import parted
|
||||
except ImportError, e:
|
||||
#print "Failed to load parted Python module required by Xen support."
|
||||
#print "Error was:", e
|
||||
raise ImportError
|
||||
except ImportError:
|
||||
logger.error("failed to import parted for xen nodes")
|
||||
|
||||
import base64
|
||||
import crypt
|
||||
import subprocess
|
||||
try:
|
||||
import fsimage
|
||||
except ImportError, e:
|
||||
except ImportError:
|
||||
# fix for fsimage under Ubuntu
|
||||
sys.path.append("/usr/lib/xen-default/lib/python")
|
||||
try:
|
||||
import fsimage
|
||||
except ImportError, e:
|
||||
#print "Failed to load fsimage Python module required by Xen support."
|
||||
#print "Error was:", e
|
||||
raise ImportError
|
||||
|
||||
|
||||
|
||||
import os
|
||||
import time
|
||||
import shutil
|
||||
import string
|
||||
except ImportError:
|
||||
logger.error("failed to import fsimage for xen nodes")
|
||||
|
||||
# XXX move these out to config file
|
||||
AWK_PATH = "/bin/awk"
|
||||
|
@ -60,11 +52,12 @@ SED_PATH = "/bin/sed"
|
|||
XM_PATH = "/usr/sbin/xm"
|
||||
UDEVADM_PATH = "/sbin/udevadm"
|
||||
|
||||
|
||||
class XenVEth(PyCoreNetIf):
|
||||
def __init__(self, node, name, localname, mtu = 1500, net = None,
|
||||
start = True, hwaddr = None):
|
||||
def __init__(self, node, name, localname, mtu=1500, net=None,
|
||||
start=True, hwaddr=None):
|
||||
# note that net arg is ignored
|
||||
PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu)
|
||||
PyCoreNetIf.__init__(self, node=node, name=name, mtu=mtu)
|
||||
self.localname = localname
|
||||
self.up = False
|
||||
self.hwaddr = hwaddr
|
||||
|
@ -76,8 +69,8 @@ class XenVEth(PyCoreNetIf):
|
|||
'vifname=%s' % self.localname, 'script=vif-core']
|
||||
if self.hwaddr is not None:
|
||||
cmd.append('mac=%s' % self.hwaddr)
|
||||
check_call(cmd)
|
||||
check_call([IP_BIN, "link", "set", self.localname, "up"])
|
||||
subprocess.check_call(cmd)
|
||||
subprocess.check_call([constants.IP_BIN, "link", "set", self.localname, "up"])
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
|
@ -87,28 +80,28 @@ class XenVEth(PyCoreNetIf):
|
|||
if self.hwaddr is not None:
|
||||
pass
|
||||
# this should be doable, but some argument isn't a string
|
||||
#check_call([XM_PATH, 'network-detach', self.node.vmname,
|
||||
# check_call([XM_PATH, 'network-detach', self.node.vmname,
|
||||
# self.hwaddr])
|
||||
self.up = False
|
||||
|
||||
|
||||
class XenNode(PyCoreNode):
|
||||
apitype = coreapi.CORE_NODE_XEN
|
||||
apitype = NodeTypes.XEN.value
|
||||
|
||||
FilesToIgnore = frozenset([
|
||||
#'ipforward.sh',
|
||||
# 'ipforward.sh',
|
||||
'quaggaboot.sh',
|
||||
])
|
||||
|
||||
FilesRedirection = {
|
||||
'ipforward.sh' : '/core-tmp/ipforward.sh',
|
||||
'ipforward.sh': '/core-tmp/ipforward.sh',
|
||||
}
|
||||
|
||||
CmdsToIgnore = frozenset([
|
||||
#'sh ipforward.sh',
|
||||
#'sh quaggaboot.sh zebra',
|
||||
#'sh quaggaboot.sh ospfd',
|
||||
#'sh quaggaboot.sh ospf6d',
|
||||
# 'sh ipforward.sh',
|
||||
# 'sh quaggaboot.sh zebra',
|
||||
# 'sh quaggaboot.sh ospfd',
|
||||
# 'sh quaggaboot.sh ospf6d',
|
||||
'sh quaggaboot.sh vtysh',
|
||||
'killall zebra',
|
||||
'killall ospfd',
|
||||
|
@ -117,43 +110,39 @@ class XenNode(PyCoreNode):
|
|||
])
|
||||
|
||||
def RedirCmd_ipforward(self):
|
||||
sysctlFile = open(os.path.join(self.mountdir, self.etcdir,
|
||||
'sysctl.conf'), 'a')
|
||||
p1 = subprocess.Popen([AWK_PATH,
|
||||
'/^\/sbin\/sysctl -w/ {print $NF}',
|
||||
os.path.join(self.nodedir,
|
||||
'core-tmp/ipforward.sh') ],
|
||||
stdout=sysctlFile)
|
||||
sysctlFile = open(os.path.join(self.mountdir, self.etcdir, 'sysctl.conf'), 'a')
|
||||
p1 = subprocess.Popen([AWK_PATH, '/^\/sbin\/sysctl -w/ {print $NF}',
|
||||
os.path.join(self.nodedir, 'core-tmp/ipforward.sh')], stdout=sysctlFile)
|
||||
p1.wait()
|
||||
sysctlFile.close()
|
||||
|
||||
def RedirCmd_zebra(self):
|
||||
check_call([SED_PATH, '-i', '-e', 's/^zebra=no/zebra=yes/',
|
||||
os.path.join(self.mountdir, self.etcdir, 'quagga/daemons')])
|
||||
subprocess.check_call([SED_PATH, '-i', '-e', 's/^zebra=no/zebra=yes/',
|
||||
os.path.join(self.mountdir, self.etcdir, 'quagga/daemons')])
|
||||
|
||||
def RedirCmd_ospfd(self):
|
||||
check_call([SED_PATH, '-i', '-e', 's/^ospfd=no/ospfd=yes/',
|
||||
os.path.join(self.mountdir, self.etcdir, 'quagga/daemons')])
|
||||
subprocess.check_call([SED_PATH, '-i', '-e', 's/^ospfd=no/ospfd=yes/',
|
||||
os.path.join(self.mountdir, self.etcdir, 'quagga/daemons')])
|
||||
|
||||
def RedirCmd_ospf6d(self):
|
||||
check_call([SED_PATH, '-i', '-e',
|
||||
's/^ospf6d=no/ospf6d=yes/',
|
||||
os.path.join(self.mountdir, self.etcdir, 'quagga/daemons')])
|
||||
subprocess.check_call([SED_PATH, '-i', '-e',
|
||||
's/^ospf6d=no/ospf6d=yes/',
|
||||
os.path.join(self.mountdir, self.etcdir, 'quagga/daemons')])
|
||||
|
||||
CmdsRedirection = {
|
||||
'sh ipforward.sh' : RedirCmd_ipforward,
|
||||
'sh quaggaboot.sh zebra' : RedirCmd_zebra,
|
||||
'sh quaggaboot.sh ospfd' : RedirCmd_ospfd,
|
||||
'sh quaggaboot.sh ospf6d' : RedirCmd_ospf6d,
|
||||
'sh ipforward.sh': RedirCmd_ipforward,
|
||||
'sh quaggaboot.sh zebra': RedirCmd_zebra,
|
||||
'sh quaggaboot.sh ospfd': RedirCmd_ospfd,
|
||||
'sh quaggaboot.sh ospf6d': RedirCmd_ospf6d,
|
||||
}
|
||||
|
||||
# CoreNode: no __init__, take from LxcNode & SimpleLxcNode
|
||||
def __init__(self, session, objid = None, name = None,
|
||||
nodedir = None, bootsh = "boot.sh", verbose = False,
|
||||
start = True, model = None,
|
||||
vgname = None, ramsize = None, disksize = None,
|
||||
isofile = None):
|
||||
def __init__(self, session, objid=None, name=None,
|
||||
nodedir=None, bootsh="boot.sh", start=True, model=None,
|
||||
vgname=None, ramsize=None, disksize=None,
|
||||
isofile=None):
|
||||
# SimpleLxcNode initialization
|
||||
PyCoreNode.__init__(self, session = session, objid = objid, name = name,
|
||||
verbose = verbose)
|
||||
PyCoreNode.__init__(self, session=session, objid=objid, name=name)
|
||||
self.nodedir = nodedir
|
||||
self.model = model
|
||||
# indicates startup() has been invoked and disk has been initialized
|
||||
|
@ -181,36 +170,35 @@ class XenNode(PyCoreNode):
|
|||
# TODO: remove this temporary hack
|
||||
self.FilesRedirection['/usr/local/etc/quagga/Quagga.conf'] = \
|
||||
os.path.join(self.getconfigitem('mount_path'), self.etcdir,
|
||||
'quagga/Quagga.conf')
|
||||
'quagga/Quagga.conf')
|
||||
|
||||
# LxcNode initialization
|
||||
# self.makenodedir()
|
||||
if self.nodedir is None:
|
||||
self.nodedir = \
|
||||
os.path.join(session.sessiondir, self.name + ".conf")
|
||||
self.nodedir = os.path.join(session.sessiondir, self.name + ".conf")
|
||||
self.mountdir = self.nodedir + self.getconfigitem('mount_path')
|
||||
if not os.path.isdir(self.mountdir):
|
||||
os.makedirs(self.mountdir)
|
||||
self.tmpnodedir = True
|
||||
else:
|
||||
raise Exception, "Xen PVM node requires a temporary nodedir"
|
||||
raise Exception("Xen PVM node requires a temporary nodedir")
|
||||
self.tmpnodedir = False
|
||||
self.bootsh = bootsh
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def getconfigitem(self, name, default=None):
|
||||
''' Configuration items come from the xen.conf file and/or input from
|
||||
the GUI, and are stored in the session using the XenConfigManager
|
||||
object. self.model is used to identify particular profiles
|
||||
associated with a node type in the GUI.
|
||||
'''
|
||||
return self.session.xen.getconfigitem(name=name, model=self.model,
|
||||
node=self, value=default)
|
||||
"""
|
||||
Configuration items come from the xen.conf file and/or input from
|
||||
the GUI, and are stored in the session using the XenConfigManager
|
||||
object. self.model is used to identify particular profiles
|
||||
associated with a node type in the GUI.
|
||||
"""
|
||||
return self.session.xen.getconfigitem(name=name, model=self.model, node=self, value=default)
|
||||
|
||||
# from class LxcNode (also SimpleLxcNode)
|
||||
def startup(self):
|
||||
self.warn("XEN PVM startup() called: preparing disk for %s" % self.name)
|
||||
logger.warn("XEN PVM startup() called: preparing disk for %s" % self.name)
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if self.up:
|
||||
|
@ -218,10 +206,10 @@ class XenNode(PyCoreNode):
|
|||
self.createlogicalvolume()
|
||||
self.createpartitions()
|
||||
persistdev = self.createfilesystems()
|
||||
check_call([MOUNT_BIN, '-t', 'ext4', persistdev, self.mountdir])
|
||||
subprocess.check_call([constants.MOUNT_BIN, '-t', 'ext4', persistdev, self.mountdir])
|
||||
self.untarpersistent(tarname=self.getconfigitem('persist_tar_iso'),
|
||||
iso=True)
|
||||
self.setrootpassword(pw = self.getconfigitem('root_password'))
|
||||
self.setrootpassword(pw=self.getconfigitem('root_password'))
|
||||
self.sethostname(old='UBASE', new=self.name)
|
||||
self.setupssh(keypath=self.getconfigitem('ssh_key_path'))
|
||||
self.createvm()
|
||||
|
@ -231,11 +219,11 @@ class XenNode(PyCoreNode):
|
|||
|
||||
# from class LxcNode (also SimpleLxcNode)
|
||||
def boot(self):
|
||||
self.warn("XEN PVM boot() called")
|
||||
logger.warn("XEN PVM boot() called")
|
||||
|
||||
self.lock.acquire()
|
||||
if not self.up:
|
||||
raise Exception, "Can't boot VM without initialized disk"
|
||||
raise Exception("Can't boot VM without initialized disk")
|
||||
|
||||
if self.booted:
|
||||
self.lock.release()
|
||||
|
@ -247,18 +235,17 @@ class XenNode(PyCoreNode):
|
|||
self.untarpersistent(tarname=tarname, iso=False)
|
||||
|
||||
try:
|
||||
check_call([UMOUNT_BIN, self.mountdir])
|
||||
subprocess.check_call([constants.UMOUNT_BIN, self.mountdir])
|
||||
self.unmount_all(self.mountdir)
|
||||
check_call([UDEVADM_PATH, 'settle'])
|
||||
check_call([KPARTX_PATH, '-d', self.lvpath])
|
||||
subprocess.check_call([UDEVADM_PATH, 'settle'])
|
||||
subprocess.check_call([KPARTX_PATH, '-d', self.lvpath])
|
||||
|
||||
#time.sleep(5)
|
||||
#time.sleep(1)
|
||||
# time.sleep(5)
|
||||
# time.sleep(1)
|
||||
|
||||
# unpause VM
|
||||
if self.verbose:
|
||||
self.warn("XEN PVM boot() unpause domU %s" % self.vmname)
|
||||
mutecheck_call([XM_PATH, 'unpause', self.vmname])
|
||||
logger.warn("XEN PVM boot() unpause domU %s" % self.vmname)
|
||||
utils.mutecheck_call([XM_PATH, 'unpause', self.vmname])
|
||||
|
||||
self.booted = True
|
||||
finally:
|
||||
|
@ -266,10 +253,10 @@ class XenNode(PyCoreNode):
|
|||
|
||||
def validate(self):
|
||||
self.session.services.validatenodeservices(self)
|
||||
|
||||
|
||||
# from class LxcNode (also SimpleLxcNode)
|
||||
def shutdown(self):
|
||||
self.warn("XEN PVM shutdown() called")
|
||||
logger.warn("XEN PVM shutdown() called")
|
||||
if not self.up:
|
||||
return
|
||||
self.lock.acquire()
|
||||
|
@ -282,27 +269,25 @@ class XenNode(PyCoreNode):
|
|||
try:
|
||||
# RJE XXX what to do here
|
||||
if self.booted:
|
||||
mutecheck_call([XM_PATH, 'destroy', self.vmname])
|
||||
utils.mutecheck_call([XM_PATH, 'destroy', self.vmname])
|
||||
self.booted = False
|
||||
except OSError:
|
||||
pass
|
||||
except subprocess.CalledProcessError:
|
||||
except (OSError, subprocess.CalledProcessError):
|
||||
# ignore this error too, the VM may have exited already
|
||||
pass
|
||||
logger.exception("error during shutdown")
|
||||
|
||||
# discard LVM volume
|
||||
lvmRemoveCount = 0
|
||||
while os.path.exists(self.lvpath):
|
||||
try:
|
||||
check_call([UDEVADM_PATH, 'settle'])
|
||||
mutecall([LVCHANGE_PATH, '-an', self.lvpath])
|
||||
subprocess.check_call([UDEVADM_PATH, 'settle'])
|
||||
utils.mutecall([LVCHANGE_PATH, '-an', self.lvpath])
|
||||
lvmRemoveCount += 1
|
||||
mutecall([LVREMOVE_PATH, '-f', self.lvpath])
|
||||
utils.mutecall([LVREMOVE_PATH, '-f', self.lvpath])
|
||||
except OSError:
|
||||
pass
|
||||
if (lvmRemoveCount > 1):
|
||||
self.warn("XEN PVM shutdown() required %d lvremove " \
|
||||
"executions." % lvmRemoveCount)
|
||||
logger.exception("error during shutdown")
|
||||
|
||||
if lvmRemoveCount > 1:
|
||||
logger.warn("XEN PVM shutdown() required %d lvremove executions." % lvmRemoveCount)
|
||||
|
||||
self._netif.clear()
|
||||
del self.session
|
||||
|
@ -314,117 +299,124 @@ class XenNode(PyCoreNode):
|
|||
self.lock.release()
|
||||
|
||||
def createlogicalvolume(self):
|
||||
''' Create a logical volume for this Xen domU. Called from startup().
|
||||
'''
|
||||
"""
|
||||
Create a logical volume for this Xen domU. Called from startup().
|
||||
"""
|
||||
if os.path.exists(self.lvpath):
|
||||
raise Exception, "LVM volume already exists"
|
||||
mutecheck_call([LVCREATE_PATH, '--size', self.disksize,
|
||||
'--name', self.lvname, self.vgname])
|
||||
utils.mutecheck_call([LVCREATE_PATH, '--size', self.disksize,
|
||||
'--name', self.lvname, self.vgname])
|
||||
|
||||
def createpartitions(self):
|
||||
''' Partition the LVM volume into persistent and swap partitions
|
||||
using the parted module.
|
||||
'''
|
||||
"""
|
||||
Partition the LVM volume into persistent and swap partitions
|
||||
using the parted module.
|
||||
"""
|
||||
dev = parted.Device(path=self.lvpath)
|
||||
dev.removeFromCache()
|
||||
disk = parted.freshDisk(dev, 'msdos')
|
||||
constraint = parted.Constraint(device=dev)
|
||||
persist_size = int(0.75 * constraint.maxSize);
|
||||
persist_size = int(0.75 * constraint.maxSize)
|
||||
self.createpartition(device=dev, disk=disk, start=1,
|
||||
end=(persist_size - 1) , type="ext4")
|
||||
end=persist_size - 1, type="ext4")
|
||||
self.createpartition(device=dev, disk=disk, start=persist_size,
|
||||
end=(constraint.maxSize - 1) , type="linux-swap(v1)")
|
||||
end=constraint.maxSize - 1, type="linux-swap(v1)")
|
||||
disk.commit()
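# Illustrative sketch (not part of this diff): the 75%/25% split computed by
# createpartitions() above, as plain sector arithmetic (sizes are assumptions).
def _partition_layout(max_size):
    persist_size = int(0.75 * max_size)                           # ext4 persistent area
    return (1, persist_size - 1), (persist_size, max_size - 1)    # (ext4), (swap)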
|
||||
|
||||
def createpartition(self, device, disk, start, end, type):
|
||||
''' Create a single partition of the specified type and size and add
|
||||
it to the disk object, using the parted module.
|
||||
'''
|
||||
"""
|
||||
Create a single partition of the specified type and size and add
|
||||
it to the disk object, using the parted module.
|
||||
"""
|
||||
geo = parted.Geometry(device=device, start=start, end=end)
|
||||
fs = parted.FileSystem(type=type, geometry=geo)
|
||||
part = parted.Partition(disk=disk, fs=fs, type=parted.PARTITION_NORMAL,
|
||||
geometry=geo)
|
||||
part = parted.Partition(disk=disk, fs=fs, type=parted.PARTITION_NORMAL, geometry=geo)
|
||||
constraint = parted.Constraint(exactGeom=geo)
|
||||
disk.addPartition(partition=part, constraint=constraint)
|
||||
|
||||
def createfilesystems(self):
|
||||
''' Make an ext4 filesystem and swap space. Return the device name for
|
||||
the persistent partition so we can mount it.
|
||||
'''
|
||||
"""
|
||||
Make an ext4 filesystem and swap space. Return the device name for
|
||||
the persistent partition so we can mount it.
|
||||
"""
|
||||
output = subprocess.Popen([KPARTX_PATH, '-l', self.lvpath],
|
||||
stdout=subprocess.PIPE).communicate()[0]
|
||||
lines = output.splitlines()
|
||||
persistdev = '/dev/mapper/' + lines[0].strip().split(' ')[0].strip()
|
||||
swapdev = '/dev/mapper/' + lines[1].strip().split(' ')[0].strip()
|
||||
check_call([KPARTX_PATH, '-a', self.lvpath])
|
||||
mutecheck_call([MKFSEXT4_PATH, '-L', 'persist', persistdev])
|
||||
mutecheck_call([MKSWAP_PATH, '-f', '-L', 'swap', swapdev])
|
||||
subprocess.check_call([KPARTX_PATH, '-a', self.lvpath])
|
||||
utils.mutecheck_call([MKFSEXT4_PATH, '-L', 'persist', persistdev])
|
||||
utils.mutecheck_call([MKSWAP_PATH, '-f', '-L', 'swap', swapdev])
|
||||
return persistdev
|
||||
|
||||
def untarpersistent(self, tarname, iso):
|
||||
''' Unpack a persistent template tar file to the mounted mount dir.
|
||||
Uses fsimage library to read from an ISO file.
|
||||
'''
|
||||
tarname = tarname.replace('%h', self.name) # filename may use hostname
|
||||
"""
|
||||
Unpack a persistent template tar file to the mounted mount dir.
|
||||
Uses fsimage library to read from an ISO file.
|
||||
"""
|
||||
tarname = tarname.replace('%h', self.name) # filename may use hostname
|
||||
if iso:
|
||||
try:
|
||||
fs = fsimage.open(self.isofile, 0)
|
||||
except IOError, e:
|
||||
self.warn("Failed to open ISO file: %s (%s)" % (self.isofile,e))
|
||||
except IOError:
|
||||
logger.exception("Failed to open ISO file: %s", self.isofile)
|
||||
return
|
||||
try:
|
||||
tardata = fs.open_file(tarname).read();
|
||||
except IOError, e:
|
||||
self.warn("Failed to open tar file: %s (%s)" % (tarname, e))
|
||||
tardata = fs.open_file(tarname).read()
|
||||
except IOError:
|
||||
logger.exception("Failed to open tar file: %s", tarname)
|
||||
return
|
||||
finally:
|
||||
del fs;
|
||||
del fs
|
||||
else:
|
||||
try:
|
||||
f = open(tarname)
|
||||
tardata = f.read()
|
||||
f.close()
|
||||
except IOError, e:
|
||||
self.warn("Failed to open tar file: %s (%s)" % (tarname, e))
|
||||
except IOError:
|
||||
logger.exception("Failed to open tar file: %s", tarname)
|
||||
return
|
||||
p = subprocess.Popen([TAR_PATH, '-C', self.mountdir, '--numeric-owner',
|
||||
'-xf', '-'], stdin=subprocess.PIPE)
|
||||
'-xf', '-'], stdin=subprocess.PIPE)
|
||||
p.communicate(input=tardata)
|
||||
p.wait()
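# Illustrative sketch (not part of this diff): the pipe-to-tar pattern used by
# untarpersistent() above; the '/bin/tar' default stands in for the module's
# TAR_PATH constant, which is not shown here.
import subprocess
def _untar_bytes(tardata, destdir, tar_path='/bin/tar'):
    p = subprocess.Popen([tar_path, '-C', destdir, '--numeric-owner', '-xf', '-'],
                         stdin=subprocess.PIPE)
    p.communicate(input=tardata)
    return p.wait()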
|
||||
|
||||
def setrootpassword(self, pw):
|
||||
''' Set the root password by updating the shadow password file that
|
||||
is on the filesystem mounted in the temporary area.
|
||||
'''
|
||||
saltedpw = crypt.crypt(pw, '$6$'+base64.b64encode(os.urandom(12)))
|
||||
check_call([SED_PATH, '-i', '-e',
|
||||
'/^root:/s_^root:\([^:]*\):_root:' + saltedpw + ':_',
|
||||
os.path.join(self.mountdir, self.etcdir, 'shadow')])
|
||||
"""
|
||||
Set the root password by updating the shadow password file that
|
||||
is on the filesystem mounted in the temporary area.
|
||||
"""
|
||||
saltedpw = crypt.crypt(pw, '$6$' + base64.b64encode(os.urandom(12)))
|
||||
subprocess.check_call([SED_PATH, '-i', '-e',
|
||||
'/^root:/s_^root:\([^:]*\):_root:' + saltedpw + ':_',
|
||||
os.path.join(self.mountdir, self.etcdir, 'shadow')])
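# Illustrative sketch (not part of this diff): the SHA-512 crypt hash that
# setrootpassword() above writes into the mounted /etc/shadow; the salt is
# random, so the result differs on every call.
import base64, crypt, os
def _shadow_hash(password):
    salt = '$6$' + base64.b64encode(os.urandom(12))     # '$6$' selects SHA-512 crypt
    return crypt.crypt(password, salt)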
|
||||
|
||||
def sethostname(self, old, new):
|
||||
''' Set the hostname by updating the hostname and hosts files that
|
||||
reside on the filesystem mounted in the temporary area.
|
||||
'''
|
||||
check_call([SED_PATH, '-i', '-e', 's/%s/%s/' % (old, new),
|
||||
os.path.join(self.mountdir, self.etcdir, 'hostname')])
|
||||
check_call([SED_PATH, '-i', '-e', 's/%s/%s/' % (old, new),
|
||||
os.path.join(self.mountdir, self.etcdir, 'hosts')])
|
||||
"""
|
||||
Set the hostname by updating the hostname and hosts files that
|
||||
reside on the filesystem mounted in the temporary area.
|
||||
"""
|
||||
subprocess.check_call([SED_PATH, '-i', '-e', 's/%s/%s/' % (old, new),
|
||||
os.path.join(self.mountdir, self.etcdir, 'hostname')])
|
||||
subprocess.check_call([SED_PATH, '-i', '-e', 's/%s/%s/' % (old, new),
|
||||
os.path.join(self.mountdir, self.etcdir, 'hosts')])
|
||||
|
||||
def setupssh(self, keypath):
|
||||
''' Configure SSH access by installing host keys and a system-wide
|
||||
authorized_keys file.
|
||||
'''
|
||||
"""
|
||||
Configure SSH access by installing host keys and a system-wide
|
||||
authorized_keys file.
|
||||
"""
|
||||
sshdcfg = os.path.join(self.mountdir, self.etcdir, 'ssh/sshd_config')
|
||||
check_call([SED_PATH, '-i', '-e',
|
||||
's/PermitRootLogin no/PermitRootLogin yes/', sshdcfg])
|
||||
subprocess.check_call([SED_PATH, '-i', '-e',
|
||||
's/PermitRootLogin no/PermitRootLogin yes/', sshdcfg])
|
||||
sshdir = os.path.join(self.getconfigitem('mount_path'), self.etcdir,
|
||||
'ssh')
|
||||
sshdir = sshdir.replace('/','\\/') # backslash slashes for use in sed
|
||||
check_call([SED_PATH, '-i', '-e',
|
||||
's/#AuthorizedKeysFile %h\/.ssh\/authorized_keys/' + \
|
||||
'AuthorizedKeysFile ' + sshdir + '\/authorized_keys/',
|
||||
sshdcfg])
|
||||
for f in ('ssh_host_rsa_key','ssh_host_rsa_key.pub','authorized_keys'):
|
||||
sshdir = sshdir.replace('/', '\\/') # backslash slashes for use in sed
|
||||
subprocess.check_call([SED_PATH, '-i', '-e',
|
||||
's/#AuthorizedKeysFile %h\/.ssh\/authorized_keys/' + \
|
||||
'AuthorizedKeysFile ' + sshdir + '\/authorized_keys/',
|
||||
sshdcfg])
|
||||
for f in 'ssh_host_rsa_key', 'ssh_host_rsa_key.pub', 'authorized_keys':
|
||||
src = os.path.join(keypath, f)
|
||||
dst = os.path.join(self.mountdir, self.etcdir, 'ssh', f)
|
||||
shutil.copy(src, dst)
|
||||
|
@ -432,10 +424,11 @@ class XenNode(PyCoreNode):
|
|||
os.chmod(dst, 0600)
|
||||
|
||||
def createvm(self):
|
||||
''' Instantiate a *paused* domU VM
|
||||
Instantiate it now, so we can add network interfaces,
|
||||
pause it so we can have the filesystem open for configuration.
|
||||
'''
|
||||
"""
|
||||
Instantiate a *paused* domU VM
|
||||
Instantiate it now, so we can add network interfaces,
|
||||
pause it so we can have the filesystem open for configuration.
|
||||
"""
|
||||
args = [XM_PATH, 'create', os.devnull, '--paused']
|
||||
args.extend(['name=' + self.vmname, 'memory=' + str(self.ramsize)])
|
||||
args.append('disk=tap:aio:' + self.isofile + ',hda,r')
|
||||
|
@ -446,110 +439,109 @@ class XenNode(PyCoreNode):
|
|||
for action in ('poweroff', 'reboot', 'suspend', 'crash', 'halt'):
|
||||
args.append('on_%s=destroy' % action)
|
||||
args.append('extra=' + self.getconfigitem('xm_create_extra'))
|
||||
mutecheck_call(args)
|
||||
utils.mutecheck_call(args)
|
||||
|
||||
# from class LxcNode
|
||||
def privatedir(self, path):
|
||||
#self.warn("XEN PVM privatedir() called")
|
||||
# self.warn("XEN PVM privatedir() called")
|
||||
# Do nothing, Xen PVM nodes are fully private
|
||||
pass
|
||||
|
||||
# from class LxcNode
|
||||
def opennodefile(self, filename, mode = "w"):
|
||||
self.warn("XEN PVM opennodefile() called")
|
||||
raise Exception, "Can't open VM file with opennodefile()"
|
||||
def opennodefile(self, filename, mode="w"):
|
||||
logger.warn("XEN PVM opennodefile() called")
|
||||
raise Exception("Can't open VM file with opennodefile()")
|
||||
|
||||
# from class LxcNode
|
||||
# open a file on a paused Xen node
|
||||
def openpausednodefile(self, filename, mode = "w"):
|
||||
def openpausednodefile(self, filename, mode="w"):
|
||||
dirname, basename = os.path.split(filename)
|
||||
if not basename:
|
||||
raise ValueError, "no basename for filename: " + filename
|
||||
if dirname and dirname[0] == "/":
|
||||
dirname = dirname[1:]
|
||||
#dirname = dirname.replace("/", ".")
|
||||
# dirname = dirname.replace("/", ".")
|
||||
dirname = os.path.join(self.nodedir, dirname)
|
||||
if not os.path.isdir(dirname):
|
||||
os.makedirs(dirname, mode = 0755)
|
||||
os.makedirs(dirname, mode=0755)
|
||||
hostfilename = os.path.join(dirname, basename)
|
||||
return open(hostfilename, mode)
|
||||
|
||||
# from class LxcNode
|
||||
def nodefile(self, filename, contents, mode = 0644):
|
||||
def nodefile(self, filename, contents, mode=0644):
|
||||
if filename in self.FilesToIgnore:
|
||||
#self.warn("XEN PVM nodefile(filename=%s) ignored" % [filename])
|
||||
# self.warn("XEN PVM nodefile(filename=%s) ignored" % [filename])
|
||||
return
|
||||
|
||||
if filename in self.FilesRedirection:
|
||||
redirFilename = self.FilesRedirection[filename]
|
||||
self.warn("XEN PVM nodefile(filename=%s) redirected to %s" % (filename, redirFilename))
|
||||
logger.warn("XEN PVM nodefile(filename=%s) redirected to %s" % (filename, redirFilename))
|
||||
filename = redirFilename
|
||||
|
||||
self.warn("XEN PVM nodefile(filename=%s) called" % [filename])
|
||||
|
||||
logger.warn("XEN PVM nodefile(filename=%s) called" % [filename])
|
||||
self.lock.acquire()
|
||||
if not self.up:
|
||||
self.lock.release()
|
||||
raise Exception, "Can't access VM file as VM disk isn't ready"
|
||||
return
|
||||
raise Exception("Can't access VM file as VM disk isn't ready")
|
||||
|
||||
if self.booted:
|
||||
self.lock.release()
|
||||
raise Exception, "Can't access VM file as VM is already running"
|
||||
return
|
||||
raise Exception("Can't access VM file as VM is already running")
|
||||
|
||||
try:
|
||||
f = self.openpausednodefile(filename, "w")
|
||||
f.write(contents)
|
||||
os.chmod(f.name, mode)
|
||||
f.close()
|
||||
self.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode))
|
||||
logger.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode))
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
# from class SimpleLxcNode
|
||||
def alive(self):
|
||||
# is VM running?
|
||||
return False # XXX
|
||||
return False # XXX
|
||||
|
||||
def cmd(self, args, wait = True):
|
||||
def cmd(self, args, wait=True):
|
||||
cmdAsString = string.join(args, ' ')
|
||||
if cmdAsString in self.CmdsToIgnore:
|
||||
#self.warn("XEN PVM cmd(args=[%s]) called and ignored" % cmdAsString)
|
||||
# self.warn("XEN PVM cmd(args=[%s]) called and ignored" % cmdAsString)
|
||||
return 0
|
||||
if cmdAsString in self.CmdsRedirection:
|
||||
self.CmdsRedirection[cmdAsString](self)
|
||||
return 0
|
||||
|
||||
self.warn("XEN PVM cmd(args=[%s]) called, but not yet implemented" % cmdAsString)
|
||||
return 0
|
||||
logger("XEN PVM cmd(args=[%s]) called, but not yet implemented" % cmdAsString)
|
||||
return 0
|
||||
|
||||
def cmdresult(self, args):
|
||||
cmdAsString = string.join(args, ' ')
|
||||
if cmdAsString in self.CmdsToIgnore:
|
||||
#self.warn("XEN PVM cmd(args=[%s]) called and ignored" % cmdAsString)
|
||||
return (0, "")
|
||||
self.warn("XEN PVM cmdresult(args=[%s]) called, but not yet implemented" % cmdAsString)
|
||||
return (0, "")
|
||||
# self.warn("XEN PVM cmd(args=[%s]) called and ignored" % cmdAsString)
|
||||
return 0, ""
|
||||
logger.warn("XEN PVM cmdresult(args=[%s]) called, but not yet implemented" % cmdAsString)
|
||||
return 0, ""
|
||||
|
||||
def popen(self, args):
|
||||
cmdAsString = string.join(args, ' ')
|
||||
self.warn("XEN PVM popen(args=[%s]) called, but not yet implemented" % cmdAsString)
|
||||
logger.warn("XEN PVM popen(args=[%s]) called, but not yet implemented" % cmdAsString)
|
||||
return
|
||||
|
||||
def icmd(self, args):
|
||||
cmdAsString = string.join(args, ' ')
|
||||
self.warn("XEN PVM icmd(args=[%s]) called, but not yet implemented" % cmdAsString)
|
||||
logger.warn("XEN PVM icmd(args=[%s]) called, but not yet implemented" % cmdAsString)
|
||||
return
|
||||
|
||||
def term(self, sh = "/bin/sh"):
|
||||
self.warn("XEN PVM term() called, but not yet implemented")
|
||||
def term(self, sh="/bin/sh"):
|
||||
logger.warn("XEN PVM term() called, but not yet implemented")
|
||||
return
|
||||
|
||||
def termcmdstring(self, sh = "/bin/sh"):
|
||||
''' We may add 'sudo' to the command string because the GUI runs as a
|
||||
normal user. Use SSH if control interface is available, otherwise
|
||||
use Xen console with a keymapping for easy login.
|
||||
'''
|
||||
def termcmdstring(self, sh="/bin/sh"):
|
||||
"""
|
||||
We may add 'sudo' to the command string because the GUI runs as a
|
||||
normal user. Use SSH if control interface is available, otherwise
|
||||
use Xen console with a keymapping for easy login.
|
||||
"""
|
||||
controlifc = None
|
||||
for ifc in self.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
|
@ -561,33 +553,22 @@ class XenNode(PyCoreNode):
|
|||
controlip = controlifc.addrlist[0].split('/')[0]
|
||||
cmd += "-e ssh root@%s" % controlip
|
||||
return cmd
|
||||
# otherwise use 'xm console'
|
||||
#pw = self.getconfigitem('root_password')
|
||||
#cmd += "-xrm 'XTerm*VT100.translations: #override <Key>F1: "
|
||||
#cmd += "string(\"root\\n\") \\n <Key>F2: string(\"%s\\n\")' " % pw
|
||||
# otherwise use 'xm console'
|
||||
# pw = self.getconfigitem('root_password')
|
||||
# cmd += "-xrm 'XTerm*VT100.translations: #override <Key>F1: "
|
||||
# cmd += "string(\"root\\n\") \\n <Key>F2: string(\"%s\\n\")' " % pw
|
||||
cmd += "-e sudo %s console %s" % (XM_PATH, self.vmname)
|
||||
return cmd
|
||||
|
||||
def shcmd(self, cmdstr, sh = "/bin/sh"):
|
||||
self.warn("XEN PVM shcmd(args=[%s]) called, but not yet implemented" % cmdstr)
|
||||
def shcmd(self, cmdstr, sh="/bin/sh"):
|
||||
logger("XEN PVM shcmd(args=[%s]) called, but not yet implemented" % cmdstr)
|
||||
return
|
||||
|
||||
# from class SimpleLxcNode
|
||||
def info(self, msg):
|
||||
if self.verbose:
|
||||
print "%s: %s" % (self.name, msg)
|
||||
sys.stdout.flush()
|
||||
|
||||
# from class SimpleLxcNode
|
||||
def warn(self, msg):
|
||||
print >> sys.stderr, "%s: %s" % (self.name, msg)
|
||||
sys.stderr.flush()
|
||||
|
||||
def mount(self, source, target):
|
||||
self.warn("XEN PVM Nodes can't bind-mount filesystems")
|
||||
logger.warn("XEN PVM Nodes can't bind-mount filesystems")
|
||||
|
||||
def umount(self, target):
|
||||
self.warn("XEN PVM Nodes can't bind-mount filesystems")
|
||||
logger.warn("XEN PVM Nodes can't bind-mount filesystems")
|
||||
|
||||
def newifindex(self):
|
||||
self.lock.acquire()
|
||||
|
@ -607,16 +588,16 @@ class XenNode(PyCoreNode):
|
|||
return -1
|
||||
|
||||
def addnetif(self, netif, ifindex):
|
||||
self.warn("XEN PVM addnetif() called")
|
||||
logger.warn("XEN PVM addnetif() called")
|
||||
PyCoreNode.addnetif(self, netif, ifindex)
|
||||
|
||||
def delnetif(self, ifindex):
|
||||
self.warn("XEN PVM delnetif() called")
|
||||
logger.warn("XEN PVM delnetif() called")
|
||||
PyCoreNode.delnetif(self, ifindex)
|
||||
|
||||
def newveth(self, ifindex = None, ifname = None, net = None, hwaddr = None):
|
||||
self.warn("XEN PVM newveth(ifindex=%s, ifname=%s) called" %
|
||||
(ifindex, ifname))
|
||||
def newveth(self, ifindex=None, ifname=None, net=None, hwaddr=None):
|
||||
logger.warn("XEN PVM newveth(ifindex=%s, ifname=%s) called" %
|
||||
(ifindex, ifname))
|
||||
|
||||
self.lock.acquire()
|
||||
try:
|
||||
|
@ -624,12 +605,12 @@ class XenNode(PyCoreNode):
|
|||
ifindex = self.newifindex()
|
||||
if ifname is None:
|
||||
ifname = "eth%d" % ifindex
|
||||
sessionid = self.session.shortsessionid()
|
||||
sessionid = self.session.short_session_id()
|
||||
name = "n%s.%s.%s" % (self.objid, ifindex, sessionid)
|
||||
localname = "n%s.%s.%s" % (self.objid, ifname, sessionid)
|
||||
ifclass = XenVEth
|
||||
veth = ifclass(node = self, name = name, localname = localname,
|
||||
mtu = 1500, net = net, hwaddr = hwaddr)
|
||||
veth = ifclass(node=self, name=name, localname=localname,
|
||||
mtu=1500, net=net, hwaddr=hwaddr)
|
||||
|
||||
veth.name = ifname
|
||||
try:
|
||||
|
@ -642,14 +623,14 @@ class XenNode(PyCoreNode):
|
|||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def newtuntap(self, ifindex = None, ifname = None, net = None):
|
||||
self.warn("XEN PVM newtuntap() called but not implemented")
|
||||
def newtuntap(self, ifindex=None, ifname=None, net=None):
|
||||
logger.warn("XEN PVM newtuntap() called but not implemented")
|
||||
|
||||
def sethwaddr(self, ifindex, addr):
|
||||
self._netif[ifindex].sethwaddr(addr)
|
||||
if self.up:
|
||||
pass
|
||||
#self.cmd([IP_BIN, "link", "set", "dev", self.ifname(ifindex),
|
||||
# self.cmd([IP_BIN, "link", "set", "dev", self.ifname(ifindex),
|
||||
# "address", str(addr)])
|
||||
|
||||
def addaddr(self, ifindex, addr):
|
||||
|
@ -663,49 +644,49 @@ class XenNode(PyCoreNode):
|
|||
try:
|
||||
self._netif[ifindex].deladdr(addr)
|
||||
except ValueError:
|
||||
self.warn("trying to delete unknown address: %s" % addr)
|
||||
logger.exception("trying to delete unknown address: %s", addr)
|
||||
|
||||
if self.up:
|
||||
pass
|
||||
# self.cmd([IP_BIN, "addr", "del", str(addr),
|
||||
# "dev", self.ifname(ifindex)])
|
||||
|
||||
valid_deladdrtype = ("inet", "inet6", "inet6link")
|
||||
def delalladdr(self, ifindex, addrtypes = valid_deladdrtype):
|
||||
addr = self.getaddr(self.ifname(ifindex), rescan = True)
|
||||
|
||||
def delalladdr(self, ifindex, addrtypes=valid_deladdrtype):
|
||||
addr = self.getaddr(self.ifname(ifindex), rescan=True)
|
||||
for t in addrtypes:
|
||||
if t not in self.valid_deladdrtype:
|
||||
raise ValueError, "addr type must be in: " + \
|
||||
" ".join(self.valid_deladdrtype)
|
||||
" ".join(self.valid_deladdrtype)
|
||||
for a in addr[t]:
|
||||
self.deladdr(ifindex, a)
|
||||
# update cached information
|
||||
self.getaddr(self.ifname(ifindex), rescan = True)
|
||||
self.getaddr(self.ifname(ifindex), rescan=True)
|
||||
|
||||
# Xen PVM relies on boot process to bring up links
|
||||
#def ifup(self, ifindex):
|
||||
# def ifup(self, ifindex):
|
||||
# if self.up:
|
||||
# self.cmd([IP_BIN, "link", "set", self.ifname(ifindex), "up"])
|
||||
|
||||
def newnetif(self, net = None, addrlist = [], hwaddr = None,
|
||||
ifindex = None, ifname = None):
|
||||
self.warn("XEN PVM newnetif(ifindex=%s, ifname=%s) called" %
|
||||
(ifindex, ifname))
|
||||
def newnetif(self, net=None, addrlist=[], hwaddr=None,
|
||||
ifindex=None, ifname=None):
|
||||
logger.warn("XEN PVM newnetif(ifindex=%s, ifname=%s) called" %
|
||||
(ifindex, ifname))
|
||||
|
||||
self.lock.acquire()
|
||||
|
||||
if not self.up:
|
||||
self.lock.release()
|
||||
raise Exception, "Can't access add veth as VM disk isn't ready"
|
||||
return
|
||||
raise Exception("Can't access add veth as VM disk isn't ready")
|
||||
|
||||
if self.booted:
|
||||
self.lock.release()
|
||||
raise Exception, "Can't access add veth as VM is already running"
|
||||
return
|
||||
raise Exception("Can't access add veth as VM is already running")
|
||||
|
||||
try:
|
||||
if isinstance(net, EmaneNode):
|
||||
raise Exception, "Xen PVM doesn't yet support Emane nets"
|
||||
if nodeutils.is_node(net, NodeTypes.EMANE):
|
||||
raise Exception("Xen PVM doesn't yet support Emane nets")
|
||||
|
||||
# ifindex = self.newtuntap(ifindex = ifindex, ifname = ifname,
|
||||
# net = net)
|
||||
|
@ -720,8 +701,8 @@ class XenNode(PyCoreNode):
|
|||
# netif.addaddr(addr)
|
||||
# return ifindex
|
||||
else:
|
||||
ifindex = self.newveth(ifindex = ifindex, ifname = ifname,
|
||||
net = net, hwaddr = hwaddr)
|
||||
ifindex = self.newveth(ifindex=ifindex, ifname=ifname,
|
||||
net=net, hwaddr=hwaddr)
|
||||
if net is not None:
|
||||
self.attachnet(ifindex, net)
|
||||
|
||||
|
@ -729,24 +710,27 @@ class XenNode(PyCoreNode):
|
|||
self.etcdir,
|
||||
'udev/rules.d/70-persistent-net.rules')
|
||||
f = self.openpausednodefile(rulefile, "a")
|
||||
f.write('\n# Xen PVM virtual interface #%s %s with MAC address %s\n' % (ifindex, self.ifname(ifindex), hwaddr))
|
||||
f.write(
|
||||
'\n# Xen PVM virtual interface #%s %s with MAC address %s\n' % (ifindex, self.ifname(ifindex), hwaddr))
|
||||
# Using MAC address as we're now loading PVM net driver "early"
|
||||
# OLD: Would like to use MAC address, but udev isn't working with paravirtualized NICs. Perhaps the "set hw address" isn't triggering a rescan.
|
||||
f.write('SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="%s", KERNEL=="eth*", NAME="%s"\n' % (hwaddr, self.ifname(ifindex)))
|
||||
#f.write('SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", DEVPATH=="/devices/vif-%s/?*", KERNEL=="eth*", NAME="%s"\n' % (ifindex, self.ifname(ifindex)))
|
||||
f.write(
|
||||
'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="%s", KERNEL=="eth*", NAME="%s"\n' % (
|
||||
hwaddr, self.ifname(ifindex)))
|
||||
# f.write('SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", DEVPATH=="/devices/vif-%s/?*", KERNEL=="eth*", NAME="%s"\n' % (ifindex, self.ifname(ifindex)))
|
||||
f.close()
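For reference, the rule written to 70-persistent-net.rules above pins the guest interface name to its MAC address; a hedged sketch of the formatting step (the helper name is illustrative, the rule text matches the write above):
    def persistent_net_rule(hwaddr, ifname):
        # Match the paravirtualized NIC by MAC and force the kernel name.
        return ('SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", '
                'ATTR{address}=="%s", KERNEL=="eth*", NAME="%s"\n' % (hwaddr, ifname))

    # persistent_net_rule("00:16:3e:00:00:01", "eth0")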
|
||||
|
||||
if hwaddr:
|
||||
self.sethwaddr(ifindex, hwaddr)
|
||||
for addr in maketuple(addrlist):
|
||||
for addr in utils.maketuple(addrlist):
|
||||
self.addaddr(ifindex, addr)
|
||||
#self.ifup(ifindex)
|
||||
# self.ifup(ifindex)
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def connectnode(self, ifname, othernode, otherifname):
|
||||
self.warn("XEN PVM connectnode() called")
|
||||
logger.warn("XEN PVM connectnode() called")
|
||||
|
||||
# tmplen = 8
|
||||
# tmp1 = "tmp." + "".join([random.choice(string.ascii_lowercase)
|
||||
|
@ -769,21 +753,19 @@ class XenNode(PyCoreNode):
|
|||
self.lock.acquire()
|
||||
if not self.up:
|
||||
self.lock.release()
|
||||
raise Exception, "Can't access VM file as VM disk isn't ready"
|
||||
return
|
||||
raise Exception("Can't access VM file as VM disk isn't ready")
|
||||
|
||||
if self.booted:
|
||||
self.lock.release()
|
||||
raise Exception, "Can't access VM file as VM is already running"
|
||||
return
|
||||
raise Exception("Can't access VM file as VM is already running")
|
||||
|
||||
if filename in self.FilesToIgnore:
|
||||
#self.warn("XEN PVM addfile(filename=%s) ignored" % [filename])
|
||||
# self.warn("XEN PVM addfile(filename=%s) ignored" % [filename])
|
||||
return
|
||||
|
||||
if filename in self.FilesRedirection:
|
||||
redirFilename = self.FilesRedirection[filename]
|
||||
self.warn("XEN PVM addfile(filename=%s) redirected to %s" % (filename, redirFilename))
|
||||
logger.warn("XEN PVM addfile(filename=%s) redirected to %s" % (filename, redirFilename))
|
||||
filename = redirFilename
|
||||
|
||||
try:
|
||||
|
@ -795,24 +777,24 @@ class XenNode(PyCoreNode):
|
|||
fout.write(contents)
|
||||
os.chmod(fout.name, mode)
|
||||
fout.close()
|
||||
self.info("created nodefile: '%s'; mode: 0%o" % (fout.name, mode))
|
||||
logger.info("created nodefile: '%s'; mode: 0%o" % (fout.name, mode))
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
self.warn("XEN PVM addfile(filename=%s) called" % [filename])
|
||||
logger.warn("XEN PVM addfile(filename=%s) called" % [filename])
|
||||
|
||||
#shcmd = "mkdir -p $(dirname '%s') && mv '%s' '%s' && sync" % \
|
||||
# shcmd = "mkdir -p $(dirname '%s') && mv '%s' '%s' && sync" % \
|
||||
# (filename, srcname, filename)
|
||||
#self.shcmd(shcmd)
|
||||
# self.shcmd(shcmd)
|
||||
|
||||
def unmount_all(self, path):
|
||||
''' Namespaces inherit the host mounts, so we need to ensure that all
|
||||
namespaces have unmounted our temporary mount area so that the
|
||||
kpartx command will succeed.
|
||||
'''
|
||||
"""
|
||||
Namespaces inherit the host mounts, so we need to ensure that all
|
||||
namespaces have unmounted our temporary mount area so that the
|
||||
kpartx command will succeed.
|
||||
"""
|
||||
# Session.bootnodes() already has self.session._objslock
|
||||
for o in self.session.objs():
|
||||
for o in self.session.objects.itervalues():
|
||||
if not isinstance(o, LxcNode):
|
||||
continue
|
||||
o.umount(path)
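unmount_all() walks every object in the session so that no container namespace still holds the temporary mount when kpartx runs; a condensed sketch of that filter-and-unmount loop (the container class is passed in here only to keep the sketch self-contained):
    def unmount_all(session_objects, path, container_cls):
        # container_cls is the LxcNode class checked above; other objects are skipped.
        for obj in session_objects:
            if isinstance(obj, container_cls):
                obj.umount(path)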
|
||||
|
||||
|
|
|
@ -1,11 +1,4 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
"""
|
||||
xenconfig.py: Implementation of the XenConfigManager class for managing
|
||||
configurable items for XenNodes.
|
||||
|
||||
|
@ -17,72 +10,93 @@ Node type config: XenConfigManager.configs[0] = (type='mytype', values)
|
|||
All nodes of this type have this config.
|
||||
Node-specific config: XenConfigManager.configs[nodenumber] = (type, values)
|
||||
The node having this specific number has this config.
|
||||
'''
|
||||
"""
|
||||
|
||||
import sys, os, threading, subprocess, time, string
|
||||
import ConfigParser
|
||||
from xml.dom.minidom import parseString, Document
|
||||
from core.constants import *
|
||||
import os
|
||||
import string
|
||||
|
||||
from core import constants
|
||||
from core.api import coreapi
|
||||
from core.conf import ConfigurableManager, Configurable
|
||||
from core.conf import Configurable
|
||||
from core.conf import ConfigurableManager
|
||||
from core.enumerations import ConfigDataTypes
|
||||
from core.enumerations import ConfigFlags
|
||||
from core.enumerations import ConfigTlvs
|
||||
from core.enumerations import RegisterTlvs
|
||||
from core.misc import log
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
|
||||
class XenConfigManager(ConfigurableManager):
|
||||
''' Xen controller object. Lives in a Session instance and is used for
|
||||
building Xen profiles.
|
||||
'''
|
||||
_name = "xen"
|
||||
_type = coreapi.CORE_TLV_REG_EMULSRV
|
||||
|
||||
"""
|
||||
Xen controller object. Lives in a Session instance and is used for
|
||||
building Xen profiles.
|
||||
"""
|
||||
name = "xen"
|
||||
config_type = RegisterTlvs.EMULATION_SERVER.value
|
||||
|
||||
def __init__(self, session):
|
||||
ConfigurableManager.__init__(self, session)
|
||||
self.verbose = self.session.getcfgitembool('verbose', False)
|
||||
self.default_config = XenDefaultConfig(session, objid=None)
|
||||
"""
|
||||
Creates a XenConfigManager instance.
|
||||
|
||||
:param core.session.Session session: session this manager is tied to
|
||||
:return: nothing
|
||||
"""
|
||||
ConfigurableManager.__init__(self)
|
||||
self.default_config = XenDefaultConfig(session, object_id=None)
|
||||
self.loadconfigfile()
|
||||
|
||||
def setconfig(self, nodenum, conftype, values):
|
||||
''' add configuration values for a node to a dictionary; values are
|
||||
usually received from a Configuration Message, and may refer to a
|
||||
node for which no object exists yet
|
||||
'''
|
||||
if nodenum is None:
|
||||
nodenum = 0 # used for storing the global default config
|
||||
"""
|
||||
add configuration values for a node to a dictionary; values are
|
||||
usually received from a Configuration Message, and may refer to a
|
||||
node for which no object exists yet
|
||||
"""
|
||||
if nodenum is None:
|
||||
nodenum = 0 # used for storing the global default config
|
||||
return ConfigurableManager.setconfig(self, nodenum, conftype, values)
|
||||
|
||||
def getconfig(self, nodenum, conftype, defaultvalues):
|
||||
''' get configuration values for a node; if the values don't exist in
|
||||
our dictionary then return the default values supplied; if conftype
|
||||
is None then we return a match on any conftype.
|
||||
'''
|
||||
if nodenum is None:
|
||||
nodenum = 0 # used for storing the global default config
|
||||
return ConfigurableManager.getconfig(self, nodenum, conftype,
|
||||
defaultvalues)
|
||||
"""
|
||||
get configuration values for a node; if the values don't exist in
|
||||
our dictionary then return the default values supplied; if conftype
|
||||
is None then we return a match on any conftype.
|
||||
"""
|
||||
if nodenum is None:
|
||||
nodenum = 0 # used for storing the global default config
|
||||
return ConfigurableManager.getconfig(self, nodenum, conftype, defaultvalues)
|
||||
|
||||
def clearconfig(self, nodenum):
|
||||
''' remove configuration values for a node
|
||||
'''
|
||||
"""
|
||||
remove configuration values for a node
|
||||
"""
|
||||
ConfigurableManager.clearconfig(self, nodenum)
|
||||
if 0 in self.configs:
|
||||
self.configs.pop(0)
|
||||
|
||||
def configure(self, session, msg):
|
||||
''' Handle configuration messages for global Xen config.
|
||||
'''
|
||||
return self.default_config.configure(self, msg)
|
||||
def configure(self, session, config_data):
|
||||
"""
|
||||
Handle configuration messages for global Xen config.
|
||||
|
||||
:param core.conf.ConfigData config_data: configuration data for carrying out a configuration
|
||||
"""
|
||||
return self.default_config.configure(self, config_data)
|
||||
|
||||
def loadconfigfile(self, filename=None):
|
||||
''' Load defaults from the /etc/core/xen.conf file into dict object.
|
||||
'''
|
||||
"""
|
||||
Load defaults from the /etc/core/xen.conf file into dict object.
|
||||
"""
|
||||
if filename is None:
|
||||
filename = os.path.join(CORE_CONF_DIR, 'xen.conf')
|
||||
filename = os.path.join(constants.CORE_CONF_DIR, 'xen.conf')
|
||||
cfg = ConfigParser.SafeConfigParser()
|
||||
if filename not in cfg.read(filename):
|
||||
self.session.warn("unable to read Xen config file: %s" % filename)
|
||||
logger.warn("unable to read Xen config file: %s" % filename)
|
||||
return
|
||||
section = "xen"
|
||||
if not cfg.has_section(section):
|
||||
self.session.warn("%s is missing a xen section!" % filename)
|
||||
logger.warn("%s is missing a xen section!" % filename)
|
||||
return
|
||||
self.configfile = dict(cfg.items(section))
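loadconfigfile() expects an INI-style file with a [xen] section whose keys mirror the configuration item names; a minimal sketch of that read using the same SafeConfigParser approach (the path and example keys are assumptions):
    import ConfigParser  # Python 2 module, as used by this file

    cfg = ConfigParser.SafeConfigParser()
    filename = "/etc/core/xen.conf"
    if filename in cfg.read(filename) and cfg.has_section("xen"):
        configfile = dict(cfg.items("xen"))  # e.g. {'ram_size': '256', 'disk_size': '256M'}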
|
||||
# populate default config items from config file entries
|
||||
|
@ -92,13 +106,14 @@ class XenConfigManager(ConfigurableManager):
|
|||
if names[i] in self.configfile:
|
||||
vals[i] = self.configfile[names[i]]
|
||||
# this sets XenConfigManager.configs[0] = (type='xen', vals)
|
||||
self.setconfig(None, self.default_config._name, vals)
|
||||
self.setconfig(None, self.default_config.name, vals)
|
||||
|
||||
def getconfigitem(self, name, model=None, node=None, value=None):
|
||||
''' Get a config item of the given name, first looking for node-specific
|
||||
configuration, then model specific, and finally global defaults.
|
||||
If a value is supplied, it will override any stored config.
|
||||
'''
|
||||
"""
|
||||
Get a config item of the given name, first looking for node-specific
|
||||
configuration, then model specific, and finally global defaults.
|
||||
If a value is supplied, it will override any stored config.
|
||||
"""
|
||||
if value is not None:
|
||||
return value
|
||||
n = None
|
||||
|
@ -111,8 +126,8 @@ class XenConfigManager(ConfigurableManager):
|
|||
defaultvalues=None)
|
||||
if v is None:
|
||||
# get item from default config for the machine type
|
||||
(t, v) = self.getconfig(nodenum=None,
|
||||
conftype=self.default_config._name,
|
||||
(t, v) = self.getconfig(nodenum=None,
|
||||
conftype=self.default_config.name,
|
||||
defaultvalues=None)
|
||||
|
||||
confignames = self.default_config.getnames()
|
||||
|
@ -124,142 +139,136 @@ class XenConfigManager(ConfigurableManager):
|
|||
if name in self.configfile:
|
||||
return self.configfile[name]
|
||||
else:
|
||||
#self.warn("missing config item '%s'" % name)
|
||||
# logger.warn("missing config item '%s'" % name)
|
||||
return None
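The lookup order implemented by getconfigitem() is: explicit value, node-specific config, machine-type defaults, and finally the xen.conf file; a condensed sketch of that fall-through (plain dict layers stand in for the getconfig() calls above):
    def lookup(name, value, node_cfg, type_cfg, file_cfg):
        # First layer holding the item wins; None means "not set at this level".
        if value is not None:
            return value
        for layer in (node_cfg, type_cfg, file_cfg):
            if layer and layer.get(name) is not None:
                return layer[name]
        return None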
|
||||
|
||||
|
||||
class XenConfig(Configurable):
|
||||
''' Manage Xen configuration profiles.
|
||||
'''
|
||||
|
||||
@classmethod
|
||||
def configure(cls, xen, msg):
|
||||
''' Handle configuration messages for setting up a model.
|
||||
Similar to Configurable.configure(), but considers opaque data
|
||||
for indicating node types.
|
||||
'''
|
||||
reply = None
|
||||
nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE)
|
||||
objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ)
|
||||
conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE)
|
||||
opaque = msg.gettlv(coreapi.CORE_TLV_CONF_OPAQUE)
|
||||
"""
|
||||
Manage Xen configuration profiles.
|
||||
"""
|
||||
|
||||
nodetype = objname
|
||||
@classmethod
|
||||
def configure(cls, xen, config_data):
|
||||
"""
|
||||
Handle configuration messages for setting up a model.
|
||||
Similar to Configurable.configure(), but considers opaque data
|
||||
for indicating node types.
|
||||
|
||||
:param xen: xen instance to configure
|
||||
:param core.conf.ConfigData config_data: configuration data for carrying out a configuration
|
||||
"""
|
||||
reply = None
|
||||
node_id = config_data.node
|
||||
object_name = config_data.object
|
||||
config_type = config_data.type
|
||||
opaque = config_data.opaque
|
||||
values_str = config_data.data_values
|
||||
|
||||
nodetype = object_name
|
||||
if opaque is not None:
|
||||
opaque_items = opaque.split(':')
|
||||
if len(opaque_items) != 2:
|
||||
xen.warn("xen config: invalid opaque data in conf message")
|
||||
logger.warn("xen config: invalid opaque data in conf message")
|
||||
return None
|
||||
nodetype = opaque_items[1]
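The opaque TLV checked above carries a "model:nodetype" pair; a small sketch of the split-and-validate step (the "xen:mytype" format is taken from the code above, the function name is illustrative):
    def nodetype_from_opaque(opaque):
        # Expected form is "<model>:<nodetype>", e.g. "xen:mytype".
        items = opaque.split(':')
        if len(items) != 2:
            return None  # invalid opaque data in the conf message
        return items[1]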
|
||||
|
||||
if xen.verbose:
|
||||
xen.info("received configure message for %s" % nodetype)
|
||||
if conftype == coreapi.CONF_TYPE_FLAGS_REQUEST:
|
||||
if xen.verbose:
|
||||
xen.info("replying to configure request for %s " % nodetype)
|
||||
logger.info("received configure message for %s", nodetype)
|
||||
if config_type == ConfigFlags.REQUEST.value:
|
||||
logger.info("replying to configure request for %s " % nodetype)
|
||||
# when object name is "all", the reply to this request may be None
|
||||
# if this node has not been configured for this model; otherwise we
|
||||
# reply with the defaults for this model
|
||||
if objname == "all":
|
||||
typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE
|
||||
if object_name == "all":
|
||||
typeflags = ConfigFlags.UPDATE.value
|
||||
else:
|
||||
typeflags = coreapi.CONF_TYPE_FLAGS_NONE
|
||||
values = xen.getconfig(nodenum, nodetype, defaultvalues=None)[1]
|
||||
typeflags = ConfigFlags.NONE.value
|
||||
values = xen.getconfig(node_id, nodetype, defaultvalues=None)[1]
|
||||
if values is None:
|
||||
# get defaults from default "xen" config which includes
|
||||
# settings from both cls._confdefaultvalues and xen.conf
|
||||
# settings from both cls._confdefaultvalues and xen.conf
|
||||
defaults = cls.getdefaultvalues()
|
||||
values = xen.getconfig(nodenum, cls._name, defaults)[1]
|
||||
values = xen.getconfig(node_id, cls.name, defaults)[1]
|
||||
if values is None:
|
||||
return None
|
||||
# reply with config options
|
||||
if nodenum is None:
|
||||
nodenum = 0
|
||||
reply = cls.toconfmsg(0, nodenum, typeflags, nodetype, values)
|
||||
elif conftype == coreapi.CONF_TYPE_FLAGS_RESET:
|
||||
if objname == "all":
|
||||
xen.clearconfig(nodenum)
|
||||
#elif conftype == coreapi.CONF_TYPE_FLAGS_UPDATE:
|
||||
if node_id is None:
|
||||
node_id = 0
|
||||
reply = cls.config_data(0, node_id, typeflags, nodetype, values)
|
||||
elif config_type == ConfigFlags.RESET.value:
|
||||
if object_name == "all":
|
||||
xen.clearconfig(node_id)
|
||||
# elif conftype == coreapi.CONF_TYPE_FLAGS_UPDATE:
|
||||
else:
|
||||
# store the configuration values for later use, when the XenNode
|
||||
# object has been created
|
||||
if objname is None:
|
||||
xen.info("no configuration object for node %s" % nodenum)
|
||||
if object_name is None:
|
||||
logger.info("no configuration object for node %s" % node_id)
|
||||
return None
|
||||
values_str = msg.gettlv(coreapi.CORE_TLV_CONF_VALUES)
|
||||
if values_str is None:
|
||||
# use default or preconfigured values
|
||||
defaults = cls.getdefaultvalues()
|
||||
values = xen.getconfig(nodenum, cls._name, defaults)[1]
|
||||
values = xen.getconfig(node_id, cls.name, defaults)[1]
|
||||
else:
|
||||
# use new values supplied from the conf message
|
||||
values = values_str.split('|')
|
||||
xen.setconfig(nodenum, nodetype, values)
|
||||
xen.setconfig(node_id, nodetype, values)
|
||||
|
||||
return reply
|
||||
|
||||
@classmethod
|
||||
def toconfmsg(cls, flags, nodenum, typeflags, nodetype, values):
|
||||
''' Convert this class to a Config API message. Some TLVs are defined
|
||||
by the class, but node number, conf type flags, and values must
|
||||
be passed in.
|
||||
'''
|
||||
def config_data(cls, flags, node_id, type_flags, nodetype, values):
|
||||
"""
|
||||
Convert this class to a Config API message. Some TLVs are defined
|
||||
by the class, but node number, conf type flags, and values must
|
||||
be passed in.
|
||||
"""
|
||||
values_str = string.join(values, '|')
|
||||
tlvdata = ""
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE, nodenum)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ,
|
||||
cls._name)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE,
|
||||
typeflags)
|
||||
datatypes = tuple( map(lambda x: x[1], cls._confmatrix) )
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_DATA_TYPES,
|
||||
datatypes)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES,
|
||||
values_str)
|
||||
captions = reduce( lambda a,b: a + '|' + b, \
|
||||
map(lambda x: x[4], cls._confmatrix))
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_CAPTIONS,
|
||||
captions)
|
||||
possiblevals = reduce( lambda a,b: a + '|' + b, \
|
||||
map(lambda x: x[3], cls._confmatrix))
|
||||
tlvdata += coreapi.CoreConfTlv.pack(
|
||||
coreapi.CORE_TLV_CONF_POSSIBLE_VALUES, possiblevals)
|
||||
if cls._bitmap is not None:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_BITMAP,
|
||||
cls._bitmap)
|
||||
if cls._confgroups is not None:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_GROUPS,
|
||||
cls._confgroups)
|
||||
opaque = "%s:%s" % (cls._name, nodetype)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OPAQUE,
|
||||
opaque)
|
||||
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.NODE.value, node_id)
|
||||
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.OBJECT.value, cls.name)
|
||||
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.TYPE.value, type_flags)
|
||||
datatypes = tuple(map(lambda x: x[1], cls.config_matrix))
|
||||
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.DATA_TYPES.value, datatypes)
|
||||
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.VALUES.value, values_str)
|
||||
captions = reduce(lambda a, b: a + '|' + b, map(lambda x: x[4], cls.config_matrix))
|
||||
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.CAPTIONS.value, captions)
|
||||
possiblevals = reduce(lambda a, b: a + '|' + b, map(lambda x: x[3], cls.config_matrix))
|
||||
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.POSSIBLE_VALUES.value, possiblevals)
|
||||
if cls.bitmap is not None:
|
||||
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.BITMAP.value, cls.bitmap)
|
||||
if cls.config_groups is not None:
|
||||
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.GROUPS.value, cls.config_groups)
|
||||
opaque = "%s:%s" % (cls.name, nodetype)
|
||||
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.OPAQUE.value, opaque)
|
||||
msg = coreapi.CoreConfMessage.pack(flags, tlvdata)
|
||||
return msg
|
||||
|
||||
|
||||
class XenDefaultConfig(XenConfig):
|
||||
''' Global default Xen configuration options.
|
||||
'''
|
||||
_name = "xen"
|
||||
"""
|
||||
Global default Xen configuration options.
|
||||
"""
|
||||
name = "xen"
|
||||
# Configuration items:
|
||||
# ('name', 'type', 'default', 'possible-value-list', 'caption')
|
||||
_confmatrix = [
|
||||
('ram_size', coreapi.CONF_DATA_TYPE_STRING, '256', '',
|
||||
config_matrix = [
|
||||
('ram_size', ConfigDataTypes.STRING.value, '256', '',
|
||||
'ram size (MB)'),
|
||||
('disk_size', coreapi.CONF_DATA_TYPE_STRING, '256M', '',
|
||||
('disk_size', ConfigDataTypes.STRING.value, '256M', '',
|
||||
'disk size (use K/M/G suffix)'),
|
||||
('iso_file', coreapi.CONF_DATA_TYPE_STRING, '', '',
|
||||
('iso_file', ConfigDataTypes.STRING.value, '', '',
|
||||
'iso file'),
|
||||
('mount_path', coreapi.CONF_DATA_TYPE_STRING, '', '',
|
||||
('mount_path', ConfigDataTypes.STRING.value, '', '',
|
||||
'mount path'),
|
||||
('etc_path', coreapi.CONF_DATA_TYPE_STRING, '', '',
|
||||
('etc_path', ConfigDataTypes.STRING.value, '', '',
|
||||
'etc path'),
|
||||
('persist_tar_iso', coreapi.CONF_DATA_TYPE_STRING, '', '',
|
||||
('persist_tar_iso', ConfigDataTypes.STRING.value, '', '',
|
||||
'iso persist tar file'),
|
||||
('persist_tar', coreapi.CONF_DATA_TYPE_STRING, '', '',
|
||||
('persist_tar', ConfigDataTypes.STRING.value, '', '',
|
||||
'persist tar file'),
|
||||
('root_password', coreapi.CONF_DATA_TYPE_STRING, 'password', '',
|
||||
('root_password', ConfigDataTypes.STRING.value, 'password', '',
|
||||
'root password'),
|
||||
]
|
||||
|
||||
_confgroups = "domU properties:1-%d" % len(_confmatrix)
|
||||
]
|
||||
|
||||
config_groups = "domU properties:1-%d" % len(config_matrix)
|
||||
|
|
daemon/core/xml/__init__.py (new, empty file)
|
@ -1,11 +1,18 @@
|
|||
import os
|
||||
import socket
|
||||
import subprocess
|
||||
import os
|
||||
import xmlutils
|
||||
|
||||
from core.netns import nodes
|
||||
from core.misc import ipaddr
|
||||
from core import constants
|
||||
from core import emane
|
||||
from core.enumerations import NodeTypes
|
||||
from core.misc import ipaddress
|
||||
from core.misc import log
|
||||
from core.misc import nodeutils
|
||||
from core.netns import nodes
|
||||
from core.xml import xmlutils
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
|
||||
class CoreDeploymentWriter(object):
|
||||
def __init__(self, dom, root, session):
|
||||
|
@ -13,7 +20,7 @@ class CoreDeploymentWriter(object):
|
|||
self.root = root
|
||||
self.session = session
|
||||
self.hostname = socket.gethostname()
|
||||
if self.session.emane.version < self.session.emane.EMANE092:
|
||||
if emane.VERSION < emane.EMANE092:
|
||||
self.transport = None
|
||||
self.platform = None
|
||||
|
||||
|
@ -34,11 +41,13 @@ class CoreDeploymentWriter(object):
|
|||
else:
|
||||
# TODO: handle other hosts
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@staticmethod
|
||||
def get_interface_names(hostname):
|
||||
'''Uses same methodology of get_ipv4_addresses() to get
|
||||
parallel list of interface names to go with ...'''
|
||||
"""
|
||||
Uses the same methodology as get_ipv4_addresses() to get a
|
||||
parallel list of interface names to go with ...
|
||||
"""
|
||||
if hostname == 'localhost':
|
||||
iface_list = []
|
||||
cmd = (constants.IP_BIN, '-o', '-f', 'inet', 'addr', 'show')
|
||||
|
@ -47,27 +56,25 @@ class CoreDeploymentWriter(object):
|
|||
split = line.split()
|
||||
if not split:
|
||||
continue
|
||||
ifaceName = split[1]
|
||||
interface_name = split[1]
|
||||
addr = split[3]
|
||||
if not addr.startswith('127.'):
|
||||
iface_list.append(ifaceName)
|
||||
iface_list.append(interface_name)
|
||||
return iface_list
|
||||
else:
|
||||
# TODO: handle other hosts
|
||||
raise NotImplementedError
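get_interface_names() shells out to 'ip -o -f inet addr show' and keeps the name of every interface whose address is not a loopback; a standalone sketch of that per-line parsing (the sample output line is illustrative):
    sample = "2: eth0    inet 10.0.0.1/24 brd 10.0.0.255 scope global eth0"
    names = []
    for line in [sample]:  # in the real code: one line per interface from ip(8)
        split = line.split()
        if not split:
            continue
        name, addr = split[1], split[3]
        if not addr.startswith('127.'):
            names.append(name)  # -> ['eth0']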
|
||||
|
||||
|
||||
@staticmethod
|
||||
def find_device(scenario, name):
|
||||
tagName = ('device', 'host', 'router')
|
||||
for d in xmlutils.iterDescendantsWithAttribute(scenario, tagName,
|
||||
'name', name):
|
||||
tag_name = ('device', 'host', 'router')
|
||||
for d in xmlutils.iter_descendants_with_attribute(scenario, tag_name, 'name', name):
|
||||
return d
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def find_interface(device, name):
|
||||
for i in xmlutils.iterDescendantsWithAttribute(device, 'interface',
|
||||
'name', name):
|
||||
for i in xmlutils.iter_descendants_with_attribute(device, 'interface', 'name', name):
|
||||
return i
|
||||
return None
|
||||
|
||||
|
@ -75,9 +82,9 @@ class CoreDeploymentWriter(object):
|
|||
testbed = self.dom.createElement('container')
|
||||
testbed.setAttribute('name', 'TestBed')
|
||||
testbed.setAttribute('id', 'TestBed')
|
||||
self.root.baseEle.appendChild(testbed)
|
||||
self.root.base_element.appendChild(testbed)
|
||||
nodelist = []
|
||||
for obj in self.session.objs():
|
||||
for obj in self.session.objects.itervalues():
|
||||
if isinstance(obj, nodes.PyCoreNode):
|
||||
nodelist.append(obj)
|
||||
name = self.hostname
|
||||
|
@ -86,18 +93,17 @@ class CoreDeploymentWriter(object):
|
|||
testhost = self.add_physical_host(testbed, name, ipv4_addresses, iface_names)
|
||||
for n in nodelist:
|
||||
self.add_virtual_host(testhost, n)
|
||||
# TODO: handle other servers
|
||||
# servers = self.session.broker.getservernames()
|
||||
# servers.remove('localhost')
|
||||
# TODO: handle other servers
|
||||
# servers = self.session.broker.getserverlist()
|
||||
# servers.remove('localhost')
|
||||
|
||||
def add_child_element(self, parent, tagName):
|
||||
el = self.dom.createElement(tagName)
|
||||
def add_child_element(self, parent, tag_name):
|
||||
el = self.dom.createElement(tag_name)
|
||||
parent.appendChild(el)
|
||||
return el
|
||||
|
||||
def add_child_element_with_nameattr(self, parent, tagName,
|
||||
name, setid = True):
|
||||
el = self.add_child_element(parent, tagName)
|
||||
def add_child_element_with_nameattr(self, parent, tag_name, name, setid=True):
|
||||
el = self.add_child_element(parent, tag_name)
|
||||
el.setAttribute('name', name)
|
||||
if setid:
|
||||
el.setAttribute('id', '%s/%s' % (parent.getAttribute('id'), name))
|
||||
|
@ -117,8 +123,7 @@ class CoreDeploymentWriter(object):
|
|||
return el
|
||||
|
||||
def add_platform(self, parent, name):
|
||||
el = self.add_child_element_with_nameattr(parent,
|
||||
'emanePlatform', name)
|
||||
el = self.add_child_element_with_nameattr(parent, 'emanePlatform', name)
|
||||
return el
|
||||
|
||||
def add_transport(self, parent, name):
|
||||
|
@ -130,8 +135,7 @@ class CoreDeploymentWriter(object):
|
|||
return el
|
||||
|
||||
def add_parameter(self, parent, name, val):
|
||||
el = self.add_child_element_with_nameattr(parent, 'parameter',
|
||||
name, False)
|
||||
el = self.add_child_element_with_nameattr(parent, 'parameter', name, False)
|
||||
el.appendChild(self.dom.createTextNode(val))
|
||||
return el
|
||||
|
||||
|
@ -151,53 +155,51 @@ class CoreDeploymentWriter(object):
|
|||
for i in range(0, len(ipv4_addresses)):
|
||||
addr = ipv4_addresses[i]
|
||||
if iface_names:
|
||||
ifaceName = iface_names[i]
|
||||
interface_name = iface_names[i]
|
||||
else:
|
||||
ifaceName = None
|
||||
self.add_address(el, 'IPv4', addr, ifaceName)
|
||||
interface_name = None
|
||||
self.add_address(el, 'IPv4', addr, interface_name)
|
||||
return el
|
||||
|
||||
def add_virtual_host(self, parent, obj):
|
||||
assert isinstance(obj, nodes.PyCoreNode)
|
||||
el = self.add_host(parent, obj.name)
|
||||
device = self.find_device(self.root.baseEle, obj.name)
|
||||
device = self.find_device(self.root.base_element, obj.name)
|
||||
if device is None:
|
||||
self.session.warn('corresponding XML device not found for %s' %
|
||||
(obj.name))
|
||||
logger.warn('corresponding XML device not found for %s' % obj.name)
|
||||
return
|
||||
self.add_mapping(device, 'testHost', el.getAttribute('id'))
|
||||
self.add_type(el, 'virtual')
|
||||
for netif in obj.netifs():
|
||||
for address in netif.addrlist:
|
||||
addr, slash, prefixlen= address.partition('/')
|
||||
if ipaddr.isIPv4Address(addr):
|
||||
addr, slash, prefixlen = address.partition('/')
|
||||
if ipaddress.is_ipv4_address(addr):
|
||||
addr_type = 'IPv4'
|
||||
elif ipaddr.isIPv6Address(addr):
|
||||
elif ipaddress.is_ipv6_address(addr):
|
||||
addr_type = 'IPv6'
|
||||
else:
|
||||
raise NotImplementedError
|
||||
self.add_address(el, addr_type, address, netif.name)
|
||||
if isinstance(netif.net, nodes.EmaneNode):
|
||||
if nodeutils.is_node(netif.net, NodeTypes.EMANE):
|
||||
nem = self.add_emane_interface(parent, el, netif)
|
||||
interface = self.find_interface(device, netif.name)
|
||||
self.add_mapping(interface, 'nem', nem.getAttribute('id'))
|
||||
return el
|
||||
|
||||
def add_emane_interface(self, physical_host, virtual_host, netif,
|
||||
platform_name = 'p1', transport_name = 't1'):
|
||||
def add_emane_interface(self, physical_host, virtual_host, netif, platform_name='p1', transport_name='t1'):
|
||||
nemid = netif.net.nemidmap[netif]
|
||||
if self.session.emane.version < self.session.emane.EMANE092:
|
||||
if emane.VERSION < emane.EMANE092:
|
||||
if self.platform is None:
|
||||
self.platform = \
|
||||
self.add_platform(physical_host, name = platform_name)
|
||||
self.add_platform(physical_host, name=platform_name)
|
||||
platform = self.platform
|
||||
if self.transport is None:
|
||||
self.transport = \
|
||||
self.add_transport(physical_host, name = transport_name)
|
||||
self.add_transport(physical_host, name=transport_name)
|
||||
transport = self.transport
|
||||
else:
|
||||
platform = self.add_platform(virtual_host, name = platform_name)
|
||||
transport = self.add_transport(virtual_host, name = transport_name)
|
||||
platform = self.add_platform(virtual_host, name=platform_name)
|
||||
transport = self.add_transport(virtual_host, name=transport_name)
|
||||
nem_name = 'nem%s' % nemid
|
||||
nem = self.add_nem(platform, nem_name)
|
||||
self.add_parameter(nem, 'nemid', str(nemid))
|
|
@ -1,38 +1,38 @@
|
|||
# CORE
|
||||
# Copyright (c) 2014 The Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
|
||||
from xml.dom.minidom import parse
|
||||
from xmlutils import getFirstChildByTagName
|
||||
from xmlparser0 import CoreDocumentParser0
|
||||
from xmlparser1 import CoreDocumentParser1
|
||||
|
||||
from core.xml.xmlparser0 import CoreDocumentParser0
|
||||
from core.xml.xmlparser1 import CoreDocumentParser1
|
||||
from core.xml.xmlutils import get_first_child_by_tag_name
|
||||
|
||||
|
||||
class CoreVersionParser(object):
|
||||
DEFAULT_SCENARIO_VERSION = '1.0'
|
||||
|
||||
'''\
|
||||
"""
|
||||
Helper class to check the version of Network Plan document. This
|
||||
simply looks for a "Scenario" element; when present, this
|
||||
indicates a 0.0 version document. The dom member is set in order
|
||||
to prevent parsing a file twice (it can be passed to the
|
||||
appropriate CoreDocumentParser class.)
|
||||
'''
|
||||
def __init__(self, filename, options={}):
|
||||
"""
|
||||
|
||||
DEFAULT_SCENARIO_VERSION = '1.0'
|
||||
|
||||
def __init__(self, filename, options):
|
||||
if 'dom' in options:
|
||||
self.dom = options['dom']
|
||||
else:
|
||||
self.dom = parse(filename)
|
||||
scenario = getFirstChildByTagName(self.dom, 'scenario')
|
||||
scenario = get_first_child_by_tag_name(self.dom, 'scenario')
|
||||
if scenario:
|
||||
version = scenario.getAttribute('version')
|
||||
if not version:
|
||||
version = self.DEFAULT_SCENARIO_VERSION
|
||||
self.version = version
|
||||
elif getFirstChildByTagName(self.dom, 'Scenario'):
|
||||
elif get_first_child_by_tag_name(self.dom, 'Scenario'):
|
||||
self.version = '0.0'
|
||||
else:
|
||||
self.version = 'unknown'
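CoreVersionParser keys off the element name: a lowercase <scenario> carries a version attribute (defaulting to 1.0), while the old capitalized <Scenario> means version 0.0. A minimal minidom sketch of the same check (it uses getElementsByTagName rather than the first-child helper, so it is a simplification, not the class above):
    from xml.dom.minidom import parseString

    def detect_version(xml_text):
        dom = parseString(xml_text)
        if dom.getElementsByTagName('scenario'):
            scenario = dom.getElementsByTagName('scenario')[0]
            return scenario.getAttribute('version') or '1.0'
        if dom.getElementsByTagName('Scenario'):
            return '0.0'
        return 'unknown'

    # detect_version('<scenario version="1.0"/>')  -> '1.0'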
|
||||
|
||||
|
||||
def core_document_parser(session, filename, options):
|
||||
vp = CoreVersionParser(filename, options)
|
||||
if 'dom' not in options:
|
|
@ -1,19 +1,17 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2014 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
|
||||
from core.netns import nodes
|
||||
from xml.dom.minidom import parse
|
||||
from xmlutils import *
|
||||
|
||||
from core.enumerations import NodeTypes
|
||||
from core.misc import log
|
||||
from core.misc import nodeutils
|
||||
from core.service import ServiceManager
|
||||
from core.xml import xmlutils
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
|
||||
class CoreDocumentParser0(object):
|
||||
def __init__(self, session, filename, options):
|
||||
self.session = session
|
||||
self.verbose = self.session.getcfgitembool('verbose', False)
|
||||
self.filename = filename
|
||||
if 'dom' in options:
|
||||
# this prevents parsing twice when detecting file versions
|
||||
|
@ -23,17 +21,17 @@ class CoreDocumentParser0(object):
|
|||
self.start = options['start']
|
||||
self.nodecls = options['nodecls']
|
||||
|
||||
self.np = getoneelement(self.dom, "NetworkPlan")
|
||||
self.np = xmlutils.get_one_element(self.dom, "NetworkPlan")
|
||||
if self.np is None:
|
||||
raise ValueError, "missing NetworkPlan!"
|
||||
self.mp = getoneelement(self.dom, "MotionPlan")
|
||||
self.sp = getoneelement(self.dom, "ServicePlan")
|
||||
self.meta = getoneelement(self.dom, "CoreMetaData")
|
||||
|
||||
self.mp = xmlutils.get_one_element(self.dom, "MotionPlan")
|
||||
self.sp = xmlutils.get_one_element(self.dom, "ServicePlan")
|
||||
self.meta = xmlutils.get_one_element(self.dom, "CoreMetaData")
|
||||
|
||||
self.coords = self.getmotiondict(self.mp)
|
||||
# link parameters parsed in parsenets(), applied in parsenodes()
|
||||
self.linkparams = {}
|
||||
|
||||
|
||||
self.parsedefaultservices()
|
||||
self.parseorigin()
|
||||
self.parsenets()
|
||||
|
@ -41,16 +39,11 @@ class CoreDocumentParser0(object):
|
|||
self.parseservices()
|
||||
self.parsemeta()
|
||||
|
||||
|
||||
def warn(self, msg):
|
||||
if self.session:
|
||||
warnstr = "XML parsing '%s':" % (self.filename)
|
||||
self.session.warn("%s %s" % (warnstr, msg))
|
||||
|
||||
def getmotiondict(self, mp):
|
||||
''' Parse a MotionPlan into a dict with node names for keys and coordinates
|
||||
"""
|
||||
Parse a MotionPlan into a dict with node names for keys and coordinates
|
||||
for values.
|
||||
'''
|
||||
"""
|
||||
if mp is None:
|
||||
return {}
|
||||
coords = {}
|
||||
|
@ -70,35 +63,34 @@ class CoreDocumentParser0(object):
|
|||
xyz = map(int, txt.nodeValue.split(','))
|
||||
z = None
|
||||
x, y = xyz[0:2]
|
||||
if (len(xyz) == 3):
|
||||
if len(xyz) == 3:
|
||||
z = xyz[2]
|
||||
coords[nodename] = (x, y, z)
|
||||
return coords
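The point text parsed above is a comma-separated "x,y" or "x,y,z" coordinate; a tiny sketch of that conversion (function name is illustrative):
    def parse_coords(text):
        # "100,200" or "100,200,0" -> (x, y, z) with z optional
        xyz = [int(v) for v in text.split(',')]
        x, y = xyz[0], xyz[1]
        z = xyz[2] if len(xyz) == 3 else None
        return x, y, z

    # parse_coords("100,200,0") -> (100, 200, 0)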
|
||||
|
||||
@staticmethod
|
||||
def getcommonattributes(obj):
|
||||
''' Helper to return tuple of attributes common to nodes and nets.
|
||||
'''
|
||||
"""
|
||||
Helper to return tuple of attributes common to nodes and nets.
|
||||
"""
|
||||
id = int(obj.getAttribute("id"))
|
||||
name = str(obj.getAttribute("name"))
|
||||
type = str(obj.getAttribute("type"))
|
||||
return(id, name, type)
|
||||
|
||||
return id, name, type
|
||||
|
||||
def parsenets(self):
|
||||
linkednets = []
|
||||
for net in self.np.getElementsByTagName("NetworkDefinition"):
|
||||
id, name, type = self.getcommonattributes(net)
|
||||
nodecls = xmltypetonodeclass(self.session, type)
|
||||
nodecls = xmlutils.xml_type_to_node_class(self.session, type)
|
||||
if not nodecls:
|
||||
self.warn("skipping unknown network node '%s' type '%s'" % \
|
||||
(name, type))
|
||||
logger.warn("skipping unknown network node '%s' type '%s'", name, type)
|
||||
continue
|
||||
n = self.session.addobj(cls = nodecls, objid = id, name = name,
|
||||
start = self.start)
|
||||
n = self.session.add_object(cls=nodecls, objid=id, name=name, start=self.start)
|
||||
if name in self.coords:
|
||||
x, y, z = self.coords[name]
|
||||
n.setposition(x, y, z)
|
||||
getparamssetattrs(net, ("icon", "canvas", "opaque"), n)
|
||||
xmlutils.get_params_set_attrs(net, ("icon", "canvas", "opaque"), n)
|
||||
if hasattr(n, "canvas") and n.canvas is not None:
|
||||
n.canvas = int(n.canvas)
|
||||
# links between two nets (e.g. switch-switch)
|
||||
|
@ -108,12 +100,11 @@ class CoreDocumentParser0(object):
|
|||
linkednets.append((n, netid, ifcname))
|
||||
self.parsemodels(net, n)
|
||||
# link networks together now that they all have been parsed
|
||||
for (n, netid, ifcname) in linkednets:
|
||||
for n, netid, ifcname in linkednets:
|
||||
try:
|
||||
n2 = n.session.objbyname(netid)
|
||||
n2 = n.session.get_object_by_name(netid)
|
||||
except KeyError:
|
||||
n.warn("skipping net %s interface: unknown net %s" % \
|
||||
(n.name, netid))
|
||||
logger.warn("skipping net %s interface: unknown net %s", n.name, netid)
|
||||
continue
|
||||
upstream = False
|
||||
netif = n.getlinknetif(n2)
|
||||
|
@ -122,87 +113,87 @@ class CoreDocumentParser0(object):
|
|||
else:
|
||||
netif.swapparams('_params_up')
|
||||
upstream = True
|
||||
key = (n2.name, ifcname)
|
||||
key = (n2.name, ifcname)
|
||||
if key in self.linkparams:
|
||||
for (k, v) in self.linkparams[key]:
|
||||
for k, v in self.linkparams[key]:
|
||||
netif.setparam(k, v)
|
||||
if upstream:
|
||||
netif.swapparams('_params_up')
|
||||
|
||||
|
||||
def parsenodes(self):
|
||||
for node in self.np.getElementsByTagName("Node"):
|
||||
id, name, type = self.getcommonattributes(node)
|
||||
if type == "rj45":
|
||||
nodecls = nodes.RJ45Node
|
||||
nodecls = nodeutils.get_node_class(NodeTypes.RJ45)
|
||||
else:
|
||||
nodecls = self.nodecls
|
||||
n = self.session.addobj(cls = nodecls, objid = id, name = name,
|
||||
start = self.start)
|
||||
n = self.session.add_object(cls=nodecls, objid=id, name=name, start=self.start)
|
||||
if name in self.coords:
|
||||
x, y, z = self.coords[name]
|
||||
n.setposition(x, y, z)
|
||||
n.type = type
|
||||
getparamssetattrs(node, ("icon", "canvas", "opaque"), n)
|
||||
xmlutils.get_params_set_attrs(node, ("icon", "canvas", "opaque"), n)
|
||||
if hasattr(n, "canvas") and n.canvas is not None:
|
||||
n.canvas = int(n.canvas)
|
||||
for ifc in node.getElementsByTagName("interface"):
|
||||
self.parseinterface(n, ifc)
|
||||
|
||||
|
||||
def parseinterface(self, n, ifc):
|
||||
''' Parse a interface block such as:
|
||||
"""
|
||||
Parse an interface block such as:
|
||||
<interface name="eth0" net="37278">
|
||||
<address type="mac">00:00:00:aa:00:01</address>
|
||||
<address>10.0.0.2/24</address>
|
||||
<address>2001::2/64</address>
|
||||
</interface>
|
||||
'''
|
||||
"""
|
||||
name = str(ifc.getAttribute("name"))
|
||||
netid = str(ifc.getAttribute("net"))
|
||||
hwaddr = None
|
||||
addrlist = []
|
||||
try:
|
||||
net = n.session.objbyname(netid)
|
||||
net = n.session.get_object_by_name(netid)
|
||||
except KeyError:
|
||||
n.warn("skipping node %s interface %s: unknown net %s" % \
|
||||
(n.name, name, netid))
|
||||
logger.warn("skipping node %s interface %s: unknown net %s", n.name, name, netid)
|
||||
return
|
||||
for addr in ifc.getElementsByTagName("address"):
|
||||
addrstr = gettextchild(addr)
|
||||
addrstr = xmlutils.get_text_child(addr)
|
||||
if addrstr is None:
|
||||
continue
|
||||
if addr.getAttribute("type") == "mac":
|
||||
hwaddr = addrstr
|
||||
else:
|
||||
addrlist.append(addrstr)
|
||||
i = n.newnetif(net, addrlist = addrlist, hwaddr = hwaddr,
|
||||
ifindex = None, ifname = name)
|
||||
i = n.newnetif(net, addrlist=addrlist, hwaddr=hwaddr, ifindex=None, ifname=name)
|
||||
for model in ifc.getElementsByTagName("model"):
|
||||
self.parsemodel(model, n, n.objid)
|
||||
key = (n.name, name)
|
||||
if key in self.linkparams:
|
||||
netif = n.netif(i)
|
||||
for (k, v) in self.linkparams[key]:
|
||||
for k, v in self.linkparams[key]:
|
||||
netif.setparam(k, v)
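The <interface> block shown in the docstring above maps straight onto the newnetif() arguments: name and net come from attributes, the "mac" address becomes hwaddr and everything else goes into addrlist. A self-contained minidom sketch of that extraction (simplified: it reads text nodes directly instead of using the xmlutils helper):
    from xml.dom.minidom import parseString

    ifc_xml = ('<interface name="eth0" net="37278">'
               '<address type="mac">00:00:00:aa:00:01</address>'
               '<address>10.0.0.2/24</address></interface>')
    ifc = parseString(ifc_xml).documentElement
    hwaddr, addrlist = None, []
    for addr in ifc.getElementsByTagName("address"):
        text = addr.firstChild.nodeValue.strip()
        if addr.getAttribute("type") == "mac":
            hwaddr = text
        else:
            addrlist.append(text)
    # name='eth0', net='37278', hwaddr='00:00:00:aa:00:01', addrlist=['10.0.0.2/24']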
|
||||
|
||||
|
||||
def parsemodels(self, dom, obj):
|
||||
''' Mobility/wireless model config is stored in a ConfigurableManager's
|
||||
"""
|
||||
Mobility/wireless model config is stored in a ConfigurableManager's
|
||||
config dict.
|
||||
'''
|
||||
"""
|
||||
nodenum = int(dom.getAttribute("id"))
|
||||
for model in dom.getElementsByTagName("model"):
|
||||
self.parsemodel(model, obj, nodenum)
|
||||
|
||||
|
||||
def parsemodel(self, model, obj, nodenum):
|
||||
''' Mobility/wireless model config is stored in a ConfigurableManager's
|
||||
"""
|
||||
Mobility/wireless model config is stored in a ConfigurableManager's
|
||||
config dict.
|
||||
'''
|
||||
"""
|
||||
name = model.getAttribute("name")
|
||||
if name == '':
|
||||
return
|
||||
type = model.getAttribute("type")
|
||||
# convert child text nodes into key=value pairs
|
||||
kvs = gettextelementstolist(model)
|
||||
|
||||
kvs = xmlutils.get_text_elements_to_list(model)
|
||||
|
||||
mgr = self.session.mobility
|
||||
# TODO: the session.confobj() mechanism could be more generic;
|
||||
# it only allows registering Conf Message callbacks, but here
|
||||
|
@ -218,24 +209,24 @@ class CoreDocumentParser0(object):
|
|||
# TODO: assign other config managers here
|
||||
if mgr:
|
||||
mgr.setconfig_keyvalues(nodenum, name, kvs)
|
||||
|
||||
|
||||
def parsenetem(self, model, obj, kvs):
|
||||
''' Determine interface and invoke setparam() using the parsed
|
||||
"""
|
||||
Determine interface and invoke setparam() using the parsed
|
||||
(key, value) pairs.
|
||||
'''
|
||||
"""
|
||||
ifname = model.getAttribute("netif")
|
||||
peer = model.getAttribute("peer")
|
||||
key = (peer, ifname)
|
||||
# nodes and interfaces do not exist yet, at this point of the parsing,
|
||||
# save (key, value) pairs for later
|
||||
try:
|
||||
#kvs = map(lambda(k, v): (int(v)), kvs)
|
||||
# kvs = map(lambda(k, v): (int(v)), kvs)
|
||||
kvs = map(self.numericvalue, kvs)
|
||||
except ValueError:
|
||||
self.warn("error parsing link parameters for '%s' on '%s'" % \
|
||||
(ifname, peer))
|
||||
logger.warn("error parsing link parameters for '%s' on '%s'", ifname, peer)
|
||||
self.linkparams[key] = kvs
|
||||
|
||||
|
||||
@staticmethod
|
||||
def numericvalue(keyvalue):
|
||||
(key, value) = keyvalue
|
||||
|
@ -243,18 +234,19 @@ class CoreDocumentParser0(object):
|
|||
value = float(value)
|
||||
else:
|
||||
value = int(value)
|
||||
return (key, value)
|
||||
return key, value
|
||||
|
||||
def parseorigin(self):
|
||||
''' Parse any origin tag from the Mobility Plan and set the CoreLocation
|
||||
reference point appropriately.
|
||||
'''
|
||||
origin = getoneelement(self.mp, "origin")
|
||||
"""
|
||||
Parse any origin tag from the Mobility Plan and set the CoreLocation
|
||||
reference point appropriately.
|
||||
"""
|
||||
origin = xmlutils.get_one_element(self.mp, "origin")
|
||||
if not origin:
|
||||
return
|
||||
location = self.session.location
|
||||
geo = []
|
||||
attrs = ("lat","lon","alt")
|
||||
attrs = ("lat", "lon", "alt")
|
||||
for i in xrange(3):
|
||||
a = origin.getAttribute(attrs[i])
|
||||
if a is not None:
|
||||
|
@ -264,44 +256,44 @@ class CoreDocumentParser0(object):
|
|||
scale = origin.getAttribute("scale100")
|
||||
if scale is not None:
|
||||
location.refscale = float(scale)
|
||||
point = getoneelement(origin, "point")
|
||||
point = xmlutils.get_one_element(origin, "point")
|
||||
if point is not None and point.firstChild is not None:
|
||||
xyz = point.firstChild.nodeValue.split(',')
|
||||
if len(xyz) == 2:
|
||||
xyz.append('0.0')
|
||||
if len(xyz) == 3:
|
||||
xyz = map(lambda(x): float(x), xyz)
|
||||
xyz = map(lambda (x): float(x), xyz)
|
||||
location.refxyz = (xyz[0], xyz[1], xyz[2])
|
||||
|
||||
|
||||
def parsedefaultservices(self):
|
||||
''' Prior to parsing nodes, use session.services manager to store
|
||||
"""
|
||||
Prior to parsing nodes, use session.services manager to store
|
||||
default services for node types
|
||||
'''
|
||||
"""
|
||||
for node in self.sp.getElementsByTagName("Node"):
|
||||
type = node.getAttribute("type")
|
||||
if type == '':
|
||||
continue # node-specific service config
|
||||
continue # node-specific service config
|
||||
services = []
|
||||
for service in node.getElementsByTagName("Service"):
|
||||
services.append(str(service.getAttribute("name")))
|
||||
self.session.services.defaultservices[type] = services
|
||||
self.session.info("default services for type %s set to %s" % \
|
||||
(type, services))
|
||||
|
||||
logger.info("default services for type %s set to %s" % (type, services))
|
||||
|
||||
def parseservices(self):
|
||||
''' After node objects exist, parse service customizations and add them
|
||||
"""
|
||||
After node objects exist, parse service customizations and add them
|
||||
to the nodes.
|
||||
'''
|
||||
"""
|
||||
svclists = {}
|
||||
# parse services and store configs into session.services.configs
|
||||
for node in self.sp.getElementsByTagName("Node"):
|
||||
name = node.getAttribute("name")
|
||||
if name == '':
|
||||
continue # node type without name
|
||||
n = self.session.objbyname(name)
|
||||
continue # node type without name
|
||||
n = self.session.get_object_by_name(name)
|
||||
if n is None:
|
||||
self.warn("skipping service config for unknown node '%s'" % \
|
||||
name)
|
||||
logger.warn("skipping service config for unknown node '%s'" % name)
|
||||
continue
|
||||
for service in node.getElementsByTagName("Service"):
|
||||
svcname = service.getAttribute("name")
|
||||
|
@ -310,28 +302,27 @@ class CoreDocumentParser0(object):
|
|||
svclists[n.objid] += "|" + svcname
|
||||
else:
|
||||
svclists[n.objid] = svcname
|
||||
# nodes in NetworkPlan but not in ServicePlan use the
|
||||
# nodes in NetworkPlan but not in ServicePlan use the
|
||||
# default services for their type
|
||||
for node in self.np.getElementsByTagName("Node"):
|
||||
id, name, type = self.getcommonattributes(node)
|
||||
if id in svclists:
|
||||
continue # custom config exists
|
||||
continue # custom config exists
|
||||
else:
|
||||
svclists[int(id)] = None # use defaults
|
||||
svclists[int(id)] = None # use defaults
|
||||
|
||||
# associate nodes with services
|
||||
for objid in sorted(svclists.keys()):
|
||||
n = self.session.obj(objid)
|
||||
self.session.services.addservicestonode(node=n, nodetype=n.type,
|
||||
services_str=svclists[objid],
|
||||
verbose=self.verbose)
|
||||
|
||||
n = self.session.get_object(objid)
|
||||
self.session.services.addservicestonode(node=n, nodetype=n.type, services_str=svclists[objid])
|
||||
|
||||
def parseservice(self, service, n):
|
||||
''' Use session.services manager to store service customizations before
|
||||
"""
|
||||
Use session.services manager to store service customizations before
|
||||
they are added to a node.
|
||||
'''
|
||||
"""
|
||||
name = service.getAttribute("name")
|
||||
svc = self.session.services.getservicebyname(name)
|
||||
svc = ServiceManager.get(name)
|
||||
if svc is None:
|
||||
return False
|
||||
values = []
|
||||
|
@ -347,13 +338,13 @@ class CoreDocumentParser0(object):
|
|||
dirs.append(dirname)
|
||||
if len(dirs):
|
||||
values.append("dirs=%s" % dirs)
|
||||
|
||||
|
||||
startup = []
|
||||
shutdown = []
|
||||
validate = []
|
||||
for cmd in service.getElementsByTagName("Command"):
|
||||
type = cmd.getAttribute("type")
|
||||
cmdstr = gettextchild(cmd)
|
||||
cmdstr = xmlutils.get_text_child(cmd)
|
||||
if cmdstr is None:
|
||||
continue
|
||||
if type == "start":
|
||||
|
@ -368,12 +359,12 @@ class CoreDocumentParser0(object):
|
|||
values.append("cmddown=%s" % shutdown)
|
||||
if len(validate):
|
||||
values.append("cmdval=%s" % validate)
|
||||
|
||||
|
||||
files = []
|
||||
for file in service.getElementsByTagName("File"):
|
||||
filename = file.getAttribute("name")
|
||||
files.append(filename)
|
||||
data = gettextchild(file)
|
||||
data = xmlutils.get_text_child(file)
|
||||
typestr = "service:%s:%s" % (name, filename)
|
||||
self.session.services.setservicefile(nodenum=n.objid, type=typestr,
|
||||
filename=filename,
|
||||
|
@ -384,37 +375,36 @@ class CoreDocumentParser0(object):
|
|||
return True
|
||||
self.session.services.setcustomservice(n.objid, svc, values)
|
||||
return True
|
||||
|
||||
|
||||
def parsehooks(self, hooks):
|
||||
''' Parse hook scripts from XML into session._hooks.
|
||||
'''
|
||||
for hook in hooks.getElementsByTagName("Hook"):
|
||||
filename = hook.getAttribute("name")
|
||||
state = hook.getAttribute("state")
|
||||
data = gettextchild(hook)
|
||||
data = xmlutils.get_text_child(hook)
|
||||
if data is None:
|
||||
data = "" # allow for empty file
|
||||
data = "" # allow for empty file
|
||||
type = "hook:%s" % state
|
||||
self.session.sethook(type, filename=filename,
|
||||
srcname=None, data=data)
|
||||
|
||||
self.session.set_hook(type, file_name=filename, source_name=None, data=data)
|
||||
|
||||
def parsemeta(self):
|
||||
opt = getoneelement(self.meta, "SessionOptions")
|
||||
opt = xmlutils.get_one_element(self.meta, "SessionOptions")
|
||||
if opt:
|
||||
for param in opt.getElementsByTagName("param"):
|
||||
k = str(param.getAttribute("name"))
|
||||
v = str(param.getAttribute("value"))
|
||||
if v == '':
|
||||
v = gettextchild(param) # allow attribute/text for newlines
|
||||
v = xmlutils.get_text_child(param) # allow attribute/text for newlines
|
||||
setattr(self.session.options, k, v)
|
||||
hooks = getoneelement(self.meta, "Hooks")
|
||||
hooks = xmlutils.get_one_element(self.meta, "Hooks")
|
||||
if hooks:
|
||||
self.parsehooks(hooks)
|
||||
meta = getoneelement(self.meta, "MetaData")
|
||||
meta = xmlutils.get_one_element(self.meta, "MetaData")
|
||||
if meta:
|
||||
for param in meta.getElementsByTagName("param"):
|
||||
k = str(param.getAttribute("name"))
|
||||
v = str(param.getAttribute("value"))
|
||||
if v == '':
|
||||
v = gettextchild(param)
|
||||
self.session.metadata.additem(k, v)
|
||||
v = xmlutils.get_text_child(param)
|
||||
self.session.metadata.add_item(k, v)
|
|
@ -1,19 +1,19 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c) 2015 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
|
||||
import sys
|
||||
import random
|
||||
from core.netns import nodes
|
||||
from core import constants
|
||||
from core.misc.ipaddr import MacAddr
|
||||
from xml.dom.minidom import Node
|
||||
from xml.dom.minidom import parse
|
||||
from xmlutils import *
|
||||
|
||||
from core import constants
|
||||
from core.enumerations import NodeTypes
|
||||
from core.misc import log
|
||||
from core.misc import nodeutils
|
||||
from core.misc.ipaddress import MacAddress
|
||||
from core.service import ServiceManager
|
||||
from core.xml import xmlutils
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
|
||||
class CoreDocumentParser1(object):
|
||||
|
||||
layer2_device_types = 'hub', 'switch'
|
||||
layer3_device_types = 'host', 'router'
|
||||
device_types = layer2_device_types + layer3_device_types
|
||||
|
@ -23,8 +23,14 @@ class CoreDocumentParser1(object):
|
|||
# TunnelNode
|
||||
|
||||
def __init__(self, session, filename, options):
|
||||
"""
|
||||
|
||||
:param core.session.Session session:
|
||||
:param filename:
|
||||
:param options:
|
||||
:return:
|
||||
"""
|
||||
self.session = session
|
||||
self.verbose = self.session.getcfgitembool('verbose', False)
|
||||
self.filename = filename
|
||||
if 'dom' in options:
|
||||
# this prevents parsing twice when detecting file versions
|
||||
|
@ -46,23 +52,9 @@ class CoreDocumentParser1(object):
|
|||
if self.scenario:
|
||||
self.parse_scenario()
|
||||
|
||||
def info(self, msg):
|
||||
s = 'XML parsing \'%s\': %s' % (self.filename, msg)
|
||||
if self.session:
|
||||
self.session.info(s)
|
||||
else:
|
||||
sys.stdout.write(s + '\n')
|
||||
|
||||
def warn(self, msg):
|
||||
s = 'WARNING XML parsing \'%s\': %s' % (self.filename, msg)
|
||||
if self.session:
|
||||
self.session.warn(s)
|
||||
else:
|
||||
sys.stderr.write(s + '\n')
|
||||
|
||||
@staticmethod
|
||||
def get_scenario(dom):
|
||||
scenario = getFirstChildByTagName(dom, 'scenario')
|
||||
scenario = xmlutils.get_first_child_by_tag_name(dom, 'scenario')
|
||||
if not scenario:
|
||||
raise ValueError, 'no scenario element found'
|
||||
version = scenario.getAttribute('version')
|
||||
|
@ -90,10 +82,10 @@ class CoreDocumentParser1(object):
|
|||
return x
|
||||
|
||||
def get_id(self, idstr):
|
||||
'''\
|
||||
"""
|
||||
Get a, possibly new, object id (node number) corresponding to
|
||||
the given XML string id.
|
||||
'''
|
||||
"""
|
||||
if not idstr:
|
||||
idn = self.rand_id()
|
||||
self.objids.add(idn)
|
||||
|
@ -109,19 +101,21 @@ class CoreDocumentParser1(object):
|
|||
return idn
|
||||
|
||||
def get_common_attributes(self, node):
|
||||
'''\
|
||||
"""
|
||||
Return id, name attributes for the given XML element. These
|
||||
attributes are common to nodes and networks.
|
||||
'''
|
||||
"""
|
||||
idstr = node.getAttribute('id')
|
||||
# use an explicit set COREID if it exists
|
||||
coreid = self.find_core_id(node)
|
||||
|
||||
if coreid:
|
||||
idn = int(coreid)
|
||||
if idstr:
|
||||
self.assign_id(idstr, idn)
|
||||
else:
|
||||
idn = self.get_id(idstr)
|
||||
|
||||
# TODO: consider supporting unicode; for now convert to an
|
||||
# ascii string
|
||||
namestr = str(node.getAttribute('name'))
|
||||
|
@ -129,50 +123,43 @@ class CoreDocumentParser1(object):
|
|||
|
||||
def iter_network_member_devices(self, element):
|
||||
# element can be a network or a channel
|
||||
for interface in iterChildrenWithAttribute(element, 'member',
|
||||
'type', 'interface'):
|
||||
if_id = getChildTextTrim(interface)
|
||||
assert if_id # XXX for testing
|
||||
for interface in xmlutils.iter_children_with_attribute(element, 'member', 'type', 'interface'):
|
||||
if_id = xmlutils.get_child_text_trim(interface)
|
||||
assert if_id # XXX for testing
|
||||
if not if_id:
|
||||
continue
|
||||
device, if_name = self.find_device_with_interface(if_id)
|
||||
assert device, 'no device for if_id: %s' % if_id # XXX for testing
|
||||
assert device, 'no device for if_id: %s' % if_id # XXX for testing
|
||||
if device:
|
||||
yield device, if_name
|
||||
|
||||
def network_class(self, network, network_type):
|
||||
'''\
|
||||
"""
|
||||
Return the corresponding CORE network class for the given
|
||||
network/network_type.
|
||||
'''
|
||||
if network_type == 'ethernet':
|
||||
return nodes.PtpNet
|
||||
elif network_type == 'satcom':
|
||||
return nodes.PtpNet
|
||||
"""
|
||||
if network_type in ['ethernet', 'satcom']:
|
||||
return nodeutils.get_node_class(NodeTypes.PEER_TO_PEER)
|
||||
elif network_type == 'wireless':
|
||||
channel = getFirstChildByTagName(network, 'channel')
|
||||
channel = xmlutils.get_first_child_by_tag_name(network, 'channel')
|
||||
if channel:
|
||||
# use an explicit CORE type if it exists
|
||||
coretype = getFirstChildTextTrimWithAttribute(channel, 'type',
|
||||
'domain', 'CORE')
|
||||
coretype = xmlutils.get_first_child_text_trim_with_attribute(channel, 'type', 'domain', 'CORE')
|
||||
if coretype:
|
||||
if coretype == 'basic_range':
|
||||
return nodes.WlanNode
|
||||
return nodeutils.get_node_class(NodeTypes.WIRELESS_LAN)
|
||||
elif coretype.startswith('emane'):
|
||||
return nodes.EmaneNode
|
||||
return nodeutils.get_node_class(NodeTypes.EMANE)
|
||||
else:
|
||||
self.warn('unknown network type: \'%s\'' % coretype)
|
||||
return xmltypetonodeclass(self.session, coretype)
|
||||
return nodes.WlanNode
|
||||
self.warn('unknown network type: \'%s\'' % network_type)
|
||||
logger.warn('unknown network type: \'%s\'', coretype)
|
||||
return xmlutils.xml_type_to_node_class(self.session, coretype)
|
||||
return nodeutils.get_node_class(NodeTypes.WIRELESS_LAN)
|
||||
logger.warn('unknown network type: \'%s\'', network_type)
|
||||
return None
|
||||
|
||||
def create_core_object(self, objcls, objid, objname, element, node_type):
|
||||
obj = self.session.addobj(cls = objcls, objid = objid,
|
||||
name = objname, start = self.start)
|
||||
if self.verbose:
|
||||
self.info('added object objid=%s name=%s cls=%s' % \
|
||||
(objid, objname, objcls))
|
||||
obj = self.session.add_object(cls=objcls, objid=objid, name=objname, start=self.start)
|
||||
logger.info('added object objid=%s name=%s cls=%s' % (objid, objname, objcls))
|
||||
self.set_object_position(obj, element)
|
||||
self.set_object_presentation(obj, element, node_type)
|
||||
return obj
|
||||
|
@ -180,7 +167,7 @@ class CoreDocumentParser1(object):
|
|||
def get_core_object(self, idstr):
|
||||
if idstr and idstr in self.objidmap:
|
||||
objid = self.objidmap[idstr]
|
||||
return self.session.obj(objid)
|
||||
return self.session.get_object(objid)
|
||||
return None
|
||||
|
||||
def parse_network_plan(self):
|
||||
|
@ -192,8 +179,7 @@ class CoreDocumentParser1(object):
|
|||
self.parse_networks()
|
||||
self.parse_layer3_devices()
|
||||
|
||||
def set_ethernet_link_parameters(self, channel, link_params,
|
||||
mobility_model_name, mobility_params):
|
||||
def set_ethernet_link_parameters(self, channel, link_params, mobility_model_name, mobility_params):
|
||||
# save link parameters for later use, indexed by the tuple
|
||||
# (device_id, interface_name)
|
||||
for dev, if_name in self.iter_network_member_devices(channel):
|
||||
|
@ -204,18 +190,15 @@ class CoreDocumentParser1(object):
|
|||
if mobility_model_name or mobility_params:
|
||||
raise NotImplementedError
|
||||
|
||||
def set_wireless_link_parameters(self, channel, link_params,
|
||||
mobility_model_name, mobility_params):
|
||||
def set_wireless_link_parameters(self, channel, link_params, mobility_model_name, mobility_params):
|
||||
network = self.find_channel_network(channel)
|
||||
network_id = network.getAttribute('id')
|
||||
if network_id in self.objidmap:
|
||||
nodenum = self.objidmap[network_id]
|
||||
else:
|
||||
self.warn('unknown network: %s' % network.toxml('utf-8'))
|
||||
assert False # XXX for testing
|
||||
return
|
||||
model_name = getFirstChildTextTrimWithAttribute(channel, 'type',
|
||||
'domain', 'CORE')
|
||||
logger.warn('unknown network: %s', network.toxml('utf-8'))
|
||||
assert False # XXX for testing
|
||||
model_name = xmlutils.get_first_child_text_trim_with_attribute(channel, 'type', 'domain', 'CORE')
|
||||
if not model_name:
|
||||
model_name = 'basic_range'
|
||||
if model_name == 'basic_range':
|
||||
|
@ -229,21 +212,20 @@ class CoreDocumentParser1(object):
|
|||
raise NotImplementedError
|
||||
mgr.setconfig_keyvalues(nodenum, model_name, link_params.items())
|
||||
if mobility_model_name and mobility_params:
|
||||
mgr.setconfig_keyvalues(nodenum, mobility_model_name,
|
||||
mobility_params.items())
|
||||
mgr.setconfig_keyvalues(nodenum, mobility_model_name, mobility_params.items())
|
||||
|
||||
def link_layer2_devices(self, device1, ifname1, device2, ifname2):
|
||||
'''\
|
||||
"""
|
||||
Link two layer-2 devices together.
|
||||
'''
|
||||
"""
|
||||
devid1 = device1.getAttribute('id')
|
||||
dev1 = self.get_core_object(devid1)
|
||||
devid2 = device2.getAttribute('id')
|
||||
dev2 = self.get_core_object(devid2)
|
||||
assert dev1 and dev2 # XXX for testing
|
||||
assert dev1 and dev2 # XXX for testing
|
||||
if dev1 and dev2:
|
||||
# TODO: review this
|
||||
if isinstance(dev2, nodes.RJ45Node):
|
||||
if nodeutils.is_node(dev2, NodeTypes.RJ45):
|
||||
# RJ45 nodes have different linknet()
|
||||
netif = dev2.linknet(dev1)
|
||||
else:
|
||||
|
@ -266,44 +248,38 @@ class CoreDocumentParser1(object):
|
|||
@classmethod
|
||||
def parse_parameter_children(cls, parent):
|
||||
params = {}
|
||||
for parameter in iterChildrenWithName(parent, 'parameter'):
|
||||
for parameter in xmlutils.iter_children_with_name(parent, 'parameter'):
|
||||
param_name = parameter.getAttribute('name')
|
||||
assert param_name # XXX for testing
|
||||
assert param_name # XXX for testing
|
||||
if not param_name:
|
||||
continue
|
||||
# TODO: consider supporting unicode; for now convert
|
||||
# to an ascii string
|
||||
param_name = str(param_name)
|
||||
param_val = cls.parse_xml_value(getChildTextTrim(parameter))
|
||||
param_val = cls.parse_xml_value(xmlutils.get_child_text_trim(parameter))
|
||||
# TODO: check if the name already exists?
|
||||
if param_name and param_val:
|
||||
params[param_name] = param_val
|
||||
return params
|
||||
|
||||
def parse_network_channel(self, channel):
|
||||
element = self.search_for_element(channel, 'type',
|
||||
lambda x: not x.hasAttributes())
|
||||
channel_type = getChildTextTrim(element)
|
||||
element = self.search_for_element(channel, 'type', lambda x: not x.hasAttributes())
|
||||
channel_type = xmlutils.get_child_text_trim(element)
|
||||
link_params = self.parse_parameter_children(channel)
|
||||
|
||||
mobility = getFirstChildByTagName(channel, 'CORE:mobility')
|
||||
mobility = xmlutils.get_first_child_by_tag_name(channel, 'CORE:mobility')
|
||||
if mobility:
|
||||
mobility_model_name = \
|
||||
getFirstChildTextTrimByTagName(mobility, 'type')
|
||||
mobility_model_name = xmlutils.get_first_child_text_trim_by_tag_name(mobility, 'type')
|
||||
mobility_params = self.parse_parameter_children(mobility)
|
||||
else:
|
||||
mobility_model_name = None
|
||||
mobility_params = None
|
||||
if channel_type == 'wireless':
|
||||
self.set_wireless_link_parameters(channel, link_params,
|
||||
mobility_model_name,
|
||||
mobility_params)
|
||||
self.set_wireless_link_parameters(channel, link_params, mobility_model_name, mobility_params)
|
||||
elif channel_type == 'ethernet':
|
||||
# TODO: maybe this can be done in the loop below to avoid
|
||||
# iterating through channel members multiple times
|
||||
self.set_ethernet_link_parameters(channel, link_params,
|
||||
mobility_model_name,
|
||||
mobility_params)
|
||||
self.set_ethernet_link_parameters(channel, link_params, mobility_model_name, mobility_params)
|
||||
else:
|
||||
raise NotImplementedError
|
||||
layer2_device = []
|
||||
|
@ -316,14 +292,14 @@ class CoreDocumentParser1(object):
|
|||
layer2_device[1][0], layer2_device[1][1])
|
||||
|
||||
def parse_network(self, network):
|
||||
'''\
|
||||
"""
|
||||
Each network element should have an 'id' and 'name' attribute
|
||||
and include the following child elements:
|
||||
|
||||
type (one)
|
||||
member (zero or more with type="interface" or type="channel")
|
||||
channel (zero or more)
|
||||
'''
|
||||
"""
|
||||
layer2_members = set()
|
||||
layer3_members = 0
|
||||
for dev, if_name in self.iter_network_member_devices(network):
|
||||
|
@ -336,53 +312,48 @@ class CoreDocumentParser1(object):
|
|||
layer3_members += 1
|
||||
else:
|
||||
raise NotImplementedError
|
||||
|
||||
if len(layer2_members) == 0:
|
||||
net_type = getFirstChildTextTrimByTagName(network, 'type')
|
||||
net_type = xmlutils.get_first_child_text_trim_by_tag_name(network, 'type')
|
||||
if not net_type:
|
||||
msg = 'no network type found for network: \'%s\'' % \
|
||||
network.toxml('utf-8')
|
||||
self.warn(msg)
|
||||
assert False # XXX for testing
|
||||
return
|
||||
logger.warn('no network type found for network: \'%s\'', network.toxml('utf-8'))
|
||||
assert False # XXX for testing
|
||||
net_cls = self.network_class(network, net_type)
|
||||
objid, net_name = self.get_common_attributes(network)
|
||||
if self.verbose:
|
||||
self.info('parsing network: %s %s' % (net_name, objid))
|
||||
if objid in self.session._objs:
|
||||
logger.info('parsing network: name=%s id=%s' % (net_name, objid))
|
||||
if objid in self.session.objects:
|
||||
return
|
||||
n = self.create_core_object(net_cls, objid, net_name,
|
||||
network, None)
|
||||
n = self.create_core_object(net_cls, objid, net_name, network, None)
|
||||
|
||||
# handle channel parameters
|
||||
for channel in iterChildrenWithName(network, 'channel'):
|
||||
for channel in xmlutils.iter_children_with_name(network, 'channel'):
|
||||
self.parse_network_channel(channel)
|
||||
|
||||
def parse_networks(self):
|
||||
'''\
|
||||
"""
|
||||
Parse all 'network' elements.
|
||||
'''
|
||||
for network in iterDescendantsWithName(self.scenario, 'network'):
|
||||
"""
|
||||
for network in xmlutils.iter_descendants_with_name(self.scenario, 'network'):
|
||||
self.parse_network(network)
|
||||
|
||||
def parse_addresses(self, interface):
|
||||
mac = []
|
||||
ipv4 = []
|
||||
ipv6= []
|
||||
ipv6 = []
|
||||
hostname = []
|
||||
for address in iterChildrenWithName(interface, 'address'):
|
||||
for address in xmlutils.iter_children_with_name(interface, 'address'):
|
||||
addr_type = address.getAttribute('type')
|
||||
if not addr_type:
|
||||
msg = 'no type attribute found for address ' \
|
||||
'in interface: \'%s\'' % interface.toxml('utf-8')
|
||||
self.warn(msg)
|
||||
assert False # XXX for testing
|
||||
continue
|
||||
addr_text = getChildTextTrim(address)
|
||||
'in interface: \'%s\'' % interface.toxml('utf-8')
|
||||
logger.warn(msg)
|
||||
assert False # XXX for testing
|
||||
addr_text = xmlutils.get_child_text_trim(address)
|
||||
if not addr_text:
|
||||
msg = 'no text found for address ' \
|
||||
'in interface: \'%s\'' % interface.toxml('utf-8')
|
||||
self.warn(msg)
|
||||
assert False # XXX for testing
|
||||
continue
|
||||
'in interface: \'%s\'' % interface.toxml('utf-8')
|
||||
logger.warn(msg)
|
||||
assert False # XXX for testing
|
||||
if addr_type == 'mac':
|
||||
mac.append(addr_text)
|
||||
elif addr_type == 'IPv4':
|
||||
|
@ -393,45 +364,38 @@ class CoreDocumentParser1(object):
|
|||
hostname.append(addr_text)
|
||||
else:
|
||||
msg = 'skipping unknown address type \'%s\' in ' \
|
||||
'interface: \'%s\'' % (addr_type, interface.toxml('utf-8'))
|
||||
self.warn(msg)
|
||||
assert False # XXX for testing
|
||||
continue
|
||||
'interface: \'%s\'' % (addr_type, interface.toxml('utf-8'))
|
||||
logger.warn(msg)
|
||||
assert False # XXX for testing
|
||||
return mac, ipv4, ipv6, hostname
|
||||
|
||||
def parse_interface(self, node, device_id, interface):
|
||||
'''\
|
||||
"""
|
||||
Each interface can have multiple 'address' elements.
|
||||
'''
|
||||
"""
|
||||
if_name = interface.getAttribute('name')
|
||||
network = self.find_interface_network_object(interface)
|
||||
if not network:
|
||||
msg = 'skipping node \'%s\' interface \'%s\': ' \
|
||||
'unknown network' % (node.name, if_name)
|
||||
self.warn(msg)
|
||||
assert False # XXX for testing
|
||||
return
|
||||
'unknown network' % (node.name, if_name)
|
||||
logger.warn(msg)
|
||||
assert False # XXX for testing
|
||||
mac, ipv4, ipv6, hostname = self.parse_addresses(interface)
|
||||
if mac:
|
||||
hwaddr = MacAddr.fromstring(mac[0])
|
||||
hwaddr = MacAddress.from_string(mac[0])
|
||||
else:
|
||||
hwaddr = None
|
||||
ifindex = node.newnetif(network, addrlist = ipv4 + ipv6,
|
||||
hwaddr = hwaddr, ifindex = None,
|
||||
ifname = if_name)
|
||||
ifindex = node.newnetif(network, addrlist=ipv4 + ipv6, hwaddr=hwaddr, ifindex=None, ifname=if_name)
|
||||
# TODO: 'hostname' addresses are unused
|
||||
if self.verbose:
|
||||
msg = 'node \'%s\' interface \'%s\' connected ' \
|
||||
'to network \'%s\'' % (node.name, if_name, network.name)
|
||||
self.info(msg)
|
||||
msg = 'node \'%s\' interface \'%s\' connected ' \
|
||||
'to network \'%s\'' % (node.name, if_name, network.name)
|
||||
logger.info(msg)
|
||||
# set link parameters for wired links
|
||||
if isinstance(network,
|
||||
(nodes.HubNode, nodes.PtpNet, nodes.SwitchNode)):
|
||||
if nodeutils.is_node(network, (NodeTypes.HUB, NodeTypes.PEER_TO_PEER, NodeTypes.SWITCH)):
|
||||
netif = node.netif(ifindex)
|
||||
self.set_wired_link_parameters(network, netif, device_id)
|
||||
|
||||
def set_wired_link_parameters(self, network, netif,
|
||||
device_id, netif_name = None):
|
||||
def set_wired_link_parameters(self, network, netif, device_id, netif_name=None):
|
||||
if netif_name is None:
|
||||
netif_name = netif.name
|
||||
key = (device_id, netif_name)
|
||||
|
@ -443,22 +407,20 @@ class CoreDocumentParser1(object):
|
|||
loss = link_params.get('loss')
|
||||
duplicate = link_params.get('duplicate')
|
||||
jitter = link_params.get('jitter')
|
||||
network.linkconfig(netif, bw = bw, delay = delay, loss = loss,
|
||||
duplicate = duplicate, jitter = jitter)
|
||||
network.linkconfig(netif, bw=bw, delay=delay, loss=loss, duplicate=duplicate, jitter=jitter)
|
||||
else:
|
||||
for k, v in link_params.iteritems():
|
||||
netif.setparam(k, v)
|
||||
|
||||
@staticmethod
|
||||
def search_for_element(node, tagName, match = None):
|
||||
'''\
|
||||
def search_for_element(node, tag_name, match=None):
|
||||
"""
|
||||
Search the given node and all ancestors for an element named
|
||||
tagName that satisfies the given matching function.
|
||||
'''
|
||||
"""
|
||||
while True:
|
||||
for child in iterChildren(node, Node.ELEMENT_NODE):
|
||||
if child.tagName == tagName and \
|
||||
(match is None or match(child)):
|
||||
for child in xmlutils.iter_children(node, Node.ELEMENT_NODE):
|
||||
if child.tagName == tag_name and (match is None or match(child)):
|
||||
return child
|
||||
node = node.parentNode
|
||||
if not node:
|
||||
|
@ -470,9 +432,10 @@ class CoreDocumentParser1(object):
|
|||
def match(x):
|
||||
domain = x.getAttribute('domain')
|
||||
return domain == 'COREID'
|
||||
|
||||
alias = cls.search_for_element(node, 'alias', match)
|
||||
if alias:
|
||||
return getChildTextTrim(alias)
|
||||
return xmlutils.get_child_text_trim(alias)
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
|
@ -487,8 +450,7 @@ class CoreDocumentParser1(object):
|
|||
return None
|
||||
|
||||
def find_interface_network_object(self, interface):
|
||||
network_id = getFirstChildTextTrimWithAttribute(interface, 'member',
|
||||
'type', 'network')
|
||||
network_id = xmlutils.get_first_child_text_trim_with_attribute(interface, 'member', 'type', 'network')
|
||||
if not network_id:
|
||||
# support legacy notation: <interface net="netid" ...
|
||||
network_id = interface.getAttribute('net')
|
||||
|
@ -498,22 +460,19 @@ class CoreDocumentParser1(object):
|
|||
return obj
|
||||
# the network should correspond to a layer-2 device if the
|
||||
# network_id does not exist
|
||||
channel_id = getFirstChildTextTrimWithAttribute(interface, 'member',
|
||||
'type', 'channel')
|
||||
channel_id = xmlutils.get_first_child_text_trim_with_attribute(interface, 'member', 'type', 'channel')
|
||||
if not network_id or not channel_id:
|
||||
return None
|
||||
network = getFirstChildWithAttribute(self.scenario, 'network',
|
||||
'id', network_id)
|
||||
network = xmlutils.get_first_child_with_attribute(self.scenario, 'network', 'id', network_id)
|
||||
if not network:
|
||||
return None
|
||||
channel = getFirstChildWithAttribute(network, 'channel',
|
||||
'id', channel_id)
|
||||
channel = xmlutils.get_first_child_with_attribute(network, 'channel', 'id', channel_id)
|
||||
if not channel:
|
||||
return None
|
||||
device = None
|
||||
for dev, if_name in self.iter_network_member_devices(channel):
|
||||
if self.device_type(dev) in self.layer2_device_types:
|
||||
assert not device # XXX
|
||||
assert not device # XXX
|
||||
device = dev
|
||||
if device:
|
||||
obj = self.get_core_object(device.getAttribute('id'))
|
||||
|
@ -532,12 +491,10 @@ class CoreDocumentParser1(object):
|
|||
# TODO: zMode is unused
|
||||
# z_mode = point.getAttribute('zMode'))
|
||||
if x < 0.0:
|
||||
self.warn('limiting negative x position of \'%s\' to zero: %s' %
|
||||
(obj.name, x))
|
||||
logger.warn('limiting negative x position of \'%s\' to zero: %s' % (obj.name, x))
|
||||
x = 0.0
|
||||
if y < 0.0:
|
||||
self.warn('limiting negative y position of \'%s\' to zero: %s' %
|
||||
(obj.name, y))
|
||||
logger.warn('limiting negative y position of \'%s\' to zero: %s' % (obj.name, y))
|
||||
y = 0.0
|
||||
obj.setposition(x, y, z)
|
||||
|
||||
|
@ -558,12 +515,10 @@ class CoreDocumentParser1(object):
|
|||
self.location_refgeo_set = True
|
||||
x, y, z = self.session.location.getxyz(lat, lon, zalt)
|
||||
if x < 0.0:
|
||||
self.warn('limiting negative x position of \'%s\' to zero: %s' %
|
||||
(obj.name, x))
|
||||
logger.warn('limiting negative x position of \'%s\' to zero: %s' % (obj.name, x))
|
||||
x = 0.0
|
||||
if y < 0.0:
|
||||
self.warn('limiting negative y position of \'%s\' to zero: %s' %
|
||||
(obj.name, y))
|
||||
logger.warn('limiting negative y position of \'%s\' to zero: %s' % (obj.name, y))
|
||||
y = 0.0
|
||||
obj.setposition(x, y, z)
|
||||
|
||||
|
@ -586,30 +541,27 @@ class CoreDocumentParser1(object):
|
|||
y = self.session.location.m2px(ym) + self.session.location.refxyz[1]
|
||||
z = self.session.location.m2px(zm) + self.session.location.refxyz[2]
|
||||
if x < 0.0:
|
||||
self.warn('limiting negative x position of \'%s\' to zero: %s' %
|
||||
(obj.name, x))
|
||||
logger.warn('limiting negative x position of \'%s\' to zero: %s' % (obj.name, x))
|
||||
x = 0.0
|
||||
if y < 0.0:
|
||||
self.warn('limiting negative y position of \'%s\' to zero: %s' %
|
||||
(obj.name, y))
|
||||
logger.warn('limiting negative y position of \'%s\' to zero: %s' % (obj.name, y))
|
||||
y = 0.0
|
||||
obj.setposition(x, y, z)
|
||||
|
||||
def set_object_position(self, obj, element):
|
||||
'''\
|
||||
"""
|
||||
Set the x,y,x position of obj from the point associated with
|
||||
the given element.
|
||||
'''
|
||||
"""
|
||||
point = self.find_point(element)
|
||||
if not point:
|
||||
return False
|
||||
point_type = point.getAttribute('type')
|
||||
if not point_type:
|
||||
msg = 'no type attribute found for point: \'%s\'' % \
|
||||
point.toxml('utf-8')
|
||||
self.warn(msg)
|
||||
assert False # XXX for testing
|
||||
return False
|
||||
point.toxml('utf-8')
|
||||
logger.warn(msg)
|
||||
assert False # XXX for testing
|
||||
elif point_type == 'pixel':
|
||||
self.set_object_position_pixel(obj, point)
|
||||
elif point_type == 'gps':
|
||||
|
@ -617,21 +569,17 @@ class CoreDocumentParser1(object):
|
|||
elif point_type == 'cart':
|
||||
self.set_object_position_cartesian(obj, point)
|
||||
else:
|
||||
self.warn("skipping unknown point type: '%s'" % point_type)
|
||||
assert False # XXX for testing
|
||||
return False
|
||||
if self.verbose:
|
||||
msg = 'set position of %s from point element: \'%s\'' % \
|
||||
(obj.name, point.toxml('utf-8'))
|
||||
self.info(msg)
|
||||
logger.warn("skipping unknown point type: '%s'" % point_type)
|
||||
assert False # XXX for testing
|
||||
|
||||
logger.info('set position of %s from point element: \'%s\'', obj.name, point.toxml('utf-8'))
|
||||
return True
|
||||
|
||||
def parse_device_service(self, service, node):
|
||||
name = service.getAttribute('name')
|
||||
session_service = self.session.services.getservicebyname(name)
|
||||
session_service = ServiceManager.get(name)
|
||||
if not session_service:
|
||||
assert False # XXX for testing
|
||||
return None
|
||||
assert False # XXX for testing
|
||||
values = []
|
||||
startup_idx = service.getAttribute('startup_idx')
|
||||
if startup_idx:
|
||||
|
@ -640,7 +588,7 @@ class CoreDocumentParser1(object):
|
|||
if startup_time:
|
||||
values.append('starttime=%s' % startup_time)
|
||||
dirs = []
|
||||
for directory in iterChildrenWithName(service, 'directory'):
|
||||
for directory in xmlutils.iter_children_with_name(service, 'directory'):
|
||||
dirname = directory.getAttribute('name')
|
||||
dirs.append(str(dirname))
|
||||
if dirs:
|
||||
|
@ -648,9 +596,9 @@ class CoreDocumentParser1(object):
|
|||
startup = []
|
||||
shutdown = []
|
||||
validate = []
|
||||
for command in iterChildrenWithName(service, 'command'):
|
||||
for command in xmlutils.iter_children_with_name(service, 'command'):
|
||||
command_type = command.getAttribute('type')
|
||||
command_text = getChildTextTrim(command)
|
||||
command_text = xmlutils.get_child_text_trim(command)
|
||||
if not command_text:
|
||||
continue
|
||||
if command_type == 'start':
|
||||
|
@ -667,12 +615,12 @@ class CoreDocumentParser1(object):
|
|||
values.append('cmdval=%s' % validate)
|
||||
filenames = []
|
||||
files = []
|
||||
for f in iterChildrenWithName(service, 'file'):
|
||||
for f in xmlutils.iter_children_with_name(service, 'file'):
|
||||
filename = f.getAttribute('name')
|
||||
if not filename:
|
||||
continue;
|
||||
continue
|
||||
filenames.append(filename)
|
||||
data = getChildTextTrim(f)
|
||||
data = xmlutils.get_child_text_trim(f)
|
||||
if data:
|
||||
data = str(data)
|
||||
else:
|
||||
|
@ -683,48 +631,48 @@ class CoreDocumentParser1(object):
|
|||
values.append('files=%s' % filenames)
|
||||
custom = service.getAttribute('custom')
|
||||
if custom and custom.lower() == 'true':
|
||||
self.session.services.setcustomservice(node.objid,
|
||||
session_service, values)
|
||||
self.session.services.setcustomservice(node.objid, session_service, values)
|
||||
# NOTE: if a custom service is used, setservicefile() must be
|
||||
# called after the custom service exists
|
||||
for typestr, filename, data in files:
|
||||
self.session.services.setservicefile(nodenum = node.objid,
|
||||
type = typestr,
|
||||
filename = filename,
|
||||
srcname = None,
|
||||
data = data)
|
||||
self.session.services.setservicefile(
|
||||
nodenum=node.objid,
|
||||
type=typestr,
|
||||
filename=filename,
|
||||
srcname=None,
|
||||
data=data
|
||||
)
|
||||
return str(name)
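For reference, a hedged illustration of the <service> element shape that parse_device_service() above reads; the service name, paths and command text are invented for the example, and only attributes actually touched by the parser (name, custom, startup_idx, directory, command, file) are shown.

# Illustrative only: not part of the commit, values are made up.
SERVICE_XML = """
<service name="zebra" custom="true" startup_idx="35">
    <directory name="/usr/local/etc/quagga"/>
    <command type="start">sh quaggaboot.sh zebra</command>
    <file name="/usr/local/etc/quagga/Quagga.conf">interface eth0</file>
</service>
"""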
|
||||
|
||||
def parse_device_services(self, services, node):
|
||||
'''\
|
||||
"""
|
||||
Use session.services manager to store service customizations
|
||||
before they are added to a node.
|
||||
'''
|
||||
"""
|
||||
service_names = []
|
||||
for service in iterChildrenWithName(services, 'service'):
|
||||
for service in xmlutils.iter_children_with_name(services, 'service'):
|
||||
name = self.parse_device_service(service, node)
|
||||
if name:
|
||||
service_names.append(name)
|
||||
return '|'.join(service_names)
|
||||
|
||||
def add_device_services(self, node, device, node_type):
|
||||
'''\
|
||||
"""
|
||||
Add services to the given node.
|
||||
'''
|
||||
services = getFirstChildByTagName(device, 'CORE:services')
|
||||
"""
|
||||
services = xmlutils.get_first_child_by_tag_name(device, 'CORE:services')
|
||||
if services:
|
||||
services_str = self.parse_device_services(services, node)
|
||||
if self.verbose:
|
||||
self.info('services for node \'%s\': %s' % \
|
||||
(node.name, services_str))
|
||||
logger.info('services for node \'%s\': %s' % (node.name, services_str))
|
||||
elif node_type in self.default_services:
|
||||
services_str = None # default services will be added
|
||||
services_str = None # default services will be added
|
||||
else:
|
||||
return
|
||||
self.session.services.addservicestonode(node = node,
|
||||
nodetype = node_type,
|
||||
services_str = services_str,
|
||||
verbose = self.verbose)
|
||||
self.session.services.addservicestonode(
|
||||
node=node,
|
||||
nodetype=node_type,
|
||||
services_str=services_str
|
||||
)
|
||||
|
||||
def set_object_presentation(self, obj, element, node_type):
|
||||
# defaults from the CORE GUI
|
||||
|
@ -735,9 +683,9 @@ class CoreDocumentParser1(object):
|
|||
'mdr': 'mdr.gif',
|
||||
# 'prouter': 'router_green.gif',
|
||||
# 'xen': 'xen.gif'
|
||||
}
|
||||
}
|
||||
icon_set = False
|
||||
for child in iterChildrenWithName(element, 'CORE:presentation'):
|
||||
for child in xmlutils.iter_children_with_name(element, 'CORE:presentation'):
|
||||
canvas = child.getAttribute('canvas')
|
||||
if canvas:
|
||||
obj.canvas = int(canvas)
|
||||
|
@ -757,18 +705,15 @@ class CoreDocumentParser1(object):
|
|||
|
||||
def core_node_type(self, device):
|
||||
# use an explicit CORE type if it exists
|
||||
coretype = getFirstChildTextTrimWithAttribute(device, 'type',
|
||||
'domain', 'CORE')
|
||||
coretype = xmlutils.get_first_child_text_trim_with_attribute(device, 'type', 'domain', 'CORE')
|
||||
if coretype:
|
||||
return coretype
|
||||
return self.device_type(device)
|
||||
|
||||
def find_device_with_interface(self, interface_id):
|
||||
# TODO: support generic 'device' elements
|
||||
for device in iterDescendantsWithName(self.scenario,
|
||||
self.device_types):
|
||||
interface = getFirstChildWithAttribute(device, 'interface',
|
||||
'id', interface_id)
|
||||
for device in xmlutils.iter_descendants_with_name(self.scenario, self.device_types):
|
||||
interface = xmlutils.get_first_child_with_attribute(device, 'interface', 'id', interface_id)
|
||||
if interface:
|
||||
if_name = interface.getAttribute('name')
|
||||
return device, if_name
|
||||
|
@ -776,72 +721,70 @@ class CoreDocumentParser1(object):
|
|||
|
||||
def parse_layer2_device(self, device):
|
||||
objid, device_name = self.get_common_attributes(device)
|
||||
if self.verbose:
|
||||
self.info('parsing layer-2 device: %s %s' % (device_name, objid))
|
||||
logger.info('parsing layer-2 device: name=%s id=%s' % (device_name, objid))
|
||||
|
||||
try:
|
||||
return self.session.obj(objid)
|
||||
return self.session.get_object(objid)
|
||||
except KeyError:
|
||||
pass
|
||||
logger.exception("error geting object: %s", objid)
|
||||
|
||||
device_type = self.device_type(device)
|
||||
if device_type == 'hub':
|
||||
device_class = nodes.HubNode
|
||||
device_class = nodeutils.get_node_class(NodeTypes.HUB)
|
||||
elif device_type == 'switch':
|
||||
device_class = nodes.SwitchNode
|
||||
device_class = nodeutils.get_node_class(NodeTypes.SWITCH)
|
||||
else:
|
||||
self.warn('unknown layer-2 device type: \'%s\'' % device_type)
|
||||
assert False # XXX for testing
|
||||
return None
|
||||
n = self.create_core_object(device_class, objid, device_name,
|
||||
device, None)
|
||||
logger.warn('unknown layer-2 device type: \'%s\'' % device_type)
|
||||
assert False # XXX for testing
|
||||
|
||||
n = self.create_core_object(device_class, objid, device_name, device, None)
|
||||
return n
|
||||
|
||||
def parse_layer3_device(self, device):
|
||||
objid, device_name = self.get_common_attributes(device)
|
||||
if self.verbose:
|
||||
self.info('parsing layer-3 device: %s %s' % (device_name, objid))
|
||||
logger.info('parsing layer-3 device: name=%s id=%s', device_name, objid)
|
||||
|
||||
try:
|
||||
return self.session.obj(objid)
|
||||
return self.session.get_object(objid)
|
||||
except KeyError:
|
||||
pass
|
||||
logger.exception("error getting session object: %s", objid)
|
||||
|
||||
device_cls = self.nodecls
|
||||
core_node_type = self.core_node_type(device)
|
||||
n = self.create_core_object(device_cls, objid, device_name,
|
||||
device, core_node_type)
|
||||
n = self.create_core_object(device_cls, objid, device_name, device, core_node_type)
|
||||
n.type = core_node_type
|
||||
self.add_device_services(n, device, core_node_type)
|
||||
for interface in iterChildrenWithName(device, 'interface'):
|
||||
for interface in xmlutils.iter_children_with_name(device, 'interface'):
|
||||
self.parse_interface(n, device.getAttribute('id'), interface)
|
||||
return n
|
||||
|
||||
def parse_layer2_devices(self):
|
||||
'''\
|
||||
"""
|
||||
Parse all layer-2 device elements. A device can be: 'switch',
|
||||
'hub'.
|
||||
'''
|
||||
"""
|
||||
# TODO: support generic 'device' elements
|
||||
for device in iterDescendantsWithName(self.scenario,
|
||||
self.layer2_device_types):
|
||||
for device in xmlutils.iter_descendants_with_name(self.scenario, self.layer2_device_types):
|
||||
self.parse_layer2_device(device)
|
||||
|
||||
def parse_layer3_devices(self):
|
||||
'''\
|
||||
"""
|
||||
Parse all layer-3 device elements. A device can be: 'host',
|
||||
'router'.
|
||||
'''
|
||||
"""
|
||||
# TODO: support generic 'device' elements
|
||||
for device in iterDescendantsWithName(self.scenario,
|
||||
self.layer3_device_types):
|
||||
for device in xmlutils.iter_descendants_with_name(self.scenario, self.layer3_device_types):
|
||||
self.parse_layer3_device(device)
|
||||
|
||||
def parse_session_origin(self, session_config):
|
||||
'''\
|
||||
"""
|
||||
Parse the first origin tag and set the CoreLocation reference
|
||||
point appropriately.
|
||||
'''
|
||||
"""
|
||||
# defaults from the CORE GUI
|
||||
self.session.location.setrefgeo(47.5791667, -122.132322, 2.0)
|
||||
self.session.location.refscale = 150.0
|
||||
origin = getFirstChildByTagName(session_config, 'origin')
|
||||
origin = xmlutils.get_first_child_by_tag_name(session_config, 'origin')
|
||||
if not origin:
|
||||
return
|
||||
lat = origin.getAttribute('lat')
|
||||
|
@ -853,18 +796,17 @@ class CoreDocumentParser1(object):
|
|||
scale100 = origin.getAttribute("scale100")
|
||||
if scale100:
|
||||
self.session.location.refscale = float(scale100)
|
||||
point = getFirstChildTextTrimByTagName(origin, 'point')
|
||||
point = xmlutils.get_first_child_text_trim_by_tag_name(origin, 'point')
|
||||
if point:
|
||||
xyz = point.split(',')
|
||||
if len(xyz) == 2:
|
||||
xyz.append('0.0')
|
||||
if len(xyz) == 3:
|
||||
self.session.location.refxyz = \
|
||||
(float(xyz[0]), float(xyz[1]), float(xyz[2]))
|
||||
self.session.location.refxyz = (float(xyz[0]), float(xyz[1]), float(xyz[2]))
|
||||
self.location_refxyz_set = True
|
||||
|
||||
def parse_session_options(self, session_config):
|
||||
options = getFirstChildByTagName(session_config, 'options')
|
||||
options = xmlutils.get_first_child_by_tag_name(session_config, 'options')
|
||||
if not options:
|
||||
return
|
||||
params = self.parse_parameter_children(options)
|
||||
|
@ -873,34 +815,32 @@ class CoreDocumentParser1(object):
|
|||
setattr(self.session.options, str(name), str(value))
|
||||
|
||||
def parse_session_hooks(self, session_config):
|
||||
'''\
|
||||
"""
|
||||
Parse hook scripts.
|
||||
'''
|
||||
hooks = getFirstChildByTagName(session_config, 'hooks')
|
||||
"""
|
||||
hooks = xmlutils.get_first_child_by_tag_name(session_config, 'hooks')
|
||||
if not hooks:
|
||||
return
|
||||
for hook in iterChildrenWithName(hooks, 'hook'):
|
||||
for hook in xmlutils.iter_children_with_name(hooks, 'hook'):
|
||||
filename = hook.getAttribute('name')
|
||||
state = hook.getAttribute('state')
|
||||
data = getChildTextTrim(hook)
|
||||
data = xmlutils.get_child_text_trim(hook)
|
||||
if data is None:
|
||||
data = '' # allow for empty file
|
||||
data = '' # allow for empty file
|
||||
hook_type = "hook:%s" % state
|
||||
self.session.sethook(hook_type, filename = str(filename),
|
||||
srcname = None, data = str(data))
|
||||
self.session.set_hook(hook_type, file_name=str(filename), source_name=None, data=str(data))
|
||||
|
||||
def parse_session_metadata(self, session_config):
|
||||
metadata = getFirstChildByTagName(session_config, 'metadata')
|
||||
metadata = xmlutils.get_first_child_by_tag_name(session_config, 'metadata')
|
||||
if not metadata:
|
||||
return
|
||||
params = self.parse_parameter_children(metadata)
|
||||
for name, value in params.iteritems():
|
||||
if name and value:
|
||||
self.session.metadata.additem(str(name), str(value))
|
||||
self.session.metadata.add_item(str(name), str(value))
|
||||
|
||||
def parse_session_config(self):
|
||||
session_config = \
|
||||
getFirstChildByTagName(self.scenario, 'CORE:sessionconfig')
|
||||
session_config = xmlutils.get_first_child_by_tag_name(self.scenario, 'CORE:sessionconfig')
|
||||
if not session_config:
|
||||
return
|
||||
self.parse_session_origin(session_config)
|
||||
|
@ -913,23 +853,21 @@ class CoreDocumentParser1(object):
|
|||
self.default_services = {
|
||||
'router': ['zebra', 'OSPFv2', 'OSPFv3', 'vtysh', 'IPForward'],
|
||||
'host': ['DefaultRoute', 'SSH'],
|
||||
'PC': ['DefaultRoute',],
|
||||
'PC': ['DefaultRoute', ],
|
||||
'mdr': ['zebra', 'OSPFv3MDR', 'vtysh', 'IPForward'],
|
||||
# 'prouter': ['zebra', 'OSPFv2', 'OSPFv3', 'vtysh', 'IPForward'],
|
||||
# 'xen': ['zebra', 'OSPFv2', 'OSPFv3', 'vtysh', 'IPForward'],
|
||||
}
|
||||
default_services = \
|
||||
getFirstChildByTagName(self.scenario, 'CORE:defaultservices')
|
||||
}
|
||||
default_services = xmlutils.get_first_child_by_tag_name(self.scenario, 'CORE:defaultservices')
|
||||
if not default_services:
|
||||
return
|
||||
for device in iterChildrenWithName(default_services, 'device'):
|
||||
for device in xmlutils.iter_children_with_name(default_services, 'device'):
|
||||
device_type = device.getAttribute('type')
|
||||
if not device_type:
|
||||
self.warn('parse_default_services: no type attribute ' \
|
||||
'found for device')
|
||||
logger.warn('parse_default_services: no type attribute found for device')
|
||||
continue
|
||||
services = []
|
||||
for service in iterChildrenWithName(device, 'service'):
|
||||
for service in xmlutils.iter_children_with_name(device, 'service'):
|
||||
name = service.getAttribute('name')
|
||||
if name:
|
||||
services.append(str(name))
|
||||
|
@ -937,6 +875,4 @@ class CoreDocumentParser1(object):
|
|||
# store default services for the session
|
||||
for t, s in self.default_services.iteritems():
|
||||
self.session.services.defaultservices[t] = s
|
||||
if self.verbose:
|
||||
self.info('default services for node type \'%s\' ' \
|
||||
'set to: %s' % (t, s))
|
||||
logger.info('default services for node type \'%s\' set to: %s' % (t, s))
|
36
daemon/core/xml/xmlsession.py
Normal file
|
@ -0,0 +1,36 @@
|
|||
"""
|
||||
Helpers for loading and saving XML files. savesessionxml(session, filename) is
|
||||
the main public interface here.
|
||||
"""
|
||||
|
||||
import os.path
|
||||
|
||||
from core.enumerations import NodeTypes
|
||||
from core.misc import nodeutils
|
||||
from core.xml.xmlparser import core_document_parser
|
||||
from core.xml.xmlwriter import core_document_writer
|
||||
|
||||
|
||||
def open_session_xml(session, filename, start=False, nodecls=None):
|
||||
"""
|
||||
Import a session from the EmulationScript XML format.
|
||||
"""
|
||||
|
||||
# set default node class when one is not provided
|
||||
if not nodecls:
|
||||
nodecls = nodeutils.get_node_class(NodeTypes.DEFAULT)
|
||||
|
||||
options = {'start': start, 'nodecls': nodecls}
|
||||
doc = core_document_parser(session, filename, options)
|
||||
if start:
|
||||
session.name = os.path.basename(filename)
|
||||
session.filename = filename
|
||||
session.instantiate()
|
||||
|
||||
|
||||
def save_session_xml(session, filename, version):
|
||||
"""
|
||||
Export a session to the EmulationScript XML format.
|
||||
"""
|
||||
doc = core_document_writer(session, version)
|
||||
doc.writexml(filename)
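A minimal usage sketch of the two entry points defined in this new file. It assumes an already-configured core Session instance and an illustrative output path, so treat it as a hedged example rather than part of the commit.

from core.xml.xmlsession import open_session_xml, save_session_xml

def roundtrip(session):
    # export the running session to the version 1.0 EmulationScript format
    save_session_xml(session, '/tmp/example-session.xml', version='1.0')
    # re-import the same file; start=True triggers session.instantiate()
    open_session_xml(session, '/tmp/example-session.xml', start=True)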
|
|
@ -1,16 +1,14 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
|
||||
from core.netns import nodes
|
||||
from xml.dom.minidom import Node
|
||||
|
||||
def addelementsfromlist(dom, parent, iterable, name, attr_name):
|
||||
''' XML helper to iterate through a list and add items to parent using tags
|
||||
from core.misc import log
|
||||
from core.netns import nodes
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
|
||||
def add_elements_from_list(dom, parent, iterable, name, attr_name):
|
||||
"""
|
||||
XML helper to iterate through a list and add items to parent using tags
|
||||
of the given name and the item value as an attribute named attr_name.
|
||||
Example: addelementsfromlist(dom, parent, ('a','b','c'), "letter", "value")
|
||||
<parent>
|
||||
|
@ -18,14 +16,16 @@ def addelementsfromlist(dom, parent, iterable, name, attr_name):
|
|||
<letter value="b"/>
|
||||
<letter value="c"/>
|
||||
</parent>
|
||||
'''
|
||||
"""
|
||||
for item in iterable:
|
||||
element = dom.createElement(name)
|
||||
element.setAttribute(attr_name, item)
|
||||
parent.appendChild(element)
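A self-contained check of the renamed helper above, mirroring its docstring example; only xml.dom.minidom is used and the tag/attribute names are illustrative.

from xml.dom.minidom import Document
from core.xml.xmlutils import add_elements_from_list

dom = Document()
parent = dom.createElement('parent')
dom.appendChild(parent)
add_elements_from_list(dom, parent, ('a', 'b', 'c'), 'letter', 'value')
# expected: <parent><letter value="a"/><letter value="b"/><letter value="c"/></parent>
print(parent.toxml())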
|
||||
|
||||
def addtextelementsfromlist(dom, parent, iterable, name, attrs):
|
||||
''' XML helper to iterate through a list and add items to parent using tags
|
||||
|
||||
def add_text_elements_from_list(dom, parent, iterable, name, attrs):
|
||||
"""
|
||||
XML helper to iterate through a list and add items to parent using tags
|
||||
of the given name, attributes specified in the attrs tuple, and having the
|
||||
text of the item within the tags.
|
||||
Example: addtextelementsfromlist(dom, parent, ('a','b','c'), "letter",
|
||||
|
@ -35,17 +35,19 @@ def addtextelementsfromlist(dom, parent, iterable, name, attrs):
|
|||
<letter show="True">b</letter>
|
||||
<letter show="True">c</letter>
|
||||
</parent>
|
||||
'''
|
||||
"""
|
||||
for item in iterable:
|
||||
element = dom.createElement(name)
|
||||
for k,v in attrs:
|
||||
for k, v in attrs:
|
||||
element.setAttribute(k, v)
|
||||
parent.appendChild(element)
|
||||
txt = dom.createTextNode(item)
|
||||
element.appendChild(txt)
|
||||
|
||||
def addtextelementsfromtuples(dom, parent, iterable, attrs=()):
|
||||
''' XML helper to iterate through a list of tuples and add items to
|
||||
|
||||
def add_text_elements_from_tuples(dom, parent, iterable, attrs=()):
|
||||
"""
|
||||
XML helper to iterate through a list of tuples and add items to
|
||||
parent using tags named for the first tuple element,
|
||||
attributes specified in the attrs tuple, and having the
|
||||
text of second tuple element.
|
||||
|
@ -57,37 +59,41 @@ def addtextelementsfromtuples(dom, parent, iterable, attrs=()):
|
|||
<second show="True">b</second>
|
||||
<third show="True">c</third>
|
||||
</parent>
|
||||
'''
|
||||
"""
|
||||
for name, value in iterable:
|
||||
element = dom.createElement(name)
|
||||
for k,v in attrs:
|
||||
for k, v in attrs:
|
||||
element.setAttribute(k, v)
|
||||
parent.appendChild(element)
|
||||
txt = dom.createTextNode(value)
|
||||
element.appendChild(txt)
|
||||
|
||||
def gettextelementstolist(parent):
|
||||
''' XML helper to parse child text nodes from the given parent and return
|
||||
|
||||
def get_text_elements_to_list(parent):
|
||||
"""
|
||||
XML helper to parse child text nodes from the given parent and return
|
||||
a list of (key, value) tuples.
|
||||
'''
|
||||
"""
|
||||
r = []
|
||||
for n in parent.childNodes:
|
||||
if n.nodeType != Node.ELEMENT_NODE:
|
||||
continue
|
||||
k = str(n.nodeName)
|
||||
v = '' # sometimes want None here?
|
||||
v = '' # sometimes want None here?
|
||||
for c in n.childNodes:
|
||||
if c.nodeType != Node.TEXT_NODE:
|
||||
continue
|
||||
v = str(c.nodeValue)
|
||||
break
|
||||
r.append((k,v))
|
||||
r.append((k, v))
|
||||
return r
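A hedged round-trip for the parsing helper above; the XML literal is made up for illustration.

from xml.dom.minidom import parseString
from core.xml.xmlutils import get_text_elements_to_list

options = parseString('<options><delay>10</delay><bw>54000</bw></options>').documentElement
# expected: [('delay', '10'), ('bw', '54000')]
print(get_text_elements_to_list(options))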
|
||||
|
||||
def addparamtoparent(dom, parent, name, value):
|
||||
''' XML helper to add a <param name="name" value="value"/> tag to the parent
|
||||
|
||||
def add_param_to_parent(dom, parent, name, value):
|
||||
"""
|
||||
XML helper to add a <param name="name" value="value"/> tag to the parent
|
||||
element, when value is not None.
|
||||
'''
|
||||
"""
|
||||
if value is None:
|
||||
return None
|
||||
p = dom.createElement("param")
|
||||
|
@ -96,10 +102,12 @@ def addparamtoparent(dom, parent, name, value):
|
|||
p.setAttribute("value", "%s" % value)
|
||||
return p
|
||||
|
||||
def addtextparamtoparent(dom, parent, name, value):
|
||||
''' XML helper to add a <param name="name">value</param> tag to the parent
|
||||
|
||||
def add_text_param_to_parent(dom, parent, name, value):
|
||||
"""
|
||||
XML helper to add a <param name="name">value</param> tag to the parent
|
||||
element, when value is not None.
|
||||
'''
|
||||
"""
|
||||
if value is None:
|
||||
return None
|
||||
p = dom.createElement("param")
|
||||
|
@ -109,14 +117,16 @@ def addtextparamtoparent(dom, parent, name, value):
|
|||
p.appendChild(txt)
|
||||
return p
|
||||
|
||||
def addparamlisttoparent(dom, parent, name, values):
|
||||
''' XML helper to return a parameter list and optionally add it to the
|
||||
|
||||
def add_param_list_to_parent(dom, parent, name, values):
|
||||
"""
|
||||
XML helper to return a parameter list and optionally add it to the
|
||||
parent element:
|
||||
<paramlist name="name">
|
||||
<item value="123">
|
||||
<item value="456">
|
||||
</paramlist>
|
||||
'''
|
||||
"""
|
||||
if values is None:
|
||||
return None
|
||||
p = dom.createElement("paramlist")
|
||||
|
@ -129,18 +139,20 @@ def addparamlisttoparent(dom, parent, name, values):
|
|||
p.appendChild(item)
|
||||
return p
|
||||
|
||||
def getoneelement(dom, name):
|
||||
|
||||
def get_one_element(dom, name):
|
||||
e = dom.getElementsByTagName(name)
|
||||
if len(e) == 0:
|
||||
return None
|
||||
return e[0]
|
||||
|
||||
def iterDescendants(dom, max_depth = 0):
|
||||
'''\
|
||||
|
||||
def iter_descendants(dom, max_depth=0):
|
||||
"""
|
||||
Iterate over all descendant element nodes in breadth first order.
|
||||
Only consider nodes up to max_depth deep when max_depth is greater
|
||||
than zero.
|
||||
'''
|
||||
"""
|
||||
nodes = [dom]
|
||||
depth = 0
|
||||
current_depth_nodes = 1
|
||||
|
@ -160,144 +172,167 @@ def iterDescendants(dom, max_depth = 0):
|
|||
current_depth_nodes = next_depth_nodes
|
||||
next_depth_nodes = 0
|
||||
|
||||
def iterMatchingDescendants(dom, matchFunction, max_depth = 0):
|
||||
'''\
|
||||
|
||||
def iter_matching_descendants(dom, match_function, max_depth=0):
|
||||
"""
|
||||
Iterate over descendant elements where matchFunction(descendant)
|
||||
returns true. Only consider nodes up to max_depth deep when
|
||||
max_depth is greater than zero.
|
||||
'''
|
||||
for d in iterDescendants(dom, max_depth):
|
||||
if matchFunction(d):
|
||||
"""
|
||||
for d in iter_descendants(dom, max_depth):
|
||||
if match_function(d):
|
||||
yield d
|
||||
|
||||
def iterDescendantsWithName(dom, tagName, max_depth = 0):
|
||||
'''\
|
||||
|
||||
def iter_descendants_with_name(dom, tag_name, max_depth=0):
|
||||
"""
|
||||
Iterate over descendant elements whose name is contained in
|
||||
tagName (or is named tagName if tagName is a string). Only
|
||||
consider nodes up to max_depth deep when max_depth is greater than
|
||||
zero.
|
||||
'''
|
||||
if isinstance(tagName, basestring):
|
||||
tagName = (tagName,)
|
||||
def match(d):
|
||||
return d.tagName in tagName
|
||||
return iterMatchingDescendants(dom, match, max_depth)
|
||||
"""
|
||||
if isinstance(tag_name, basestring):
|
||||
tag_name = (tag_name,)
|
||||
|
||||
def iterDescendantsWithAttribute(dom, tagName, attrName, attrValue,
|
||||
max_depth = 0):
|
||||
'''\
|
||||
def match(d):
|
||||
return d.tagName in tag_name
|
||||
|
||||
return iter_matching_descendants(dom, match, max_depth)
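A hedged example of the renamed breadth-first lookup just above; the scenario snippet is illustrative, and the nested interface element is skipped because only the named tags match.

from xml.dom.minidom import parseString
from core.xml.xmlutils import iter_descendants_with_name

scenario = parseString(
    "<scenario><network id='lan1'/><host id='h1'><interface id='i1'/></host></scenario>"
).documentElement
# expected to visit the network and host elements only
for device in iter_descendants_with_name(scenario, ('network', 'host')):
    print(device.getAttribute('id'))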
|
||||
|
||||
|
||||
def iter_descendants_with_attribute(dom, tag_name, attr_name, attr_value, max_depth=0):
|
||||
"""
|
||||
Iterate over descendant elements whose name is contained in
|
||||
tagName (or is named tagName if tagName is a string) and have an
|
||||
attribute named attrName with value attrValue. Only consider
|
||||
nodes up to max_depth deep when max_depth is greater than zero.
|
||||
'''
|
||||
if isinstance(tagName, basestring):
|
||||
tagName = (tagName,)
|
||||
def match(d):
|
||||
return d.tagName in tagName and \
|
||||
d.getAttribute(attrName) == attrValue
|
||||
return iterMatchingDescendants(dom, match, max_depth)
|
||||
"""
|
||||
if isinstance(tag_name, basestring):
|
||||
tag_name = (tag_name,)
|
||||
|
||||
def iterChildren(dom, nodeType):
|
||||
'''\
|
||||
def match(d):
|
||||
return d.tagName in tag_name and \
|
||||
d.getAttribute(attr_name) == attr_value
|
||||
|
||||
return iter_matching_descendants(dom, match, max_depth)
|
||||
|
||||
|
||||
def iter_children(dom, node_type):
|
||||
"""
|
||||
Iterate over all child elements of the given type.
|
||||
'''
|
||||
"""
|
||||
for child in dom.childNodes:
|
||||
if child.nodeType == nodeType:
|
||||
if child.nodeType == node_type:
|
||||
yield child
|
||||
|
||||
def gettextchild(dom):
|
||||
'''\
|
||||
|
||||
def get_text_child(dom):
|
||||
"""
|
||||
Return the text node of the given element.
|
||||
'''
|
||||
for child in iterChildren(dom, Node.TEXT_NODE):
|
||||
"""
|
||||
for child in iter_children(dom, Node.TEXT_NODE):
|
||||
return str(child.nodeValue)
|
||||
return None
|
||||
|
||||
def getChildTextTrim(dom):
|
||||
text = gettextchild(dom)
|
||||
|
||||
def get_child_text_trim(dom):
|
||||
text = get_text_child(dom)
|
||||
if text:
|
||||
text = text.strip()
|
||||
return text
|
||||
|
||||
def getparamssetattrs(dom, param_names, target):
|
||||
''' XML helper to get <param name="name" value="value"/> tags and set
|
||||
|
||||
def get_params_set_attrs(dom, param_names, target):
|
||||
"""
|
||||
XML helper to get <param name="name" value="value"/> tags and set
|
||||
the attribute in the target object. String type is used. Target object
|
||||
attribute is unchanged if the XML attribute is not present.
|
||||
'''
|
||||
"""
|
||||
params = dom.getElementsByTagName("param")
|
||||
for param in params:
|
||||
param_name = param.getAttribute("name")
|
||||
value = param.getAttribute("value")
|
||||
if value is None:
|
||||
continue # never reached?
|
||||
continue # never reached?
|
||||
if param_name in param_names:
|
||||
setattr(target, param_name, str(value))
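A short, self-contained sketch of get_params_set_attrs(); the Options class and the parameter name are assumptions made up for the example.

from xml.dom.minidom import parseString
from core.xml.xmlutils import get_params_set_attrs

class Options(object):
    controlnet = None  # hypothetical attribute for illustration

config = parseString('<options><param name="controlnet" value="172.16.0.0/24"/></options>')
target = Options()
get_params_set_attrs(config, ('controlnet',), target)
# expected: '172.16.0.0/24'
print(target.controlnet)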
|
||||
|
||||
def xmltypetonodeclass(session, type):
|
||||
''' Helper to convert from a type string to a class name in nodes.*.
|
||||
'''
|
||||
|
||||
def xml_type_to_node_class(session, type):
|
||||
"""
|
||||
Helper to convert from a type string to a class name in nodes.*.
|
||||
"""
|
||||
if hasattr(nodes, type):
|
||||
# TODO: remove and use a mapping to known nodes
|
||||
logger.error("using eval to retrieve node type: %s", type)
|
||||
return eval("nodes.%s" % type)
|
||||
else:
|
||||
return None
|
||||
|
||||
def iterChildrenWithName(dom, tagName):
|
||||
return iterDescendantsWithName(dom, tagName, 1)
|
||||
|
||||
def iterChildrenWithAttribute(dom, tagName, attrName, attrValue):
|
||||
return iterDescendantsWithAttribute(dom, tagName, attrName, attrValue, 1)
|
||||
def iter_children_with_name(dom, tag_name):
|
||||
return iter_descendants_with_name(dom, tag_name, 1)
|
||||
|
||||
def getFirstChildByTagName(dom, tagName):
|
||||
'''\
|
||||
|
||||
def iter_children_with_attribute(dom, tag_name, attr_name, attr_value):
|
||||
return iter_descendants_with_attribute(dom, tag_name, attr_name, attr_value, 1)
|
||||
|
||||
|
||||
def get_first_child_by_tag_name(dom, tag_name):
|
||||
"""
|
||||
Return the first child element whose name is contained in tagName
|
||||
(or is named tagName if tagName is a string).
|
||||
'''
|
||||
for child in iterChildrenWithName(dom, tagName):
|
||||
"""
|
||||
for child in iter_children_with_name(dom, tag_name):
|
||||
return child
|
||||
return None
|
||||
|
||||
def getFirstChildTextByTagName(dom, tagName):
|
||||
'''\
|
||||
|
||||
def get_first_child_text_by_tag_name(dom, tag_name):
|
||||
"""
|
||||
Return the corresponding text of the first child element whose
|
||||
name is contained in tagName (or is named tagName if tagName is a
|
||||
string).
|
||||
'''
|
||||
child = getFirstChildByTagName(dom, tagName)
|
||||
"""
|
||||
child = get_first_child_by_tag_name(dom, tag_name)
|
||||
if child:
|
||||
return gettextchild(child)
|
||||
return get_text_child(child)
|
||||
return None
|
||||
|
||||
def getFirstChildTextTrimByTagName(dom, tagName):
|
||||
text = getFirstChildTextByTagName(dom, tagName)
|
||||
|
||||
def get_first_child_text_trim_by_tag_name(dom, tag_name):
|
||||
text = get_first_child_text_by_tag_name(dom, tag_name)
|
||||
if text:
|
||||
text = text.strip()
|
||||
return text
|
||||
|
||||
def getFirstChildWithAttribute(dom, tagName, attrName, attrValue):
|
||||
'''\
|
||||
|
||||
def get_first_child_with_attribute(dom, tag_name, attr_name, attr_value):
|
||||
"""
|
||||
Return the first child element whose name is contained in tagName
|
||||
(or is named tagName if tagName is a string) that has an attribute
|
||||
named attrName with value attrValue.
|
||||
'''
|
||||
"""
|
||||
for child in \
|
||||
iterChildrenWithAttribute(dom, tagName, attrName, attrValue):
|
||||
iter_children_with_attribute(dom, tag_name, attr_name, attr_value):
|
||||
return child
|
||||
return None
|
||||
|
||||
def getFirstChildTextWithAttribute(dom, tagName, attrName, attrValue):
|
||||
'''\
|
||||
|
||||
def get_first_child_text_with_attribute(dom, tag_name, attr_name, attr_value):
|
||||
"""
|
||||
Return the corresponding text of the first child element whose
|
||||
name is contained in tagName (or is named tagName if tagName is a
|
||||
string) that has an attribute named attrName with value attrValue.
|
||||
'''
|
||||
child = getFirstChildWithAttribute(dom, tagName, attrName, attrValue)
|
||||
"""
|
||||
child = get_first_child_with_attribute(dom, tag_name, attr_name, attr_value)
|
||||
if child:
|
||||
return gettextchild(child)
|
||||
return get_text_child(child)
|
||||
return None
|
||||
|
||||
def getFirstChildTextTrimWithAttribute(dom, tagName, attrName, attrValue):
|
||||
text = getFirstChildTextWithAttribute(dom, tagName, attrName, attrValue)
|
||||
|
||||
def get_first_child_text_trim_with_attribute(dom, tag_name, attr_name, attr_value):
|
||||
text = get_first_child_text_with_attribute(dom, tag_name, attr_name, attr_value)
|
||||
if text:
|
||||
text = text.strip()
|
||||
return text
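As a hedged illustration of the lookup pattern the parser relies on elsewhere (e.g. reading an explicit CORE type from a device element), the helper above can be exercised against a small snippet; the XML is illustrative.

from xml.dom.minidom import parseString
from core.xml.xmlutils import get_first_child_text_trim_with_attribute

device = parseString('<router id="n3"><type domain="CORE"> mdr </type></router>').documentElement
# expected: 'mdr' (surrounding whitespace stripped)
print(get_first_child_text_trim_with_attribute(device, 'type', 'domain', 'CORE'))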
|
12
daemon/core/xml/xmlwriter.py
Normal file
|
@ -0,0 +1,12 @@
|
|||
from core.xml.xmlwriter0 import CoreDocumentWriter0
|
||||
from core.xml.xmlwriter1 import CoreDocumentWriter1
|
||||
|
||||
|
||||
def core_document_writer(session, version):
|
||||
if version == '0.0':
|
||||
doc = CoreDocumentWriter0(session)
|
||||
elif version == '1.0':
|
||||
doc = CoreDocumentWriter1(session)
|
||||
else:
|
||||
raise ValueError('unsupported document version: %s' % version)
|
||||
return doc
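For context, a hedged sketch of how this factory is expected to be driven; the session object and the output path are assumptions, and '0.0' selects the legacy writer.

from core.xml.xmlwriter import core_document_writer

def write_scenario(session, path='/tmp/scenario.xml', version='1.0'):
    doc = core_document_writer(session, version)  # CoreDocumentWriter0 or CoreDocumentWriter1
    doc.writexml(path)  # same call save_session_xml() makes on the returned document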
|
|
@ -1,26 +1,28 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
|
||||
import os
|
||||
import pwd
|
||||
from core.netns import nodes
|
||||
from core.api import coreapi
|
||||
from xml.dom.minidom import Document
|
||||
from xmlutils import *
|
||||
|
||||
import pwd
|
||||
|
||||
from core.coreobj import PyCoreNet
|
||||
from core.coreobj import PyCoreNode
|
||||
from core.enumerations import RegisterTlvs
|
||||
from core.misc import log
|
||||
from core.xml import xmlutils
|
||||
|
||||
logger = log.get_logger(__name__)
|
||||
|
||||
|
||||
class CoreDocumentWriter0(Document):
|
||||
''' Utility class for writing a CoreSession to XML. The init method builds
|
||||
"""
|
||||
Utility class for writing a CoreSession to XML. The init method builds
|
||||
an xml.dom.minidom.Document, and the writexml() method saves the XML file.
|
||||
'''
|
||||
"""
|
||||
|
||||
def __init__(self, session):
|
||||
''' Create an empty Scenario XML Document, then populate it with
|
||||
"""
|
||||
Create an empty Scenario XML Document, then populate it with
|
||||
objects from the given session.
|
||||
'''
|
||||
"""
|
||||
Document.__init__(self)
|
||||
self.session = session
|
||||
self.scenario = self.createElement("Scenario")
|
||||
|
@ -34,40 +36,41 @@ class CoreDocumentWriter0(Document):
|
|||
self.scenario.appendChild(self.mp)
|
||||
self.scenario.appendChild(self.sp)
|
||||
self.scenario.appendChild(self.meta)
|
||||
|
||||
|
||||
self.populatefromsession()
|
||||
|
||||
def populatefromsession(self):
|
||||
self.session.emane.setup() # not during runtime?
|
||||
self.session.emane.setup() # not during runtime?
|
||||
self.addorigin()
|
||||
self.adddefaultservices()
|
||||
self.addnets()
|
||||
self.addnodes()
|
||||
self.addmetadata()
|
||||
|
||||
|
||||
def writexml(self, filename):
|
||||
self.session.info("saving session XML file %s" % filename)
|
||||
logger.info("saving session XML file %s", filename)
|
||||
f = open(filename, "w")
|
||||
Document.writexml(self, writer=f, indent="", addindent=" ", newl="\n", \
|
||||
encoding="UTF-8")
|
||||
Document.writexml(self, writer=f, indent="", addindent=" ", newl="\n", encoding="UTF-8")
|
||||
f.close()
|
||||
if self.session.user is not None:
|
||||
uid = pwd.getpwnam(self.session.user).pw_uid
|
||||
gid = os.stat(self.session.sessiondir).st_gid
|
||||
os.chown(filename, uid, gid)
|
||||
|
||||
|
||||
def addnets(self):
|
||||
''' Add PyCoreNet objects as NetworkDefinition XML elements.
|
||||
'''
|
||||
with self.session._objslock:
|
||||
for net in self.session.objs():
|
||||
if not isinstance(net, nodes.PyCoreNet):
|
||||
"""
|
||||
Add PyCoreNet objects as NetworkDefinition XML elements.
|
||||
"""
|
||||
with self.session._objects_lock:
|
||||
for net in self.session.objects.itervalues():
|
||||
if not isinstance(net, PyCoreNet):
|
||||
continue
|
||||
self.addnet(net)
|
||||
|
||||
def addnet(self, net):
|
||||
''' Add one PyCoreNet object as a NetworkDefinition XML element.
|
||||
'''
|
||||
"""
|
||||
Add one PyCoreNet object as a NetworkDefinition XML element.
|
||||
"""
|
||||
n = self.createElement("NetworkDefinition")
|
||||
self.np.appendChild(n)
|
||||
n.setAttribute("name", net.name)
|
||||
|
@ -80,18 +83,19 @@ class CoreDocumentWriter0(Document):
|
|||
n.setAttribute("key", "%s" % net.grekey)
|
||||
# link parameters
|
||||
for netif in net.netifs(sort=True):
|
||||
self.addnetem(n, netif)
|
||||
# wireless/mobility models
|
||||
self.addnetem(n, netif)
|
||||
# wireless/mobility models
|
||||
modelconfigs = net.session.mobility.getmodels(net)
|
||||
modelconfigs += net.session.emane.getmodels(net)
|
||||
self.addmodels(n, modelconfigs)
|
||||
self.addposition(net)
|
||||
|
||||
|
||||
    def addnetem(self, n, netif):
        ''' Similar to addmodels(); used for writing netem link effects
        """
        Similar to addmodels(); used for writing netem link effects
        parameters. TODO: Interface parameters should be moved to the model
        construct, then this separate method shouldn't be required.
        '''
        """
        params = netif.getparams()
        if len(params) == 0:
            return

@@ -126,16 +130,17 @@ class CoreDocumentWriter0(Document):
            has_params = True
        if has_params:
            n.appendChild(model)
    def addmodels(self, n, configs):
        ''' Add models from a list of model-class, config values tuples.
        '''
        for (m, conf) in configs:
        """
        Add models from a list of model-class, config values tuples.
        """
        for m, conf in configs:
            model = self.createElement("model")
            n.appendChild(model)
            model.setAttribute("name", m._name)
            type = "wireless"
            if m._type == coreapi.CORE_TLV_REG_MOBILITY:
            if m._type == RegisterTlvs.MOBILITY.value:
                type = "mobility"
            model.setAttribute("type", type)
            for i, k in enumerate(m.getnames()):

@@ -147,17 +152,19 @@ class CoreDocumentWriter0(Document):
                model.appendChild(key)
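The configs argument here is a sequence of (model class, configuration values) pairs, where each class exposes _name, _type and getnames() as read by the loop above. A rough stand-in sketch of that shape (the class and values below are illustrative placeholders, not taken from this change):

    # Illustrative stand-in for a wireless/mobility model as addmodels()
    # expects it; real model classes in CORE provide these same attributes.
    class ExampleModel(object):
        _name = "example_range"
        _type = RegisterTlvs.MOBILITY.value  # causes type="mobility" above

        @classmethod
        def getnames(cls):
            return ("range", "bandwidth")

    # Configuration values are expected in the same order as getnames().
    configs = [(ExampleModel, ("275", "54000000"))]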
    def addnodes(self):
        ''' Add PyCoreNode objects as node XML elements.
        '''
        with self.session._objslock:
            for node in self.session.objs():
                if not isinstance(node, nodes.PyCoreNode):
        """
        Add PyCoreNode objects as node XML elements.
        """
        with self.session._objects_lock:
            for node in self.session.objects.itervalues():
                if not isinstance(node, PyCoreNode):
                    continue
                self.addnode(node)

    def addnode(self, node):
        ''' Add a PyCoreNode object as node XML elements.
        '''
        """
        Add a PyCoreNode object as node XML elements.
        """
        n = self.createElement("Node")
        self.np.appendChild(n)
        n.setAttribute("name", node.name)

@@ -166,13 +173,14 @@ class CoreDocumentWriter0(Document):
            n.setAttribute("type", node.type)
        self.addinterfaces(n, node)
        self.addposition(node)
        addparamtoparent(self, n, "icon", node.icon)
        addparamtoparent(self, n, "canvas", node.canvas)
        xmlutils.add_param_to_parent(self, n, "icon", node.icon)
        xmlutils.add_param_to_parent(self, n, "canvas", node.canvas)
        self.addservices(node)
    def addinterfaces(self, n, node):
        ''' Add PyCoreNetIfs to node XML elements.
        '''
        """
        Add PyCoreNetIfs to node XML elements.
        """
        for ifc in node.netifs(sort=True):
            i = self.createElement("interface")
            n.appendChild(i)

@@ -191,14 +199,14 @@ class CoreDocumentWriter0(Document):
                cfg = self.session.emane.getifcconfig(node.objid, netmodel._name,
                                                      None, ifc)
                if cfg:
                    self.addmodels(i,  ((netmodel, cfg),) )

                    self.addmodels(i, ((netmodel, cfg),))
    def addnetinterfaces(self, n, net):
        ''' Similar to addinterfaces(), but only adds interface elements to the
        """
        Similar to addinterfaces(), but only adds interface elements to the
        supplied XML node that would not otherwise appear in the Node elements.
        These are any interfaces that link two switches/hubs together.
        '''
        """
        for ifc in net.netifs(sort=True):
            if not hasattr(ifc, "othernet") or not ifc.othernet:
                continue

@@ -212,37 +220,39 @@ class CoreDocumentWriter0(Document):
            i.setAttribute("net", ifc.net.name)
    def addposition(self, node):
        ''' Add object coordinates as location XML element.
        '''
        (x,y,z) = node.position.get()
        """
        Add object coordinates as location XML element.
        """
        (x, y, z) = node.position.get()
        if x is None or y is None:
            return
        # <Node name="n1">
        mpn = self.createElement("Node")
        mpn.setAttribute("name", node.name)
        self.mp.appendChild(mpn)

        # <motion type="stationary">
        motion = self.createElement("motion")
        motion.setAttribute("type", "stationary")
        mpn.appendChild(motion)

        # <point>$X$,$Y$,$Z$</point>
        pt = self.createElement("point")
        motion.appendChild(pt)
        coordstxt = "%s,%s" % (x,y)
        coordstxt = "%s,%s" % (x, y)
        if z:
            coordstxt += ",%s" % z
        coords = self.createTextNode(coordstxt)
        pt.appendChild(coords)
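The inline comments above spell out the motion-plan fragment this method emits; the same structure can be reproduced standalone with plain minidom (the node name and coordinates below are made up):

    from xml.dom.minidom import Document

    doc = Document()
    node = doc.createElement("Node")           # <Node name="n1">
    node.setAttribute("name", "n1")
    motion = doc.createElement("motion")       # <motion type="stationary">
    motion.setAttribute("type", "stationary")
    node.appendChild(motion)
    point = doc.createElement("point")         # <point>X,Y[,Z]</point>
    point.appendChild(doc.createTextNode("100,150"))
    motion.appendChild(point)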
    def addorigin(self):
        ''' Add origin to Motion Plan using canvas reference point.
        The CoreLocation class maintains this reference point.
        '''
        """
        Add origin to Motion Plan using canvas reference point.
        The CoreLocation class maintains this reference point.
        """
        refgeo = self.session.location.refgeo
        origin = self.createElement("origin")
        attrs = ("lat","lon","alt")
        attrs = ("lat", "lon", "alt")
        have_origin = False
        for i in xrange(3):
            if refgeo[i] is not None:

@@ -250,23 +260,24 @@ class CoreDocumentWriter0(Document):
                have_origin = True
        if not have_origin:
            return
        if self.session.location.refscale != 1.0: # 100 pixels = refscale m
        if self.session.location.refscale != 1.0:  # 100 pixels = refscale m
            origin.setAttribute("scale100", str(self.session.location.refscale))
        if self.session.location.refxyz != (0.0, 0.0, 0.0):
            pt = self.createElement("point")
            origin.appendChild(pt)
            x,y,z = self.session.location.refxyz
            coordstxt = "%s,%s" % (x,y)
            x, y, z = self.session.location.refxyz
            coordstxt = "%s,%s" % (x, y)
            if z:
                coordstxt += ",%s" % z
            coords = self.createTextNode(coordstxt)
            pt.appendChild(coords)

        self.mp.appendChild(origin)
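Concretely, the origin element ends up carrying whichever of lat/lon/alt the canvas reference point defines (set in the elided lines, presumably from refgeo), a scale100 attribute when refscale differs from 1.0, and an optional point child for a non-zero pixel reference. A standalone sketch with made-up values:

    from xml.dom.minidom import Document

    doc = Document()
    origin = doc.createElement("origin")
    origin.setAttribute("lat", "47.5791667")   # assumed: filled from refgeo
    origin.setAttribute("lon", "-122.132322")
    origin.setAttribute("alt", "2.0")
    origin.setAttribute("scale100", "150.0")   # only when refscale != 1.0
    point = doc.createElement("point")         # only when refxyz != (0, 0, 0)
    point.appendChild(doc.createTextNode("0.0,0.0"))
    origin.appendChild(point)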
    def adddefaultservices(self):
        ''' Add default services and node types to the ServicePlan.
        '''
        """
        Add default services and node types to the ServicePlan.
        """
        for type in self.session.services.defaultservices:
            defaults = self.session.services.getdefaultservices(type)
            spn = self.createElement("Node")

@@ -276,10 +287,11 @@ class CoreDocumentWriter0(Document):
                s = self.createElement("Service")
                spn.appendChild(s)
                s.setAttribute("name", str(svc._name))
    def addservices(self, node):
        ''' Add services and their customizations to the ServicePlan.
        '''
        """
        Add services and their customizations to the ServicePlan.
        """
        if len(node.services) == 0:
            return
        defaults = self.session.services.getdefaultservices(node.type)

@@ -300,8 +312,8 @@ class CoreDocumentWriter0(Document):
            if not svc._custom:
                continue
            s.setAttribute("custom", str(svc._custom))
            addelementsfromlist(self, s, svc._dirs, "Directory", "name")

            xmlutils.add_elements_from_list(self, s, svc._dirs, "Directory", "name")

            for fn in svc._configs:
                if len(fn) == 0:
                    continue

@@ -316,17 +328,15 @@ class CoreDocumentWriter0(Document):
                    continue
                txt = self.createTextNode(data)
                f.appendChild(txt)

            addtextelementsfromlist(self, s, svc._startup, "Command",
                                    (("type","start"),))
            addtextelementsfromlist(self, s, svc._shutdown, "Command",
                                    (("type","stop"),))
            addtextelementsfromlist(self, s, svc._validate, "Command",
                                    (("type","validate"),))

            xmlutils.add_text_elements_from_list(self, s, svc._startup, "Command", (("type", "start"),))
            xmlutils.add_text_elements_from_list(self, s, svc._shutdown, "Command", (("type", "stop"),))
            xmlutils.add_text_elements_from_list(self, s, svc._validate, "Command", (("type", "validate"),))
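Judging by its name, add_text_elements_from_list presumably emits one child element per list entry, carrying the given attributes and the entry itself as text; that behavior is an assumption about xmlutils, not read from its source here. A standalone sketch of the presumed result for a startup command list:

    from xml.dom.minidom import Document

    doc = Document()
    service = doc.createElement("Service")
    for command in ("zebra start", "ospfd start"):   # e.g. svc._startup
        elem = doc.createElement("Command")
        elem.setAttribute("type", "start")
        elem.appendChild(doc.createTextNode(command))
        service.appendChild(elem)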
    def addaddresses(self, i, netif):
        ''' Add MAC and IP addresses to interface XML elements.
        '''
        """
        Add MAC and IP addresses to interface XML elements.
        """
        if netif.hwaddr:
            h = self.createElement("address")
            i.appendChild(h)

@@ -339,13 +349,14 @@ class CoreDocumentWriter0(Document):
            # a.setAttribute("type", )
            atxt = self.createTextNode("%s" % addr)
            a.appendChild(atxt)
    def addhooks(self):
        ''' Add hook script XML elements to the metadata tag.
        '''
        """
        Add hook script XML elements to the metadata tag.
        """
        hooks = self.createElement("Hooks")
        for state in sorted(self.session._hooks.keys()):
            for (filename, data) in self.session._hooks[state]:
            for filename, data in self.session._hooks[state]:
                hook = self.createElement("Hook")
                hook.setAttribute("name", filename)
                hook.setAttribute("state", str(state))

@@ -354,17 +365,18 @@ class CoreDocumentWriter0(Document):
                hooks.appendChild(hook)
        if hooks.hasChildNodes():
            self.meta.appendChild(hooks)
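Each session hook thus becomes a Hook element carrying its file name and state under a Hooks parent; the script body itself is written in the elided lines, presumably as a text child. A standalone sketch of that shape (the text-child detail and the sample values are assumptions):

    from xml.dom.minidom import Document

    doc = Document()
    hooks = doc.createElement("Hooks")
    filename, state, data = "startup.sh", 4, "#!/bin/sh\necho configured"
    hook = doc.createElement("Hook")
    hook.setAttribute("name", filename)
    hook.setAttribute("state", str(state))
    hook.appendChild(doc.createTextNode(data))   # assumed: script body as text
    hooks.appendChild(hook)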
    def addmetadata(self):
        ''' Add CORE-specific session meta-data XML elements.
        '''
        """
        Add CORE-specific session meta-data XML elements.
        """
        # options
        options = self.createElement("SessionOptions")
        defaults = self.session.options.getdefaultvalues()
        for i, (k, v) in enumerate(self.session.options.getkeyvaluelist()):
            if str(v) != str(defaults[i]):
                addtextparamtoparent(self, options, k, v)
                #addparamtoparent(self, options, k, v)
                xmlutils.add_text_param_to_parent(self, options, k, v)
                # addparamtoparent(self, options, k, v)
        if options.hasChildNodes():
            self.meta.appendChild(options)
        # hook scripts

@@ -372,6 +384,6 @@ class CoreDocumentWriter0(Document):
        # meta
        meta = self.createElement("MetaData")
        self.meta.appendChild(meta)
        for (k, v) in self.session.metadata.items():
            addtextparamtoparent(self, meta, k, v)
            #addparamtoparent(self, meta, k, v)
        for k, v in self.session.metadata.items():
            xmlutils.add_text_param_to_parent(self, meta, k, v)
            # addparamtoparent(self, meta, k, v)
1018
daemon/core/xml/xmlwriter1.py
Normal file
File diff suppressed because it is too large