initial import (Boeing r1752, NRL r878)
commit f8f46d28be
394 changed files with 99738 additions and 0 deletions
23  daemon/core/__init__.py  Normal file
@@ -0,0 +1,23 @@
# Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.

"""core

Top-level Python package containing CORE components.

See http://cs.itd.nrl.navy.mil/work/core/ and
http://code.google.com/p/coreemu/ for more information on CORE.

Pieces can be imported individually, for example

    import core.netns.vnode

or everything listed in __all__ can be imported using

    from core import *
"""

__all__ = []

# Automatically import all add-ons listed in addons.__all__
from addons import *
6  daemon/core/addons/__init__.py  Normal file
@@ -0,0 +1,6 @@
"""Optional add-ons

Add on files can be put in this directory. Everything listed in
__all__ is automatically loaded by the main core module.
"""
__all__ = []
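An illustrative sketch (not part of this commit) of the add-on hook described above: dropping a hypothetical module into daemon/core/addons/ and naming it in __all__ lets the top-level core package load it through its "from addons import *" line.

# daemon/core/addons/__init__.py, after adding a hypothetical myaddon.py
# module to this directory:
__all__ = ["myaddon"]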
0    daemon/core/api/__init__.py  Normal file
630  daemon/core/api/coreapi.py   Normal file
@@ -0,0 +1,630 @@
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
coreapi.py: uses core.api.data for Message and TLV types, and defines TLV data
|
||||
types and objects used for parsing and building CORE API messages.
|
||||
'''
|
||||
|
||||
import struct
|
||||
|
||||
from core.api.data import *
|
||||
from core.misc.ipaddr import *
|
||||
|
||||
|
||||
class CoreTlvData(object):
|
||||
datafmt = None
|
||||
datatype = None
|
||||
padlen = None
|
||||
|
||||
@classmethod
|
||||
def pack(cls, value):
|
||||
"return: (tlvlen, tlvdata)"
|
||||
tmp = struct.pack(cls.datafmt, value)
|
||||
return len(tmp) - cls.padlen, tmp
|
||||
|
||||
@classmethod
|
||||
def unpack(cls, data):
|
||||
return struct.unpack(cls.datafmt, data)[0]
|
||||
|
||||
@classmethod
|
||||
def packstring(cls, strvalue):
|
||||
return cls.pack(cls.fromstring(strvalue))
|
||||
|
||||
@classmethod
|
||||
def fromstring(cls, s):
|
||||
return cls.datatype(s)
|
||||
|
||||
class CoreTlvDataObj(CoreTlvData):
|
||||
@classmethod
|
||||
def pack(cls, obj):
|
||||
"return: (tlvlen, tlvdata)"
|
||||
tmp = struct.pack(cls.datafmt, cls.getvalue(obj))
|
||||
return len(tmp) - cls.padlen, tmp
|
||||
|
||||
@classmethod
|
||||
def unpack(cls, data):
|
||||
return cls.newobj(struct.unpack(cls.datafmt, data)[0])
|
||||
|
||||
@staticmethod
|
||||
def getvalue(obj):
|
||||
raise NotImplementedError
|
||||
|
||||
@staticmethod
|
||||
def newobj(obj):
|
||||
raise NotImplementedError
|
||||
|
||||
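# --- Editor's note (illustrative; not part of the original file) ---
# CoreTlvDataObj adapts object-valued TLVs: subclasses supply getvalue()
# to turn an object (e.g. an IPAddr) into the raw value packed by
# datafmt, and newobj() to rebuild the object from the unpacked value,
# as the IPv4/IPv6/MAC address classes below do.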
class CoreTlvDataUint16(CoreTlvData):
|
||||
datafmt = "!H"
|
||||
datatype = int
|
||||
padlen = 0
|
||||
|
||||
class CoreTlvDataUint32(CoreTlvData):
|
||||
datafmt = "!2xI"
|
||||
datatype = int
|
||||
padlen = 2
|
||||
|
||||
class CoreTlvDataUint64(CoreTlvData):
|
||||
datafmt = "!2xQ"
|
||||
datatype = long
|
||||
padlen = 2
|
||||
|
||||
class CoreTlvDataString(CoreTlvData):
|
||||
datatype = str
|
||||
|
||||
@staticmethod
|
||||
def pack(value):
|
||||
if not isinstance(value, str):
|
||||
raise ValueError, "value not a string: %s" % value
|
||||
if len(value) < 256:
|
||||
hdrsiz = CoreTlv.hdrsiz
|
||||
else:
|
||||
hdrsiz = CoreTlv.longhdrsiz
|
||||
padlen = -(hdrsiz + len(value)) % 4
|
||||
return len(value), value + '\0' * padlen
|
||||
|
||||
@staticmethod
|
||||
def unpack(data):
|
||||
return data.rstrip('\0')
|
||||
|
||||
class CoreTlvDataUint16List(CoreTlvData):
|
||||
''' List of unsigned 16-bit values.
|
||||
'''
|
||||
datatype = tuple
|
||||
|
||||
@staticmethod
|
||||
def pack(values):
|
||||
if not isinstance(values, tuple):
|
||||
raise ValueError, "value not a tuple: %s" % values
|
||||
data = ""
|
||||
for v in values:
|
||||
data += struct.pack("!H", v)
|
||||
padlen = -(CoreTlv.hdrsiz + len(data)) % 4
|
||||
return len(data), data + '\0' * padlen
|
||||
|
||||
@staticmethod
|
||||
def unpack(data):
|
||||
datafmt = "!%dH" % (len(data)/2)
|
||||
return struct.unpack(datafmt, data)
|
||||
|
||||
@classmethod
|
||||
def fromstring(cls, s):
|
||||
return tuple(map(lambda(x): int(x), s.split()))
|
||||
|
||||
class CoreTlvDataIPv4Addr(CoreTlvDataObj):
|
||||
datafmt = "!2x4s"
|
||||
datatype = IPAddr.fromstring
|
||||
padlen = 2
|
||||
|
||||
@staticmethod
|
||||
def getvalue(obj):
|
||||
return obj.addr
|
||||
|
||||
@staticmethod
|
||||
def newobj(value):
|
||||
return IPAddr(af = AF_INET, addr = value)
|
||||
|
||||
class CoreTlvDataIPv6Addr(CoreTlvDataObj):
|
||||
datafmt = "!16s2x"
|
||||
datatype = IPAddr.fromstring
|
||||
padlen = 2
|
||||
|
||||
@staticmethod
|
||||
def getvalue(obj):
|
||||
return obj.addr
|
||||
|
||||
@staticmethod
|
||||
def newobj(value):
|
||||
return IPAddr(af = AF_INET6, addr = value)
|
||||
|
||||
class CoreTlvDataMacAddr(CoreTlvDataObj):
|
||||
datafmt = "!2x8s"
|
||||
datatype = MacAddr.fromstring
|
||||
padlen = 2
|
||||
|
||||
@staticmethod
|
||||
def getvalue(obj):
|
||||
return obj.addr
|
||||
|
||||
@staticmethod
|
||||
def newobj(value):
|
||||
return MacAddr(addr = value[2:]) # only use 48 bits
|
||||
|
||||
class CoreTlv(object):
|
||||
hdrfmt = "!BB"
|
||||
hdrsiz = struct.calcsize(hdrfmt)
|
||||
|
||||
longhdrfmt = "!BBH"
|
||||
longhdrsiz = struct.calcsize(longhdrfmt)
|
||||
|
||||
tlvtypemap = {}
|
||||
tlvdataclsmap = {}
|
||||
|
||||
def __init__(self, tlvtype, tlvdata):
|
||||
self.tlvtype = tlvtype
|
||||
if tlvdata:
|
||||
try:
|
||||
self.value = self.tlvdataclsmap[self.tlvtype].unpack(tlvdata)
|
||||
except KeyError:
|
||||
self.value = tlvdata
|
||||
else:
|
||||
self.value = None
|
||||
|
||||
@classmethod
|
||||
def unpack(cls, data):
|
||||
"parse data and return (tlv, remainingdata)"
|
||||
tlvtype, tlvlen = struct.unpack(cls.hdrfmt, data[:cls.hdrsiz])
|
||||
hdrsiz = cls.hdrsiz
|
||||
if tlvlen == 0:
|
||||
tlvtype, zero, tlvlen = struct.unpack(cls.longhdrfmt,
|
||||
data[:cls.longhdrsiz])
|
||||
hdrsiz = cls.longhdrsiz
|
||||
tlvsiz = hdrsiz + tlvlen
|
||||
tlvsiz += -tlvsiz % 4 # for 32-bit alignment
|
||||
return cls(tlvtype, data[hdrsiz:tlvsiz]), data[tlvsiz:]
|
||||
|
||||
@classmethod
|
||||
def pack(cls, tlvtype, value):
|
||||
try:
|
||||
tlvlen, tlvdata = cls.tlvdataclsmap[tlvtype].pack(value)
|
||||
except Exception, e:
|
||||
raise ValueError, "TLV packing error type=%s: %s" % (tlvtype, e)
|
||||
if tlvlen < 256:
|
||||
hdr = struct.pack(cls.hdrfmt, tlvtype, tlvlen)
|
||||
else:
|
||||
hdr = struct.pack(cls.longhdrfmt, tlvtype, 0, tlvlen)
|
||||
return hdr + tlvdata
|
||||
|
||||
@classmethod
|
||||
def packstring(cls, tlvtype, value):
|
||||
return cls.pack(tlvtype, cls.tlvdataclsmap[tlvtype].fromstring(value))
|
||||
|
||||
def typestr(self):
|
||||
try:
|
||||
return self.tlvtypemap[self.tlvtype]
|
||||
except KeyError:
|
||||
return "unknown tlv type: %s" % str(self.tlvtype)
|
||||
|
||||
def __str__(self):
|
||||
return "%s <tlvtype = %s, value = %s>" % \
|
||||
(self.__class__.__name__, self.typestr(), self.value)
|
||||
|
||||
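# --- Editor's note (illustrative; not part of the original file) ---
# Wire format produced by CoreTlv.pack() above: a 1-byte type and 1-byte
# length followed by the value; when the length is 256 or more the long
# header (type, a zero byte, 2-byte length) is used instead, and unpack()
# detects it by the zero length byte.  Values are padded so each TLV ends
# on a 32-bit boundary, with the length field excluding that padding.
# For example, CoreNodeTlv.pack(CORE_TLV_NODE_NAME, "n1") (classes below)
# yields the 4 bytes "\x03\x02n1": type 0x03, length 2, value "n1".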
class CoreNodeTlv(CoreTlv):
|
||||
tlvtypemap = node_tlvs
|
||||
tlvdataclsmap = {
|
||||
CORE_TLV_NODE_NUMBER: CoreTlvDataUint32,
|
||||
CORE_TLV_NODE_TYPE: CoreTlvDataUint32,
|
||||
CORE_TLV_NODE_NAME: CoreTlvDataString,
|
||||
CORE_TLV_NODE_IPADDR: CoreTlvDataIPv4Addr,
|
||||
CORE_TLV_NODE_MACADDR: CoreTlvDataMacAddr,
|
||||
CORE_TLV_NODE_IP6ADDR: CoreTlvDataIPv6Addr,
|
||||
CORE_TLV_NODE_MODEL: CoreTlvDataString,
|
||||
CORE_TLV_NODE_EMUSRV: CoreTlvDataString,
|
||||
CORE_TLV_NODE_SESSION: CoreTlvDataString,
|
||||
CORE_TLV_NODE_XPOS: CoreTlvDataUint16,
|
||||
CORE_TLV_NODE_YPOS: CoreTlvDataUint16,
|
||||
CORE_TLV_NODE_CANVAS: CoreTlvDataUint16,
|
||||
CORE_TLV_NODE_EMUID: CoreTlvDataUint32,
|
||||
CORE_TLV_NODE_NETID: CoreTlvDataUint32,
|
||||
CORE_TLV_NODE_SERVICES: CoreTlvDataString,
|
||||
CORE_TLV_NODE_LAT: CoreTlvDataString,
|
||||
CORE_TLV_NODE_LONG: CoreTlvDataString,
|
||||
CORE_TLV_NODE_ALT: CoreTlvDataString,
|
||||
CORE_TLV_NODE_ICON: CoreTlvDataString,
|
||||
CORE_TLV_NODE_OPAQUE: CoreTlvDataString,
|
||||
}
|
||||
|
||||
class CoreLinkTlv(CoreTlv):
|
||||
tlvtypemap = link_tlvs
|
||||
tlvdataclsmap = {
|
||||
CORE_TLV_LINK_N1NUMBER: CoreTlvDataUint32,
|
||||
CORE_TLV_LINK_N2NUMBER: CoreTlvDataUint32,
|
||||
CORE_TLV_LINK_DELAY: CoreTlvDataUint64,
|
||||
CORE_TLV_LINK_BW: CoreTlvDataUint64,
|
||||
CORE_TLV_LINK_PER: CoreTlvDataString,
|
||||
CORE_TLV_LINK_DUP: CoreTlvDataString,
|
||||
CORE_TLV_LINK_JITTER: CoreTlvDataUint32,
|
||||
CORE_TLV_LINK_MER: CoreTlvDataUint16,
|
||||
CORE_TLV_LINK_BURST: CoreTlvDataUint16,
|
||||
CORE_TLV_LINK_SESSION: CoreTlvDataString,
|
||||
CORE_TLV_LINK_MBURST: CoreTlvDataUint16,
|
||||
CORE_TLV_LINK_TYPE: CoreTlvDataUint32,
|
||||
CORE_TLV_LINK_GUIATTR: CoreTlvDataString,
|
||||
CORE_TLV_LINK_EMUID: CoreTlvDataUint32,
|
||||
CORE_TLV_LINK_NETID: CoreTlvDataUint32,
|
||||
CORE_TLV_LINK_KEY: CoreTlvDataUint32,
|
||||
CORE_TLV_LINK_IF1NUM: CoreTlvDataUint16,
|
||||
CORE_TLV_LINK_IF1IP4: CoreTlvDataIPv4Addr,
|
||||
CORE_TLV_LINK_IF1IP4MASK: CoreTlvDataUint16,
|
||||
CORE_TLV_LINK_IF1MAC: CoreTlvDataMacAddr,
|
||||
CORE_TLV_LINK_IF1IP6: CoreTlvDataIPv6Addr,
|
||||
CORE_TLV_LINK_IF1IP6MASK: CoreTlvDataUint16,
|
||||
CORE_TLV_LINK_IF2NUM: CoreTlvDataUint16,
|
||||
CORE_TLV_LINK_IF2IP4: CoreTlvDataIPv4Addr,
|
||||
CORE_TLV_LINK_IF2IP4MASK: CoreTlvDataUint16,
|
||||
CORE_TLV_LINK_IF2MAC: CoreTlvDataMacAddr,
|
||||
CORE_TLV_LINK_IF2IP6: CoreTlvDataIPv6Addr,
|
||||
CORE_TLV_LINK_IF2IP6MASK: CoreTlvDataUint16,
|
||||
CORE_TLV_LINK_OPAQUE: CoreTlvDataString,
|
||||
}
|
||||
|
||||
class CoreExecTlv(CoreTlv):
|
||||
tlvtypemap = exec_tlvs
|
||||
tlvdataclsmap = {
|
||||
CORE_TLV_EXEC_NODE: CoreTlvDataUint32,
|
||||
CORE_TLV_EXEC_NUM: CoreTlvDataUint32,
|
||||
CORE_TLV_EXEC_TIME: CoreTlvDataUint32,
|
||||
CORE_TLV_EXEC_CMD: CoreTlvDataString,
|
||||
CORE_TLV_EXEC_RESULT: CoreTlvDataString,
|
||||
CORE_TLV_EXEC_STATUS: CoreTlvDataUint32,
|
||||
CORE_TLV_EXEC_SESSION: CoreTlvDataString,
|
||||
}
|
||||
|
||||
class CoreRegTlv(CoreTlv):
|
||||
tlvtypemap = reg_tlvs
|
||||
tlvdataclsmap = {
|
||||
CORE_TLV_REG_WIRELESS: CoreTlvDataString,
|
||||
CORE_TLV_REG_MOBILITY: CoreTlvDataString,
|
||||
CORE_TLV_REG_UTILITY: CoreTlvDataString,
|
||||
CORE_TLV_REG_EXECSRV: CoreTlvDataString,
|
||||
CORE_TLV_REG_GUI: CoreTlvDataString,
|
||||
CORE_TLV_REG_EMULSRV: CoreTlvDataString,
|
||||
CORE_TLV_REG_SESSION: CoreTlvDataString,
|
||||
}
|
||||
|
||||
class CoreConfTlv(CoreTlv):
|
||||
tlvtypemap = conf_tlvs
|
||||
tlvdataclsmap = {
|
||||
CORE_TLV_CONF_NODE: CoreTlvDataUint32,
|
||||
CORE_TLV_CONF_OBJ: CoreTlvDataString,
|
||||
CORE_TLV_CONF_TYPE: CoreTlvDataUint16,
|
||||
CORE_TLV_CONF_DATA_TYPES: CoreTlvDataUint16List,
|
||||
CORE_TLV_CONF_VALUES: CoreTlvDataString,
|
||||
CORE_TLV_CONF_CAPTIONS: CoreTlvDataString,
|
||||
CORE_TLV_CONF_BITMAP: CoreTlvDataString,
|
||||
CORE_TLV_CONF_POSSIBLE_VALUES: CoreTlvDataString,
|
||||
CORE_TLV_CONF_GROUPS: CoreTlvDataString,
|
||||
CORE_TLV_CONF_SESSION: CoreTlvDataString,
|
||||
CORE_TLV_CONF_NETID: CoreTlvDataUint32,
|
||||
CORE_TLV_CONF_OPAQUE: CoreTlvDataString,
|
||||
}
|
||||
|
||||
class CoreFileTlv(CoreTlv):
|
||||
tlvtypemap = file_tlvs
|
||||
tlvdataclsmap = {
|
||||
CORE_TLV_FILE_NODE: CoreTlvDataUint32,
|
||||
CORE_TLV_FILE_NAME: CoreTlvDataString,
|
||||
CORE_TLV_FILE_MODE: CoreTlvDataString,
|
||||
CORE_TLV_FILE_NUM: CoreTlvDataUint16,
|
||||
CORE_TLV_FILE_TYPE: CoreTlvDataString,
|
||||
CORE_TLV_FILE_SRCNAME: CoreTlvDataString,
|
||||
CORE_TLV_FILE_SESSION: CoreTlvDataString,
|
||||
CORE_TLV_FILE_DATA: CoreTlvDataString,
|
||||
CORE_TLV_FILE_CMPDATA: CoreTlvDataString,
|
||||
}
|
||||
|
||||
class CoreIfaceTlv(CoreTlv):
|
||||
tlvtypemap = iface_tlvs
|
||||
tlvdataclsmap = {
|
||||
CORE_TLV_IFACE_NODE: CoreTlvDataUint32,
|
||||
CORE_TLV_IFACE_NUM: CoreTlvDataUint16,
|
||||
CORE_TLV_IFACE_NAME: CoreTlvDataString,
|
||||
CORE_TLV_IFACE_IPADDR: CoreTlvDataIPv4Addr,
|
||||
CORE_TLV_IFACE_MASK: CoreTlvDataUint16,
|
||||
CORE_TLV_IFACE_MACADDR: CoreTlvDataMacAddr,
|
||||
CORE_TLV_IFACE_IP6ADDR: CoreTlvDataIPv6Addr,
|
||||
CORE_TLV_IFACE_IP6MASK: CoreTlvDataUint16,
|
||||
CORE_TLV_IFACE_TYPE: CoreTlvDataUint16,
|
||||
CORE_TLV_IFACE_SESSION: CoreTlvDataString,
|
||||
CORE_TLV_IFACE_STATE: CoreTlvDataUint16,
|
||||
CORE_TLV_IFACE_EMUID: CoreTlvDataUint32,
|
||||
CORE_TLV_IFACE_NETID: CoreTlvDataUint32,
|
||||
}
|
||||
|
||||
class CoreEventTlv(CoreTlv):
|
||||
tlvtypemap = event_tlvs
|
||||
tlvdataclsmap = {
|
||||
CORE_TLV_EVENT_NODE: CoreTlvDataUint32,
|
||||
CORE_TLV_EVENT_TYPE: CoreTlvDataUint32,
|
||||
CORE_TLV_EVENT_NAME: CoreTlvDataString,
|
||||
CORE_TLV_EVENT_DATA: CoreTlvDataString,
|
||||
CORE_TLV_EVENT_TIME: CoreTlvDataString,
|
||||
CORE_TLV_EVENT_SESSION: CoreTlvDataString,
|
||||
}
|
||||
|
||||
class CoreSessionTlv(CoreTlv):
|
||||
tlvtypemap = session_tlvs
|
||||
tlvdataclsmap = {
|
||||
CORE_TLV_SESS_NUMBER: CoreTlvDataString,
|
||||
CORE_TLV_SESS_NAME: CoreTlvDataString,
|
||||
CORE_TLV_SESS_FILE: CoreTlvDataString,
|
||||
CORE_TLV_SESS_NODECOUNT: CoreTlvDataString,
|
||||
CORE_TLV_SESS_DATE: CoreTlvDataString,
|
||||
CORE_TLV_SESS_THUMB: CoreTlvDataString,
|
||||
CORE_TLV_SESS_USER: CoreTlvDataString,
|
||||
CORE_TLV_SESS_OPAQUE: CoreTlvDataString,
|
||||
}
|
||||
|
||||
class CoreExceptionTlv(CoreTlv):
|
||||
tlvtypemap = exception_tlvs
|
||||
tlvdataclsmap = {
|
||||
CORE_TLV_EXCP_NODE: CoreTlvDataUint32,
|
||||
CORE_TLV_EXCP_SESSION: CoreTlvDataString,
|
||||
CORE_TLV_EXCP_LEVEL: CoreTlvDataUint16,
|
||||
CORE_TLV_EXCP_SOURCE: CoreTlvDataString,
|
||||
CORE_TLV_EXCP_DATE: CoreTlvDataString,
|
||||
CORE_TLV_EXCP_TEXT: CoreTlvDataString,
|
||||
CORE_TLV_EXCP_OPAQUE: CoreTlvDataString,
|
||||
}
|
||||
|
||||
|
||||
class CoreMessage(object):
|
||||
hdrfmt = "!BBH"
|
||||
hdrsiz = struct.calcsize(hdrfmt)
|
||||
|
||||
msgtype = None
|
||||
|
||||
flagmap = {}
|
||||
|
||||
tlvcls = CoreTlv
|
||||
|
||||
def __init__(self, flags, hdr, data):
|
||||
self.rawmsg = hdr + data
|
||||
self.flags = flags
|
||||
self.tlvdata = {}
|
||||
self.parsedata(data)
|
||||
|
||||
@classmethod
|
||||
def unpackhdr(cls, data):
|
||||
"parse data and return (msgtype, msgflags, msglen)"
|
||||
msgtype, msgflags, msglen = struct.unpack(cls.hdrfmt, data[:cls.hdrsiz])
|
||||
return msgtype, msgflags, msglen
|
||||
|
||||
@classmethod
|
||||
def pack(cls, msgflags, tlvdata):
|
||||
hdr = struct.pack(cls.hdrfmt, cls.msgtype, msgflags, len(tlvdata))
|
||||
return hdr + tlvdata
|
||||
|
||||
def addtlvdata(self, k, v):
|
||||
if k in self.tlvdata:
|
||||
raise KeyError, "key already exists: %s (val=%s)" % (k, v)
|
||||
self.tlvdata[k] = v
|
||||
|
||||
def gettlv(self, tlvtype):
|
||||
if tlvtype in self.tlvdata:
|
||||
return self.tlvdata[tlvtype]
|
||||
else:
|
||||
return None
|
||||
|
||||
def parsedata(self, data):
|
||||
while data:
|
||||
tlv, data = self.tlvcls.unpack(data)
|
||||
self.addtlvdata(tlv.tlvtype, tlv.value)
|
||||
|
||||
def packtlvdata(self):
|
||||
''' Opposite of parsedata(). Return packed TLV data using
|
||||
self.tlvdata dict. Used by repack().
|
||||
'''
|
||||
tlvdata = ""
|
||||
keys = sorted(self.tlvdata.keys())
|
||||
for k in keys:
|
||||
v = self.tlvdata[k]
|
||||
tlvdata += self.tlvcls.pack(k, v)
|
||||
return tlvdata
|
||||
|
||||
def repack(self):
|
||||
''' Invoke after updating self.tlvdata[] to rebuild self.rawmsg.
|
||||
Useful for modifying a message that has been parsed, before
|
||||
sending the raw data again.
|
||||
'''
|
||||
tlvdata = self.packtlvdata()
|
||||
self.rawmsg = self.pack(self.flags, tlvdata)
|
||||
|
||||
def typestr(self):
|
||||
try:
|
||||
return message_types[self.msgtype]
|
||||
except KeyError:
|
||||
return "unknown message type: %s" % str(self.msgtype)
|
||||
|
||||
def flagstr(self):
|
||||
msgflags = []
|
||||
flag = 1L
|
||||
while True:
|
||||
if (self.flags & flag):
|
||||
try:
|
||||
msgflags.append(self.flagmap[flag])
|
||||
except KeyError:
|
||||
msgflags.append("0x%x" % flag)
|
||||
flag <<= 1
|
||||
if not (self.flags & ~(flag - 1)):
|
||||
break
|
||||
return "0x%x <%s>" % (self.flags, " | ".join(msgflags))
|
||||
|
||||
def __str__(self):
|
||||
tmp = "%s <msgtype = %s, flags = %s>" % \
|
||||
(self.__class__.__name__, self.typestr(), self.flagstr())
|
||||
for k, v in self.tlvdata.iteritems():
|
||||
if k in self.tlvcls.tlvtypemap:
|
||||
tlvtype = self.tlvcls.tlvtypemap[k]
|
||||
else:
|
||||
tlvtype = "tlv type %s" % k
|
||||
tmp += "\n %s: %s" % (tlvtype, v)
|
||||
return tmp
|
||||
|
||||
def nodenumbers(self):
|
||||
''' Return a list of node numbers included in this message.
|
||||
'''
|
||||
n = None
|
||||
n2 = None
|
||||
# not all messages have node numbers
|
||||
if self.msgtype == CORE_API_NODE_MSG:
|
||||
n = self.gettlv(CORE_TLV_NODE_NUMBER)
|
||||
elif self.msgtype == CORE_API_LINK_MSG:
|
||||
n = self.gettlv(CORE_TLV_LINK_N1NUMBER)
|
||||
n2 = self.gettlv(CORE_TLV_LINK_N2NUMBER)
|
||||
elif self.msgtype == CORE_API_EXEC_MSG:
|
||||
n = self.gettlv(CORE_TLV_EXEC_NODE)
|
||||
elif self.msgtype == CORE_API_CONF_MSG:
|
||||
n = self.gettlv(CORE_TLV_CONF_NODE)
|
||||
elif self.msgtype == CORE_API_FILE_MSG:
|
||||
n = self.gettlv(CORE_TLV_FILE_NODE)
|
||||
elif self.msgtype == CORE_API_IFACE_MSG:
|
||||
n = self.gettlv(CORE_TLV_IFACE_NODE)
|
||||
elif self.msgtype == CORE_API_EVENT_MSG:
|
||||
n = self.gettlv(CORE_TLV_EVENT_NODE)
|
||||
r = []
|
||||
if n is not None:
|
||||
r.append(n)
|
||||
if n2 is not None:
|
||||
r.append(n2)
|
||||
return r
|
||||
|
||||
def sessionnumbers(self):
|
||||
''' Return a list of session numbers included in this message.
|
||||
'''
|
||||
r = []
|
||||
if self.msgtype == CORE_API_SESS_MSG:
|
||||
s = self.gettlv(CORE_TLV_SESS_NUMBER)
|
||||
elif self.msgtype == CORE_API_EXCP_MSG:
|
||||
s = self.gettlv(CORE_TLV_EXCP_SESSION)
|
||||
else:
|
||||
# All other messages share TLV number 0xA for the session number(s).
|
||||
s = self.gettlv(CORE_TLV_NODE_SESSION)
|
||||
if s is not None:
|
||||
for sid in s.split('|'):
|
||||
r.append(int(sid))
|
||||
return r
|
||||
|
||||
|
||||
class CoreNodeMessage(CoreMessage):
|
||||
msgtype = CORE_API_NODE_MSG
|
||||
flagmap = message_flags
|
||||
tlvcls = CoreNodeTlv
|
||||
|
||||
class CoreLinkMessage(CoreMessage):
|
||||
msgtype = CORE_API_LINK_MSG
|
||||
flagmap = message_flags
|
||||
tlvcls = CoreLinkTlv
|
||||
|
||||
class CoreExecMessage(CoreMessage):
|
||||
msgtype = CORE_API_EXEC_MSG
|
||||
flagmap = message_flags
|
||||
tlvcls = CoreExecTlv
|
||||
|
||||
class CoreRegMessage(CoreMessage):
|
||||
msgtype = CORE_API_REG_MSG
|
||||
flagmap = message_flags
|
||||
tlvcls = CoreRegTlv
|
||||
|
||||
class CoreConfMessage(CoreMessage):
|
||||
msgtype = CORE_API_CONF_MSG
|
||||
flagmap = message_flags
|
||||
tlvcls = CoreConfTlv
|
||||
|
||||
class CoreFileMessage(CoreMessage):
|
||||
msgtype = CORE_API_FILE_MSG
|
||||
flagmap = message_flags
|
||||
tlvcls = CoreFileTlv
|
||||
|
||||
class CoreIfaceMessage(CoreMessage):
|
||||
msgtype = CORE_API_IFACE_MSG
|
||||
flagmap = message_flags
|
||||
tlvcls = CoreIfaceTlv
|
||||
|
||||
class CoreEventMessage(CoreMessage):
|
||||
msgtype = CORE_API_EVENT_MSG
|
||||
flagmap = message_flags
|
||||
tlvcls = CoreEventTlv
|
||||
|
||||
class CoreSessionMessage(CoreMessage):
|
||||
msgtype = CORE_API_SESS_MSG
|
||||
flagmap = message_flags
|
||||
tlvcls = CoreSessionTlv
|
||||
|
||||
class CoreExceptionMessage(CoreMessage):
|
||||
msgtype = CORE_API_EXCP_MSG
|
||||
flagmap = message_flags
|
||||
tlvcls = CoreExceptionTlv
|
||||
|
||||
msgclsmap = {
|
||||
CORE_API_NODE_MSG: CoreNodeMessage,
|
||||
CORE_API_LINK_MSG: CoreLinkMessage,
|
||||
CORE_API_EXEC_MSG: CoreExecMessage,
|
||||
CORE_API_REG_MSG: CoreRegMessage,
|
||||
CORE_API_CONF_MSG: CoreConfMessage,
|
||||
CORE_API_FILE_MSG: CoreFileMessage,
|
||||
CORE_API_IFACE_MSG: CoreIfaceMessage,
|
||||
CORE_API_EVENT_MSG: CoreEventMessage,
|
||||
CORE_API_SESS_MSG: CoreSessionMessage,
|
||||
CORE_API_EXCP_MSG: CoreExceptionMessage,
|
||||
}
|
||||
|
||||
def msg_class(msgtypeid):
|
||||
global msgclsmap
|
||||
return msgclsmap[msgtypeid]
|
||||
|
||||
nodeclsmap = {}
|
||||
|
||||
def add_node_class(name, nodetypeid, nodecls, change = False):
|
||||
global nodeclsmap
|
||||
if nodetypeid in nodeclsmap:
|
||||
if not change:
|
||||
raise ValueError, \
|
||||
"node class already exists for nodetypeid %s" % nodetypeid
|
||||
nodeclsmap[nodetypeid] = nodecls
|
||||
if nodetypeid not in node_types:
|
||||
node_types[nodetypeid] = name
|
||||
exec "%s = %s" % (name, nodetypeid) in globals()
|
||||
elif name != node_types[nodetypeid]:
|
||||
raise ValueError, "node type already exists for '%s'" % name
|
||||
else:
|
||||
pass
|
||||
|
||||
def change_node_class(name, nodetypeid, nodecls):
|
||||
return add_node_class(name, nodetypeid, nodecls, change = True)
|
||||
|
||||
def node_class(nodetypeid):
|
||||
global nodeclsmap
|
||||
return nodeclsmap[nodetypeid]
|
||||
|
||||
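# --- Editor's note (illustrative; not part of the original file) ---
# Node implementations register themselves with add_node_class(); a
# hypothetical call add_node_class("CORE_NODE_DEF", 0, MyNodeClass)
# records MyNodeClass so that node_class(0) returns it.  A type id not
# already present in node_types is also added there and exported as a
# module constant via exec, mirroring enumdict() in data.py.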
def str_to_list(s):
|
||||
''' Helper to convert pipe-delimited string ("a|b|c") into a list (a, b, c)
|
||||
'''
|
||||
if s is None:
|
||||
return None
|
||||
return s.split("|")
|
||||
|
||||
def state_name(n):
|
||||
''' Helper to convert state number into state name using event types.
|
||||
'''
|
||||
if n in event_types:
|
||||
eventname = event_types[n]
|
||||
name = eventname.split('_')[2]
|
||||
else:
|
||||
name = "unknown"
|
||||
return name
|
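A minimal usage sketch (not part of this commit) showing how the message and TLV classes defined above fit together; it assumes the daemon package is importable and uses the Python 2 syntax of this codebase:

from core.api import coreapi

# build the TLV payload for a Node message describing node 1, named "n1"
tlvdata = coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_NUMBER, 1)
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_NAME, "n1")
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_XPOS, 100)
rawmsg = coreapi.CoreNodeMessage.pack(coreapi.CORE_API_ADD_FLAG, tlvdata)

# parse it back: header first, then hand the payload to the message class
hdr = rawmsg[:coreapi.CoreMessage.hdrsiz]
msgtype, flags, msglen = coreapi.CoreMessage.unpackhdr(hdr)
msg = coreapi.msg_class(msgtype)(flags, hdr, rawmsg[coreapi.CoreMessage.hdrsiz:])
assert msg.gettlv(coreapi.CORE_TLV_NODE_NAME) == "n1"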
327  daemon/core/api/data.py  Normal file
@@ -0,0 +1,327 @@
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Tom Goff <thomas.goff@boeing.com>
|
||||
#
|
||||
'''
|
||||
data.py: constant definitions for the CORE API, enumerating the
|
||||
different message and TLV types (these constants are also found in coreapi.h)
|
||||
'''
|
||||
|
||||
def enumdict(d):
|
||||
for k, v in d.iteritems():
|
||||
exec "%s = %s" % (v, k) in globals()
|
||||
|
||||
# Constants
|
||||
|
||||
CORE_API_VER = "1.21"
|
||||
CORE_API_PORT = 4038
|
||||
|
||||
# Message types
|
||||
|
||||
message_types = {
|
||||
0x01: "CORE_API_NODE_MSG",
|
||||
0x02: "CORE_API_LINK_MSG",
|
||||
0x03: "CORE_API_EXEC_MSG",
|
||||
0x04: "CORE_API_REG_MSG",
|
||||
0x05: "CORE_API_CONF_MSG",
|
||||
0x06: "CORE_API_FILE_MSG",
|
||||
0x07: "CORE_API_IFACE_MSG",
|
||||
0x08: "CORE_API_EVENT_MSG",
|
||||
0x09: "CORE_API_SESS_MSG",
|
||||
0x0A: "CORE_API_EXCP_MSG",
|
||||
0x0B: "CORE_API_MSG_MAX",
|
||||
}
|
||||
|
||||
enumdict(message_types)
|
||||
|
||||
# Generic Message Flags
|
||||
|
||||
message_flags = {
|
||||
0x01: "CORE_API_ADD_FLAG",
|
||||
0x02: "CORE_API_DEL_FLAG",
|
||||
0x04: "CORE_API_CRI_FLAG",
|
||||
0x08: "CORE_API_LOC_FLAG",
|
||||
0x10: "CORE_API_STR_FLAG",
|
||||
0x20: "CORE_API_TXT_FLAG",
|
||||
0x40: "CORE_API_TTY_FLAG",
|
||||
}
|
||||
|
||||
enumdict(message_flags)
|
||||
|
||||
# Node Message TLV Types
|
||||
|
||||
node_tlvs = {
|
||||
0x01: "CORE_TLV_NODE_NUMBER",
|
||||
0x02: "CORE_TLV_NODE_TYPE",
|
||||
0x03: "CORE_TLV_NODE_NAME",
|
||||
0x04: "CORE_TLV_NODE_IPADDR",
|
||||
0x05: "CORE_TLV_NODE_MACADDR",
|
||||
0x06: "CORE_TLV_NODE_IP6ADDR",
|
||||
0x07: "CORE_TLV_NODE_MODEL",
|
||||
0x08: "CORE_TLV_NODE_EMUSRV",
|
||||
0x0A: "CORE_TLV_NODE_SESSION",
|
||||
0x20: "CORE_TLV_NODE_XPOS",
|
||||
0x21: "CORE_TLV_NODE_YPOS",
|
||||
0x22: "CORE_TLV_NODE_CANVAS",
|
||||
0x23: "CORE_TLV_NODE_EMUID",
|
||||
0x24: "CORE_TLV_NODE_NETID",
|
||||
0x25: "CORE_TLV_NODE_SERVICES",
|
||||
0x30: "CORE_TLV_NODE_LAT",
|
||||
0x31: "CORE_TLV_NODE_LONG",
|
||||
0x32: "CORE_TLV_NODE_ALT",
|
||||
0x42: "CORE_TLV_NODE_ICON",
|
||||
0x50: "CORE_TLV_NODE_OPAQUE",
|
||||
}
|
||||
|
||||
enumdict(node_tlvs)
|
||||
|
||||
node_types = dict(enumerate([
|
||||
"CORE_NODE_DEF",
|
||||
"CORE_NODE_PHYS",
|
||||
"CORE_NODE_XEN",
|
||||
"CORE_NODE_TBD",
|
||||
"CORE_NODE_SWITCH",
|
||||
"CORE_NODE_HUB",
|
||||
"CORE_NODE_WLAN",
|
||||
"CORE_NODE_RJ45",
|
||||
"CORE_NODE_TUNNEL",
|
||||
"CORE_NODE_KTUNNEL",
|
||||
"CORE_NODE_EMANE",
|
||||
]))
|
||||
|
||||
enumdict(node_types)
|
||||
|
||||
rj45_models = dict(enumerate([
|
||||
"RJ45_MODEL_LINKED",
|
||||
"RJ45_MODEL_WIRELESS",
|
||||
"RJ45_MODEL_INSTALLED",
|
||||
]))
|
||||
|
||||
enumdict(rj45_models)
|
||||
|
||||
# Link Message TLV Types
|
||||
|
||||
link_tlvs = {
|
||||
0x01: "CORE_TLV_LINK_N1NUMBER",
|
||||
0x02: "CORE_TLV_LINK_N2NUMBER",
|
||||
0x03: "CORE_TLV_LINK_DELAY",
|
||||
0x04: "CORE_TLV_LINK_BW",
|
||||
0x05: "CORE_TLV_LINK_PER",
|
||||
0x06: "CORE_TLV_LINK_DUP",
|
||||
0x07: "CORE_TLV_LINK_JITTER",
|
||||
0x08: "CORE_TLV_LINK_MER",
|
||||
0x09: "CORE_TLV_LINK_BURST",
|
||||
CORE_TLV_NODE_SESSION: "CORE_TLV_LINK_SESSION",
|
||||
0x10: "CORE_TLV_LINK_MBURST",
|
||||
0x20: "CORE_TLV_LINK_TYPE",
|
||||
0x21: "CORE_TLV_LINK_GUIATTR",
|
||||
0x23: "CORE_TLV_LINK_EMUID",
|
||||
0x24: "CORE_TLV_LINK_NETID",
|
||||
0x25: "CORE_TLV_LINK_KEY",
|
||||
0x30: "CORE_TLV_LINK_IF1NUM",
|
||||
0x31: "CORE_TLV_LINK_IF1IP4",
|
||||
0x32: "CORE_TLV_LINK_IF1IP4MASK",
|
||||
0x33: "CORE_TLV_LINK_IF1MAC",
|
||||
0x34: "CORE_TLV_LINK_IF1IP6",
|
||||
0x35: "CORE_TLV_LINK_IF1IP6MASK",
|
||||
0x36: "CORE_TLV_LINK_IF2NUM",
|
||||
0x37: "CORE_TLV_LINK_IF2IP4",
|
||||
0x38: "CORE_TLV_LINK_IF2IP4MASK",
|
||||
0x39: "CORE_TLV_LINK_IF2MAC",
|
||||
0x40: "CORE_TLV_LINK_IF2IP6",
|
||||
0x41: "CORE_TLV_LINK_IF2IP6MASK",
|
||||
0x50: "CORE_TLV_LINK_OPAQUE",
|
||||
}
|
||||
|
||||
enumdict(link_tlvs)
|
||||
|
||||
link_types = dict(enumerate([
|
||||
"CORE_LINK_WIRELESS",
|
||||
"CORE_LINK_WIRED",
|
||||
]))
|
||||
|
||||
enumdict(link_types)
|
||||
|
||||
# Execute Message TLV Types
|
||||
|
||||
exec_tlvs = {
|
||||
0x01: "CORE_TLV_EXEC_NODE",
|
||||
0x02: "CORE_TLV_EXEC_NUM",
|
||||
0x03: "CORE_TLV_EXEC_TIME",
|
||||
0x04: "CORE_TLV_EXEC_CMD",
|
||||
0x05: "CORE_TLV_EXEC_RESULT",
|
||||
0x06: "CORE_TLV_EXEC_STATUS",
|
||||
CORE_TLV_NODE_SESSION: "CORE_TLV_EXEC_SESSION",
|
||||
}
|
||||
|
||||
enumdict(exec_tlvs)
|
||||
|
||||
# Register Message TLV Types
|
||||
|
||||
reg_tlvs = {
|
||||
0x01: "CORE_TLV_REG_WIRELESS",
|
||||
0x02: "CORE_TLV_REG_MOBILITY",
|
||||
0x03: "CORE_TLV_REG_UTILITY",
|
||||
0x04: "CORE_TLV_REG_EXECSRV",
|
||||
0x05: "CORE_TLV_REG_GUI",
|
||||
0x06: "CORE_TLV_REG_EMULSRV",
|
||||
CORE_TLV_NODE_SESSION: "CORE_TLV_REG_SESSION",
|
||||
}
|
||||
|
||||
enumdict(reg_tlvs)
|
||||
|
||||
# Configuration Message TLV Types
|
||||
|
||||
conf_tlvs = {
|
||||
0x01: "CORE_TLV_CONF_NODE",
|
||||
0x02: "CORE_TLV_CONF_OBJ",
|
||||
0x03: "CORE_TLV_CONF_TYPE",
|
||||
0x04: "CORE_TLV_CONF_DATA_TYPES",
|
||||
0x05: "CORE_TLV_CONF_VALUES",
|
||||
0x06: "CORE_TLV_CONF_CAPTIONS",
|
||||
0x07: "CORE_TLV_CONF_BITMAP",
|
||||
0x08: "CORE_TLV_CONF_POSSIBLE_VALUES",
|
||||
0x09: "CORE_TLV_CONF_GROUPS",
|
||||
CORE_TLV_NODE_SESSION: "CORE_TLV_CONF_SESSION",
|
||||
CORE_TLV_NODE_NETID: "CORE_TLV_CONF_NETID",
|
||||
0x50: "CORE_TLV_CONF_OPAQUE",
|
||||
}
|
||||
|
||||
enumdict(conf_tlvs)
|
||||
|
||||
conf_flags = {
|
||||
0x00: "CONF_TYPE_FLAGS_NONE",
|
||||
0x01: "CONF_TYPE_FLAGS_REQUEST",
|
||||
0x02: "CONF_TYPE_FLAGS_UPDATE",
|
||||
0x03: "CONF_TYPE_FLAGS_RESET",
|
||||
}
|
||||
|
||||
enumdict(conf_flags)
|
||||
|
||||
conf_data_types = {
|
||||
0x01: "CONF_DATA_TYPE_UINT8",
|
||||
0x02: "CONF_DATA_TYPE_UINT16",
|
||||
0x03: "CONF_DATA_TYPE_UINT32",
|
||||
0x04: "CONF_DATA_TYPE_UINT64",
|
||||
0x05: "CONF_DATA_TYPE_INT8",
|
||||
0x06: "CONF_DATA_TYPE_INT16",
|
||||
0x07: "CONF_DATA_TYPE_INT32",
|
||||
0x08: "CONF_DATA_TYPE_INT64",
|
||||
0x09: "CONF_DATA_TYPE_FLOAT",
|
||||
0x0A: "CONF_DATA_TYPE_STRING",
|
||||
0x0B: "CONF_DATA_TYPE_BOOL",
|
||||
}
|
||||
|
||||
enumdict(conf_data_types)
|
||||
|
||||
# File Message TLV Types
|
||||
|
||||
file_tlvs = {
|
||||
0x01: "CORE_TLV_FILE_NODE",
|
||||
0x02: "CORE_TLV_FILE_NAME",
|
||||
0x03: "CORE_TLV_FILE_MODE",
|
||||
0x04: "CORE_TLV_FILE_NUM",
|
||||
0x05: "CORE_TLV_FILE_TYPE",
|
||||
0x06: "CORE_TLV_FILE_SRCNAME",
|
||||
CORE_TLV_NODE_SESSION: "CORE_TLV_FILE_SESSION",
|
||||
0x10: "CORE_TLV_FILE_DATA",
|
||||
0x11: "CORE_TLV_FILE_CMPDATA",
|
||||
}
|
||||
|
||||
enumdict(file_tlvs)
|
||||
|
||||
# Interface Message TLV Types
|
||||
|
||||
iface_tlvs = {
|
||||
0x01: "CORE_TLV_IFACE_NODE",
|
||||
0x02: "CORE_TLV_IFACE_NUM",
|
||||
0x03: "CORE_TLV_IFACE_NAME",
|
||||
0x04: "CORE_TLV_IFACE_IPADDR",
|
||||
0x05: "CORE_TLV_IFACE_MASK",
|
||||
0x06: "CORE_TLV_IFACE_MACADDR",
|
||||
0x07: "CORE_TLV_IFACE_IP6ADDR",
|
||||
0x08: "CORE_TLV_IFACE_IP6MASK",
|
||||
0x09: "CORE_TLV_IFACE_TYPE",
|
||||
CORE_TLV_NODE_SESSION: "CORE_TLV_IFACE_SESSION",
|
||||
0x0B: "CORE_TLV_IFACE_STATE",
|
||||
CORE_TLV_NODE_EMUID: "CORE_TLV_IFACE_EMUID",
|
||||
CORE_TLV_NODE_NETID: "CORE_TLV_IFACE_NETID",
|
||||
}
|
||||
|
||||
enumdict(iface_tlvs)
|
||||
|
||||
# Event Message TLV Types
|
||||
|
||||
event_tlvs = {
|
||||
0x01: "CORE_TLV_EVENT_NODE",
|
||||
0x02: "CORE_TLV_EVENT_TYPE",
|
||||
0x03: "CORE_TLV_EVENT_NAME",
|
||||
0x04: "CORE_TLV_EVENT_DATA",
|
||||
0x05: "CORE_TLV_EVENT_TIME",
|
||||
CORE_TLV_NODE_SESSION: "CORE_TLV_EVENT_SESSION",
|
||||
}
|
||||
|
||||
enumdict(event_tlvs)
|
||||
|
||||
event_types = dict(enumerate([
|
||||
"CORE_EVENT_NONE",
|
||||
"CORE_EVENT_DEFINITION_STATE",
|
||||
"CORE_EVENT_CONFIGURATION_STATE",
|
||||
"CORE_EVENT_INSTANTIATION_STATE",
|
||||
"CORE_EVENT_RUNTIME_STATE",
|
||||
"CORE_EVENT_DATACOLLECT_STATE",
|
||||
"CORE_EVENT_SHUTDOWN_STATE",
|
||||
"CORE_EVENT_START",
|
||||
"CORE_EVENT_STOP",
|
||||
"CORE_EVENT_PAUSE",
|
||||
"CORE_EVENT_RESTART",
|
||||
"CORE_EVENT_FILE_OPEN",
|
||||
"CORE_EVENT_FILE_SAVE",
|
||||
"CORE_EVENT_SCHEDULED",
|
||||
]))
|
||||
|
||||
enumdict(event_types)
|
||||
|
||||
# Session Message TLV Types
|
||||
|
||||
session_tlvs = {
|
||||
0x01: "CORE_TLV_SESS_NUMBER",
|
||||
0x02: "CORE_TLV_SESS_NAME",
|
||||
0x03: "CORE_TLV_SESS_FILE",
|
||||
0x04: "CORE_TLV_SESS_NODECOUNT",
|
||||
0x05: "CORE_TLV_SESS_DATE",
|
||||
0x06: "CORE_TLV_SESS_THUMB",
|
||||
0x07: "CORE_TLV_SESS_USER",
|
||||
0x0A: "CORE_TLV_SESS_OPAQUE",
|
||||
}
|
||||
|
||||
enumdict(session_tlvs)
|
||||
|
||||
# Exception Message TLV Types
|
||||
|
||||
exception_tlvs = {
|
||||
0x01: "CORE_TLV_EXCP_NODE",
|
||||
0x02: "CORE_TLV_EXCP_SESSION",
|
||||
0x03: "CORE_TLV_EXCP_LEVEL",
|
||||
0x04: "CORE_TLV_EXCP_SOURCE",
|
||||
0x05: "CORE_TLV_EXCP_DATE",
|
||||
0x06: "CORE_TLV_EXCP_TEXT",
|
||||
0x0A: "CORE_TLV_EXCP_OPAQUE",
|
||||
}
|
||||
|
||||
enumdict(exception_tlvs)
|
||||
|
||||
exception_levels = dict(enumerate([
|
||||
"CORE_EXCP_LEVEL_NONE",
|
||||
"CORE_EXCP_LEVEL_FATAL",
|
||||
"CORE_EXCP_LEVEL_ERROR",
|
||||
"CORE_EXCP_LEVEL_WARNING",
|
||||
"CORE_EXCP_LEVEL_NOTICE",
|
||||
]))
|
||||
|
||||
enumdict(exception_levels)
|
||||
|
||||
del enumdict
|
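A small sketch (not part of this commit) of the effect of enumdict() above: each name in the dictionaries becomes a module-level constant bound to its numeric key, while the dictionaries themselves remain available for reverse lookups (as state_name() in coreapi.py does).

from core.api import data

assert data.CORE_API_NODE_MSG == 0x01              # injected by enumdict()
assert data.CORE_TLV_NODE_SESSION == 0x0A          # shared by several TLV maps
assert data.message_types[0x0A] == "CORE_API_EXCP_MSG"   # reverse lookup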
858  daemon/core/broker.py  Normal file
@@ -0,0 +1,858 @@
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
broker.py: definition of CoreBroker class that is part of the
|
||||
pycore session object. Handles distributing parts of the emulation out to
|
||||
other emulation servers. The broker is consulted during the
|
||||
CoreRequestHandler.handlemsg() loop to determine if messages should be handled
|
||||
locally or forwarded on to another emulation server.
|
||||
'''
|
||||
|
||||
import os, socket, select, threading, sys
|
||||
from core.api import coreapi
|
||||
from core.coreobj import PyCoreNode, PyCoreNet
|
||||
from core.emane.nodes import EmaneNet
|
||||
from core.phys.pnodes import PhysicalNode
|
||||
from core.misc.ipaddr import IPAddr
|
||||
from core.conf import ConfigurableManager
|
||||
if os.uname()[0] == "Linux":
|
||||
from core.netns.vif import GreTap
|
||||
from core.netns.vnet import GreTapBridge
|
||||
|
||||
|
||||
class CoreBroker(ConfigurableManager):
|
||||
''' Member of pycore session class for handling global emulation server
|
||||
data.
|
||||
'''
|
||||
_name = "broker"
|
||||
_type = coreapi.CORE_TLV_REG_UTILITY
|
||||
|
||||
def __init__(self, session, verbose = False):
|
||||
ConfigurableManager.__init__(self, session)
|
||||
self.session_id_master = None
|
||||
self.myip = None
|
||||
self.verbose = verbose
|
||||
# dict containing tuples of (host, port, sock)
|
||||
self.servers = {}
|
||||
self.servers_lock = threading.Lock()
|
||||
self.addserver("localhost", None, None)
|
||||
# dict containing node number to server name mapping
|
||||
self.nodemap = {}
|
||||
# this lock also protects self.nodecounts
|
||||
self.nodemap_lock = threading.Lock()
|
||||
# reference counts of nodes on servers
|
||||
self.nodecounts = { }
|
||||
self.bootcount = 0
|
||||
# list of node numbers that are link-layer nodes (networks)
|
||||
self.nets = []
|
||||
# list of node numbers that are PhysicalNode nodes
|
||||
self.phys = []
|
||||
# allows for other message handlers to process API messages (e.g. EMANE)
|
||||
self.handlers = ()
|
||||
# dict with tunnel key to tunnel device mapping
|
||||
self.tunnels = {}
|
||||
self.dorecvloop = False
|
||||
self.recvthread = None
|
||||
|
||||
def startup(self):
|
||||
''' Build tunnels between network-layer nodes now that all node
|
||||
and link information has been received; called when session
|
||||
enters the instantiation state.
|
||||
'''
|
||||
self.addnettunnels()
|
||||
self.writeservers()
|
||||
|
||||
def shutdown(self):
|
||||
''' Close all active sockets; called when the session enters the
|
||||
data collect state
|
||||
'''
|
||||
with self.servers_lock:
|
||||
while len(self.servers) > 0:
|
||||
(server, v) = self.servers.popitem()
|
||||
(host, port, sock) = v
|
||||
if sock is None:
|
||||
continue
|
||||
if self.verbose:
|
||||
self.session.info("closing connection with %s @ %s:%s" % \
|
||||
(server, host, port))
|
||||
sock.close()
|
||||
self.reset()
|
||||
self.dorecvloop = False
|
||||
if self.recvthread is not None:
|
||||
self.recvthread.join()
|
||||
|
||||
def reset(self):
|
||||
''' Reset to initial state.
|
||||
'''
|
||||
self.nodemap_lock.acquire()
|
||||
self.nodemap.clear()
|
||||
for server in self.nodecounts:
|
||||
if self.nodecounts[server] < 1:
|
||||
self.delserver(server)
|
||||
self.nodecounts.clear()
|
||||
self.bootcount = 0
|
||||
self.nodemap_lock.release()
|
||||
del self.nets[:]
|
||||
del self.phys[:]
|
||||
while len(self.tunnels) > 0:
|
||||
(key, gt) = self.tunnels.popitem()
|
||||
gt.shutdown()
|
||||
|
||||
def startrecvloop(self):
|
||||
''' Spawn the recvloop() thread if it hasn't been already started.
|
||||
'''
|
||||
if self.recvthread is not None:
|
||||
if self.recvthread.isAlive():
|
||||
return
|
||||
else:
|
||||
self.recvthread.join()
|
||||
# start reading data from connected sockets
|
||||
self.dorecvloop = True
|
||||
self.recvthread = threading.Thread(target = self.recvloop)
|
||||
self.recvthread.daemon = True
|
||||
self.recvthread.start()
|
||||
|
||||
def recvloop(self):
|
||||
''' Thread target that receives messages from server sockets.
|
||||
'''
|
||||
self.dorecvloop = True
|
||||
# note: this loop continues after emulation is stopped,
|
||||
# even with 0 servers
|
||||
while self.dorecvloop:
|
||||
rlist = []
|
||||
with self.servers_lock:
|
||||
# build a socket list for select call
|
||||
for name in self.servers:
|
||||
(h, p, sock) = self.servers[name]
|
||||
if sock is not None:
|
||||
rlist.append(sock.fileno())
|
||||
r, w, x = select.select(rlist, [], [], 1.0)
|
||||
for sockfd in r:
|
||||
try:
|
||||
(h, p, sock, name) = self.getserverbysock(sockfd)
|
||||
except KeyError:
|
||||
# servers may have changed; loop again
|
||||
break
|
||||
rcvlen = self.recv(sock, h)
|
||||
if rcvlen == 0:
|
||||
if self.verbose:
|
||||
self.session.info("connection with %s @ %s:%s" \
|
||||
" has closed" % (name, h, p))
|
||||
self.servers[name] = (h, p, None)
|
||||
|
||||
|
||||
def recv(self, sock, host):
|
||||
''' Receive data on an emulation server socket and broadcast it to
|
||||
all connected session handlers. Returns the length of data received
|
||||
and forwarded. Return value of zero indicates the socket has closed
|
||||
and should be removed from the self.servers dict.
|
||||
'''
|
||||
msghdr = sock.recv(coreapi.CoreMessage.hdrsiz)
|
||||
if len(msghdr) == 0:
|
||||
# server disconnected
|
||||
sock.close()
|
||||
return 0
|
||||
if len(msghdr) != coreapi.CoreMessage.hdrsiz:
|
||||
if self.verbose:
|
||||
self.session.info("warning: broker received not enough data " \
|
||||
"len=%s" % len(msghdr))
|
||||
return len(msghdr)
|
||||
|
||||
msgtype, msgflags, msglen = coreapi.CoreMessage.unpackhdr(msghdr)
|
||||
msgdata = sock.recv(msglen)
|
||||
data = msghdr + msgdata
|
||||
count = None
|
||||
# snoop exec response for remote interactive TTYs
|
||||
if msgtype == coreapi.CORE_API_EXEC_MSG and \
|
||||
msgflags & coreapi.CORE_API_TTY_FLAG:
|
||||
data = self.fixupremotetty(msghdr, msgdata, host)
|
||||
elif msgtype == coreapi.CORE_API_NODE_MSG:
|
||||
# snoop node delete response to decrement node counts
|
||||
if msgflags & coreapi.CORE_API_DEL_FLAG:
|
||||
msg = coreapi.CoreNodeMessage(msgflags, msghdr, msgdata)
|
||||
nodenum = msg.gettlv(coreapi.CORE_TLV_NODE_NUMBER)
|
||||
if nodenum is not None:
|
||||
count = self.delnodemap(sock, nodenum)
|
||||
# snoop node add response to increment booted node count
|
||||
# (only CoreNodes send these response messages)
|
||||
elif msgflags & \
|
||||
(coreapi.CORE_API_ADD_FLAG | coreapi.CORE_API_LOC_FLAG):
|
||||
self.incrbootcount()
|
||||
self.session.checkruntime()
|
||||
|
||||
self.session.broadcastraw(None, data)
|
||||
if count is not None and count < 1:
|
||||
return 0
|
||||
else:
|
||||
return len(data)
|
||||
|
||||
def addserver(self, name, host, port):
|
||||
''' Add a new server, and try to connect to it. If we're already
|
||||
connected to this (host, port), then leave it alone. When host,port
|
||||
is None, do not try to connect.
|
||||
'''
|
||||
self.servers_lock.acquire()
|
||||
if name in self.servers:
|
||||
(oldhost, oldport, sock) = self.servers[name]
|
||||
if host == oldhost or port == oldport:
|
||||
# leave this socket connected
|
||||
if sock is not None:
|
||||
self.servers_lock.release()
|
||||
return
|
||||
if self.verbose and host is not None and sock is not None:
|
||||
self.session.info("closing connection with %s @ %s:%s" % \
|
||||
(name, host, port))
|
||||
if sock is not None:
|
||||
sock.close()
|
||||
self.servers_lock.release()
|
||||
if self.verbose and host is not None:
|
||||
self.session.info("adding server %s @ %s:%s" % (name, host, port))
|
||||
if host is None:
|
||||
sock = None
|
||||
else:
|
||||
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
#sock.setblocking(0)
|
||||
#error = sock.connect_ex((host, port))
|
||||
try:
|
||||
sock.connect((host, port))
|
||||
self.startrecvloop()
|
||||
except Exception, e:
|
||||
self.session.warn("error connecting to server %s:%s:\n\t%s" % \
|
||||
(host, port, e))
|
||||
sock.close()
|
||||
sock = None
|
||||
self.servers_lock.acquire()
|
||||
self.servers[name] = (host, port, sock)
|
||||
self.servers_lock.release()
|
||||
|
||||
def delserver(self, name):
|
||||
''' Remove a server and hang up any connection.
|
||||
'''
|
||||
self.servers_lock.acquire()
|
||||
if name not in self.servers:
|
||||
self.servers_lock.release()
|
||||
return
|
||||
(host, port, sock) = self.servers.pop(name)
|
||||
if sock is not None:
|
||||
if self.verbose:
|
||||
self.session.info("closing connection with %s @ %s:%s" % \
|
||||
(name, host, port))
|
||||
sock.close()
|
||||
self.servers_lock.release()
|
||||
|
||||
def getserver(self, name):
|
||||
''' Return the (host, port, sock) tuple, or raise a KeyError exception.
|
||||
'''
|
||||
if name not in self.servers:
|
||||
raise KeyError, "emulation server %s not found" % name
|
||||
return self.servers[name]
|
||||
|
||||
def getserverbysock(self, sockfd):
|
||||
''' Return a (host, port, sock, name) tuple based on socket file
|
||||
descriptor, or raise a KeyError exception.
|
||||
'''
|
||||
with self.servers_lock:
|
||||
for name in self.servers:
|
||||
(host, port, sock) = self.servers[name]
|
||||
if sock is None:
|
||||
continue
|
||||
if sock.fileno() == sockfd:
|
||||
return (host, port, sock, name)
|
||||
raise KeyError, "socket fd %s not found" % sockfd
|
||||
|
||||
def getserverlist(self):
|
||||
''' Return the list of server names (keys from self.servers).
|
||||
'''
|
||||
with self.servers_lock:
|
||||
serverlist = sorted(self.servers.keys())
|
||||
return serverlist
|
||||
|
||||
def tunnelkey(self, n1num, n2num):
|
||||
''' Compute a 32-bit key used to uniquely identify a GRE tunnel.
|
||||
The hash(n1num), hash(n2num) values are used, so node numbers may be
|
||||
None or string values (used for e.g. "ctrlnet").
|
||||
'''
|
||||
sid = self.session_id_master
|
||||
if sid is None:
|
||||
# this is the master session
|
||||
sid = self.session.sessionid
|
||||
|
||||
key = (sid << 16) | hash(n1num) | (hash(n2num) << 8)
|
||||
return key & 0xFFFFFFFF
|
||||
|
||||
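# --- Editor's note (illustrative; not part of the original file) ---
# Example of the key computed by tunnelkey() above for a session id of 1
# and node numbers 2 and 3 (hash(n) == n for small CPython 2 ints):
#     (1 << 16) | hash(2) | (hash(3) << 8) == 0x10302
# The final "& 0xFFFFFFFF" keeps the GRE key within 32 bits, and using
# hash() allows string node "numbers" such as "ctrlnet".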
def addtunnel(self, remoteip, n1num, n2num, localnum):
|
||||
''' Add a new GreTapBridge between nodes on two different machines.
|
||||
'''
|
||||
key = self.tunnelkey(n1num, n2num)
|
||||
if localnum == n2num:
|
||||
remotenum = n1num
|
||||
else:
|
||||
remotenum = n2num
|
||||
if key in self.tunnels.keys():
|
||||
self.session.warn("tunnel with key %s (%s-%s) already exists!" % \
|
||||
(key, n1num, n2num))
|
||||
else:
|
||||
objid = key & ((1<<16)-1)
|
||||
self.session.info("Adding tunnel for %s-%s to %s with key %s" % \
|
||||
(n1num, n2num, remoteip, key))
|
||||
if localnum in self.phys:
|
||||
# no bridge is needed on physical nodes; use the GreTap directly
|
||||
gt = GreTap(node=None, name=None, session=self.session,
|
||||
remoteip=remoteip, key=key)
|
||||
else:
|
||||
gt = self.session.addobj(cls = GreTapBridge, objid = objid,
|
||||
policy="ACCEPT", remoteip=remoteip, key = key)
|
||||
gt.localnum = localnum
|
||||
gt.remotenum = remotenum
|
||||
self.tunnels[key] = gt
|
||||
|
||||
def addnettunnels(self):
|
||||
''' Add GreTaps between network devices on different machines.
|
||||
The GreTapBridge is not used since that would add an extra bridge.
|
||||
'''
|
||||
for n in self.nets:
|
||||
self.addnettunnel(n)
|
||||
|
||||
def addnettunnel(self, n):
|
||||
try:
|
||||
net = self.session.obj(n)
|
||||
except KeyError:
|
||||
raise KeyError, "network node %s not found" % n
|
||||
# add other nets here that do not require tunnels
|
||||
if isinstance(net, EmaneNet):
|
||||
return None
|
||||
|
||||
servers = self.getserversbynode(n)
|
||||
if len(servers) < 2:
|
||||
return None
|
||||
hosts = []
|
||||
for server in servers:
|
||||
(host, port, sock) = self.getserver(server)
|
||||
if host is None:
|
||||
continue
|
||||
hosts.append(host)
|
||||
if len(hosts) == 0:
|
||||
# get IP address from API message sender (master)
|
||||
self.session._handlerslock.acquire()
|
||||
for h in self.session._handlers:
|
||||
if h.client_address != "":
|
||||
hosts.append(h.client_address[0])
|
||||
self.session._handlerslock.release()
|
||||
|
||||
r = []
|
||||
for host in hosts:
|
||||
if self.myip:
|
||||
# we are the remote emulation server
|
||||
myip = self.myip
|
||||
else:
|
||||
# we are the session master
|
||||
myip = host
|
||||
key = self.tunnelkey(n, IPAddr.toint(myip))
|
||||
if key in self.tunnels.keys():
|
||||
continue
|
||||
self.session.info("Adding tunnel for net %s to %s with key %s" % \
|
||||
(n, host, key))
|
||||
gt = GreTap(node=None, name=None, session=self.session,
|
||||
remoteip=host, key=key)
|
||||
self.tunnels[key] = gt
|
||||
r.append(gt)
|
||||
# attaching to net will later allow gt to be destroyed
|
||||
# during net.shutdown()
|
||||
net.attach(gt)
|
||||
return r
|
||||
|
||||
def deltunnel(self, n1num, n2num):
|
||||
''' Cleanup of the GreTapBridge.
|
||||
'''
|
||||
key = self.tunnelkey(n1num, n2num)
|
||||
try:
|
||||
gt = self.tunnels.pop(key)
|
||||
except KeyError:
|
||||
gt = None
|
||||
if gt:
|
||||
self.session.delobj(gt.objid)
|
||||
del gt
|
||||
|
||||
def gettunnel(self, n1num, n2num):
|
||||
''' Return the GreTap between two nodes if it exists.
|
||||
'''
|
||||
key = self.tunnelkey(n1num, n2num)
|
||||
if key in self.tunnels.keys():
|
||||
return self.tunnels[key]
|
||||
else:
|
||||
return None
|
||||
|
||||
def addnodemap(self, server, nodenum):
|
||||
''' Record a node number to emulation server mapping.
|
||||
'''
|
||||
self.nodemap_lock.acquire()
|
||||
if nodenum in self.nodemap:
|
||||
if server in self.nodemap[nodenum]:
|
||||
self.nodemap_lock.release()
|
||||
return
|
||||
self.nodemap[nodenum].append(server)
|
||||
else:
|
||||
self.nodemap[nodenum] = [server,]
|
||||
if server in self.nodecounts:
|
||||
self.nodecounts[server] += 1
|
||||
else:
|
||||
self.nodecounts[server] = 1
|
||||
self.nodemap_lock.release()
|
||||
|
||||
def delnodemap(self, sock, nodenum):
|
||||
''' Remove a node number to emulation server mapping.
|
||||
Return the number of nodes left on this server.
|
||||
'''
|
||||
self.nodemap_lock.acquire()
|
||||
count = None
|
||||
if nodenum not in self.nodemap:
|
||||
self.nodemap_lock.release()
|
||||
return count
|
||||
found = False
|
||||
for server in self.nodemap[nodenum]:
|
||||
(host, port, srvsock) = self.getserver(server)
|
||||
if srvsock == sock:
|
||||
found = True
|
||||
break
|
||||
if server in self.nodecounts:
|
||||
count = self.nodecounts[server]
|
||||
if found:
|
||||
self.nodemap[nodenum].remove(server)
|
||||
if server in self.nodecounts:
|
||||
count -= 1
|
||||
self.nodecounts[server] = count
|
||||
self.nodemap_lock.release()
|
||||
return count
|
||||
|
||||
def incrbootcount(self):
|
||||
''' Count a node that has booted.
|
||||
'''
|
||||
self.bootcount += 1
|
||||
return self.bootcount
|
||||
|
||||
def getbootcount(self):
|
||||
''' Return the number of booted nodes.
|
||||
'''
|
||||
return self.bootcount
|
||||
|
||||
def getserversbynode(self, nodenum):
|
||||
''' Retrieve a list of emulation servers given a node number.
|
||||
'''
|
||||
self.nodemap_lock.acquire()
|
||||
if nodenum not in self.nodemap:
|
||||
self.nodemap_lock.release()
|
||||
return []
|
||||
r = self.nodemap[nodenum]
|
||||
self.nodemap_lock.release()
|
||||
return r
|
||||
|
||||
def addnet(self, nodenum):
|
||||
''' Add a node number to the list of link-layer nodes.
|
||||
'''
|
||||
if nodenum not in self.nets:
|
||||
self.nets.append(nodenum)
|
||||
|
||||
def addphys(self, nodenum):
|
||||
''' Add a node number to the list of physical nodes.
|
||||
'''
|
||||
if nodenum not in self.phys:
|
||||
self.phys.append(nodenum)
|
||||
|
||||
def configure_reset(self, msg):
|
||||
''' Ignore reset messages, because node delete responses may still
|
||||
arrive and require the use of nodecounts.
|
||||
'''
|
||||
return None
|
||||
|
||||
def configure_values(self, msg, values):
|
||||
''' Receive configuration message with a list of server:host:port
|
||||
combinations that we'll need to connect with.
|
||||
'''
|
||||
objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ)
|
||||
conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE)
|
||||
|
||||
if values is None:
|
||||
self.session.info("emulation server data missing")
|
||||
return None
|
||||
values = values.split('|')
|
||||
# string of "server:ip:port,server:ip:port,..."
|
||||
serverstrings = values[0]
|
||||
server_list = serverstrings.split(',')
|
||||
for server in server_list:
|
||||
server_items = server.split(':')
|
||||
(name, host, port) = server_items[:3]
|
||||
if host == '':
|
||||
host = None
|
||||
if port == '':
|
||||
port = None
|
||||
else:
|
||||
port = int(port)
|
||||
sid = msg.gettlv(coreapi.CORE_TLV_CONF_SESSION)
|
||||
if sid is not None:
|
||||
# receive session ID and my IP from master
|
||||
self.session_id_master = int(sid.split('|')[0])
|
||||
self.myip = host
|
||||
host = None
|
||||
port = None
|
||||
# this connects to the server immediately; maybe we should wait
|
||||
# or spin off a new "client" thread here
|
||||
self.addserver(name, host, port)
|
||||
self.setupserver(name)
|
||||
return None
|
||||
|
||||
def handlemsg(self, msg):
|
||||
''' Handle an API message. Determine whether this needs to be handled
|
||||
by the local server or forwarded on to another one.
|
||||
Returns True when message does not need to be handled locally,
|
||||
and performs forwarding if required.
|
||||
Returning False indicates this message should be handled locally.
|
||||
'''
|
||||
serverlist = []
|
||||
handle_locally = False
|
||||
# Do not forward messages when in definition state
|
||||
# (for e.g. configuring services)
|
||||
if self.session.getstate() == coreapi.CORE_EVENT_DEFINITION_STATE:
|
||||
handle_locally = True
|
||||
return not handle_locally
|
||||
# Decide whether message should be handled locally or forwarded, or both
|
||||
if msg.msgtype == coreapi.CORE_API_NODE_MSG:
|
||||
(handle_locally, serverlist) = self.handlenodemsg(msg)
|
||||
elif msg.msgtype == coreapi.CORE_API_EVENT_MSG:
|
||||
# broadcast events everywhere
|
||||
serverlist = self.getserverlist()
|
||||
elif msg.msgtype == coreapi.CORE_API_CONF_MSG:
|
||||
# broadcast location and services configuration everywhere
|
||||
confobj = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ)
|
||||
if confobj == "location" or confobj == "services" or \
|
||||
confobj == "session":
|
||||
serverlist = self.getserverlist()
|
||||
elif msg.msgtype == coreapi.CORE_API_FILE_MSG:
|
||||
# broadcast hook scripts and custom service files everywhere
|
||||
filetype = msg.gettlv(coreapi.CORE_TLV_FILE_TYPE)
|
||||
if filetype is not None and \
|
||||
(filetype[:5] == "hook:" or filetype[:8] == "service:"):
|
||||
serverlist = self.getserverlist()
|
||||
|
||||
if msg.msgtype == coreapi.CORE_API_LINK_MSG:
|
||||
# prepare a serverlist from two node numbers in link message
|
||||
(handle_locally, serverlist, msg) = self.handlelinkmsg(msg)
|
||||
elif len(serverlist) == 0:
|
||||
# check for servers based on node numbers in all messages but link
|
||||
nn = msg.nodenumbers()
|
||||
if len(nn) == 0:
|
||||
return False
|
||||
serverlist = self.getserversbynode(nn[0])
|
||||
|
||||
if len(serverlist) == 0:
|
||||
handle_locally = True
|
||||
|
||||
# allow other handlers to process this message
|
||||
# (this is used by e.g. EMANE to use the link add message to keep counts
|
||||
# of interfaces on other servers)
|
||||
for handler in self.handlers:
|
||||
handler(msg)
|
||||
|
||||
# Perform any message forwarding
|
||||
handle_locally = self.forwardmsg(msg, serverlist, handle_locally)
|
||||
return not handle_locally
|
||||
|
||||
def setupserver(self, server):
|
||||
''' Send the appropriate API messages for configuring the specified
|
||||
emulation server.
|
||||
'''
|
||||
(host, port, sock) = self.getserver(server)
|
||||
if host is None or sock is None:
|
||||
return
|
||||
# communicate this session's current state to the server
|
||||
tlvdata = coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_TYPE,
|
||||
self.session.getstate())
|
||||
msg = coreapi.CoreEventMessage.pack(0, tlvdata)
|
||||
sock.send(msg)
|
||||
# send a Configuration message for the broker object and inform the
|
||||
# server of its local name
|
||||
tlvdata = ""
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ, "broker")
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE,
|
||||
coreapi.CONF_TYPE_FLAGS_UPDATE)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_DATA_TYPES,
|
||||
(coreapi.CONF_DATA_TYPE_STRING,))
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES,
|
||||
"%s:%s:%s" % (server, host, port))
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_SESSION,
|
||||
"%s" % self.session.sessionid)
|
||||
msg = coreapi.CoreConfMessage.pack(0, tlvdata)
|
||||
sock.send(msg)
|
||||
|
||||
@staticmethod
|
||||
def fixupremotetty(msghdr, msgdata, host):
|
||||
''' When an interactive TTY request comes from the GUI, snoop the reply
|
||||
and add an SSH command to the appropriate remote server.
|
||||
'''
|
||||
msgtype, msgflags, msglen = coreapi.CoreMessage.unpackhdr(msghdr)
|
||||
msgcls = coreapi.msg_class(msgtype)
|
||||
msg = msgcls(msgflags, msghdr, msgdata)
|
||||
|
||||
nodenum = msg.gettlv(coreapi.CORE_TLV_EXEC_NODE)
|
||||
execnum = msg.gettlv(coreapi.CORE_TLV_EXEC_NUM)
|
||||
cmd = msg.gettlv(coreapi.CORE_TLV_EXEC_CMD)
|
||||
res = msg.gettlv(coreapi.CORE_TLV_EXEC_RESULT)
|
||||
|
||||
tlvdata = ""
|
||||
tlvdata += coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_NODE, nodenum)
|
||||
tlvdata += coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_NUM, execnum)
|
||||
tlvdata += coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_CMD, cmd)
|
||||
title = "\\\"CORE: n%s @ %s\\\"" % (nodenum, host)
|
||||
res = "ssh -X -f " + host + " xterm -e " + res
|
||||
tlvdata += coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_RESULT, res)
|
||||
|
||||
return coreapi.CoreExecMessage.pack(msgflags, tlvdata)
|
||||
|
||||
def handlenodemsg(self, msg):
|
||||
''' Determine and return the servers to which this node message should
|
||||
be forwarded. Also keep track of link-layer nodes and the mapping of
|
||||
nodes to servers.
|
||||
'''
|
||||
serverlist = []
|
||||
handle_locally = False
|
||||
serverfiletxt = None
|
||||
# snoop Node Message for emulation server TLV and record mapping
|
||||
n = msg.tlvdata[coreapi.CORE_TLV_NODE_NUMBER]
|
||||
# replicate link-layer nodes on all servers
|
||||
nodetype = msg.gettlv(coreapi.CORE_TLV_NODE_TYPE)
|
||||
if nodetype is not None:
|
||||
try:
|
||||
nodecls = coreapi.node_class(nodetype)
|
||||
except KeyError:
|
||||
self.session.warn("broker invalid node type %s" % nodetype)
|
||||
return (False, serverlist)
|
||||
if nodecls is None:
|
||||
self.session.warn("broker unimplemented node type %s" % nodetype)
|
||||
return (False, serverlist)
|
||||
if issubclass(nodecls, PyCoreNet) and \
|
||||
nodetype != coreapi.CORE_NODE_WLAN:
|
||||
# network node replicated on all servers; could be optimized
|
||||
# don't replicate WLANs, because ebtables rules won't work
|
||||
serverlist = self.getserverlist()
|
||||
handle_locally = True
|
||||
self.addnet(n)
|
||||
for server in serverlist:
|
||||
self.addnodemap(server, n)
|
||||
# do not record server name for networks since network
|
||||
# nodes are replicated across all servers
|
||||
return (handle_locally, serverlist)
|
||||
if issubclass(nodecls, PyCoreNet) and \
|
||||
nodetype == coreapi.CORE_NODE_WLAN:
|
||||
# special case where remote WLANs not in session._objs, and no
|
||||
# node response message received, so they are counted here
|
||||
if msg.gettlv(coreapi.CORE_TLV_NODE_EMUSRV) is not None:
|
||||
self.incrbootcount()
|
||||
elif issubclass(nodecls, PyCoreNode):
|
||||
name = msg.gettlv(coreapi.CORE_TLV_NODE_NAME)
|
||||
if name:
|
||||
serverfiletxt = "%s %s %s" % (n, name, nodecls)
|
||||
if issubclass(nodecls, PhysicalNode):
|
||||
# remember physical nodes
|
||||
self.addphys(n)
|
||||
|
||||
# emulation server TLV specifies server
|
||||
server = msg.gettlv(coreapi.CORE_TLV_NODE_EMUSRV)
|
||||
if server is not None:
|
||||
self.addnodemap(server, n)
|
||||
if server not in serverlist:
|
||||
serverlist.append(server)
|
||||
if serverfiletxt and self.session.master:
|
||||
self.writenodeserver(serverfiletxt, server)
|
||||
# hook to update coordinates of physical nodes
|
||||
if n in self.phys:
|
||||
self.session.mobility.physnodeupdateposition(msg)
|
||||
return (handle_locally, serverlist)
|
||||
|
||||
def handlelinkmsg(self, msg):
|
||||
''' Determine and return the servers to which this link message should
|
||||
be forwarded. Also build tunnels between different servers or add
|
||||
opaque data to the link message before forwarding.
|
||||
'''
|
||||
serverlist = []
|
||||
handle_locally = False
|
||||
|
||||
# determine link message destination using non-network nodes
|
||||
nn = msg.nodenumbers()
|
||||
if nn[0] in self.nets:
|
||||
if nn[1] in self.nets:
|
||||
# two network nodes linked together - prevent loops caused by
|
||||
# the automatic tunnelling
|
||||
handle_locally = True
|
||||
else:
|
||||
serverlist = self.getserversbynode(nn[1])
|
||||
elif nn[1] in self.nets:
|
||||
serverlist = self.getserversbynode(nn[0])
|
||||
else:
|
||||
serverset1 = set(self.getserversbynode(nn[0]))
|
||||
serverset2 = set(self.getserversbynode(nn[1]))
|
||||
# nodes are on two different servers, build tunnels as needed
|
||||
if serverset1 != serverset2:
|
||||
localn = None
|
||||
if len(serverset1) == 0 or len(serverset2) == 0:
|
||||
handle_locally = True
|
||||
serverlist = list(serverset1 | serverset2)
|
||||
host = None
|
||||
# get the IP of remote server and decide which node number
|
||||
# is for a local node
|
||||
for server in serverlist:
|
||||
(host, port, sock) = self.getserver(server)
|
||||
if host is None:
|
||||
# named server is local
|
||||
handle_locally = True
|
||||
if server in serverset1:
|
||||
localn = nn[0]
|
||||
else:
|
||||
localn = nn[1]
|
||||
if handle_locally and localn is None:
|
||||
# having no local node at this point indicates local node is
|
||||
# the one with the empty serverset
|
||||
if len(serverset1) == 0:
|
||||
localn = nn[0]
|
||||
elif len(serverset2) == 0:
|
||||
localn = nn[1]
|
||||
if host is None:
|
||||
host = self.getlinkendpoint(msg, localn == nn[0])
|
||||
if localn is None:
|
||||
msg = self.addlinkendpoints(msg, serverset1, serverset2)
|
||||
elif msg.flags & coreapi.CORE_API_ADD_FLAG:
|
||||
self.addtunnel(host, nn[0], nn[1], localn)
|
||||
elif msg.flags & coreapi.CORE_API_DEL_FLAG:
|
||||
self.deltunnel(nn[0], nn[1])
|
||||
handle_locally = False
|
||||
else:
|
||||
serverlist = list(serverset1 | serverset2)
|
||||
|
||||
return (handle_locally, serverlist, msg)
|
||||
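# Illustrative sketch (not part of the original file): how a caller might tie
# the two routines above to forwardmsg() further below. 'broker' and 'msg' are
# assumed to be an existing broker instance and a parsed Node or Link message;
# only the return signatures shown above are relied upon.
def dispatch(broker, msg, is_link_msg):
    if is_link_msg:
        (handle_locally, servers, msg) = broker.handlelinkmsg(msg)
    else:
        (handle_locally, servers) = broker.handlenodemsg(msg)
    # forwardmsg() may flip handle_locally to True when the server list
    # contains the local (host=None) emulation server
    handle_locally = broker.forwardmsg(msg, servers, handle_locally)
    return handle_locally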
|
||||
def addlinkendpoints(self, msg, serverset1, serverset2):
|
||||
''' For a link message that is not handled locally, inform the remote
|
||||
servers of the IP addresses used as tunnel endpoints by adding
|
||||
opaque data to the link message.
|
||||
'''
|
||||
ip1 = ""
|
||||
for server in serverset1:
|
||||
(host, port, sock) = self.getserver(server)
|
||||
if host is not None:
|
||||
ip1 = host
|
||||
ip2 = ""
|
||||
for server in serverset2:
|
||||
(host, port, sock) = self.getserver(server)
|
||||
if host is not None:
|
||||
ip2 = host
|
||||
tlvdata = msg.rawmsg[coreapi.CoreMessage.hdrsiz:]
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_OPAQUE,
|
||||
"%s:%s" % (ip1, ip2))
|
||||
newraw = coreapi.CoreLinkMessage.pack(msg.flags, tlvdata)
|
||||
msghdr = newraw[:coreapi.CoreMessage.hdrsiz]
|
||||
return coreapi.CoreLinkMessage(msg.flags, msghdr, tlvdata)
|
||||
|
||||
def getlinkendpoint(self, msg, first_is_local):
|
||||
''' A link message between two different servers has been received,
|
||||
and we need to determine the tunnel endpoint. First look for
|
||||
opaque data in the link message, otherwise use the IP of the message
|
||||
sender (the master server).
|
||||
'''
|
||||
host = None
|
||||
opaque = msg.gettlv(coreapi.CORE_TLV_LINK_OPAQUE)
|
||||
if opaque is not None:
|
||||
if first_is_local:
|
||||
host = opaque.split(':')[1]
|
||||
else:
|
||||
host = opaque.split(':')[0]
|
||||
if host == "":
|
||||
host = None
|
||||
if host is None:
|
||||
# get IP address from API message sender (master)
|
||||
self.session._handlerslock.acquire()
|
||||
for h in self.session._handlers:
|
||||
if h.client_address != "":
|
||||
host = h.client_address[0]
|
||||
self.session._handlerslock.release()
|
||||
return host
|
||||
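# Illustrative sketch (not part of the original file): the opaque TLV written
# by addlinkendpoints() above is simply "ip1:ip2", so the endpoint selection
# logic can be exercised on its own with plain strings.
def endpoint_from_opaque(opaque, first_is_local):
    ''' Return the tunnel endpoint recorded in an "ip1:ip2" opaque string,
    or None when that side was left empty.
    '''
    parts = opaque.split(':')
    host = parts[1] if first_is_local else parts[0]
    return host or None

# endpoint_from_opaque("10.0.0.1:10.0.0.2", True)  -> "10.0.0.2"
# endpoint_from_opaque(":10.0.0.2", False)         -> None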
|
||||
def forwardmsg(self, msg, serverlist, handle_locally):
|
||||
''' Forward API message to all servers in serverlist; if an empty
|
||||
host/port is encountered, set the handle_locally flag. Returns the
|
||||
value of the handle_locally flag, which may be unchanged.
|
||||
'''
|
||||
for server in serverlist:
|
||||
try:
|
||||
(host, port, sock) = self.getserver(server)
|
||||
except KeyError:
|
||||
# server not found, don't handle this message locally
|
||||
self.session.info("broker could not find server %s, message " \
|
||||
"with type %s dropped" % \
|
||||
(server, msg.msgtype))
|
||||
continue
|
||||
if host is None and port is None:
|
||||
# local emulation server, handle this locally
|
||||
handle_locally = True
|
||||
else:
|
||||
if sock is None:
|
||||
self.session.info("server %s @ %s:%s is disconnected" % \
|
||||
(server, host, port))
|
||||
else:
|
||||
sock.send(msg.rawmsg)
|
||||
return handle_locally
|
||||
|
||||
def writeservers(self):
|
||||
''' Write the server list to a text file in the session directory upon
|
||||
startup: /tmp/pycore.nnnnn/servers
|
||||
'''
|
||||
filename = os.path.join(self.session.sessiondir, "servers")
|
||||
try:
|
||||
f = open(filename, "w")
|
||||
master = self.session_id_master
|
||||
if master is None:
|
||||
master = self.session.sessionid
|
||||
f.write("master=%s\n" % master)
|
||||
self.servers_lock.acquire()
|
||||
for name in sorted(self.servers.keys()):
|
||||
if name == "localhost":
|
||||
continue
|
||||
(host, port, sock) = self.servers[name]
|
||||
f.write("%s %s %s\n" % (name, host, port))
|
||||
f.close()
|
||||
except Exception, e:
|
||||
self.session.warn("Error writing server list to the file: %s\n%s" \
|
||||
% (filename, e))
|
||||
finally:
|
||||
self.servers_lock.release()
|
||||
|
||||
def writenodeserver(self, nodestr, server):
|
||||
''' Creates a /tmp/pycore.nnnnn/nX.conf/server file having the node
|
||||
and server info. This may be used by scripts for accessing nodes on
|
||||
other machines, much like local nodes may be accessed via the
|
||||
VnodeClient class.
|
||||
'''
|
||||
(host, port, sock) = self.getserver(server)
|
||||
serverstr = "%s %s %s" % (server, host, port)
|
||||
name = nodestr.split()[1]
|
||||
dirname = os.path.join(self.session.sessiondir, name + ".conf")
|
||||
filename = os.path.join(dirname, "server")
|
||||
try:
|
||||
os.makedirs(dirname)
|
||||
except OSError:
|
||||
# directory may already exist from previous distributed run
|
||||
pass
|
||||
try:
|
||||
f = open(filename, "w")
|
||||
f.write("%s\n%s\n" % (serverstr, nodestr))
|
||||
f.close()
|
||||
return True
|
||||
except Exception, e:
|
||||
msg = "Error writing server file '%s'" % filename
|
||||
msg += "for node %s:\n%s" % (name, e)
|
||||
self.session.warn(msg)
|
||||
return False
|
||||
|
||||
|
0
daemon/core/bsd/__init__.py
Normal file
70
daemon/core/bsd/netgraph.py
Normal file
|
@ -0,0 +1,70 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: core-dev@pf.itd.nrl.navy.mil
|
||||
#
|
||||
'''
|
||||
netgraph.py: Netgraph helper functions; for now these are wrappers around
|
||||
ngctl commands.
|
||||
'''
|
||||
|
||||
import subprocess
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
|
||||
checkexec([NGCTL_BIN])
|
||||
|
||||
def createngnode(type, hookstr, name=None):
|
||||
''' Create a new Netgraph node of type and optionally assign name. The
|
||||
hook string hookstr should contain two names. This is a string so
|
||||
other commands may be inserted after the two names.
|
||||
Return the name and netgraph ID of the new node.
|
||||
'''
|
||||
hook1 = hookstr.split()[0]
|
||||
ngcmd = "mkpeer %s %s \n show .%s" % (type, hookstr, hook1)
|
||||
cmd = [NGCTL_BIN, "-f", "-"]
|
||||
cmdid = subprocess.Popen(cmd, stdin = subprocess.PIPE,
|
||||
stdout = subprocess.PIPE,
|
||||
stderr = subprocess.PIPE)
|
||||
cmdid.stdin.write(ngcmd)
|
||||
cmdid.stdin.close()
|
||||
result = cmdid.stdout.read()
|
||||
result += cmdid.stderr.read()
|
||||
cmdid.stdout.close()
|
||||
cmdid.stderr.close()
|
||||
status = cmdid.wait()
|
||||
if status > 0:
|
||||
raise Exception, "error creating Netgraph node %s (%s): %s" % \
|
||||
(type, ngcmd, result)
|
||||
results = result.split()
|
||||
ngname = results[1]
|
||||
ngid = results[5]
|
||||
if name:
|
||||
check_call([NGCTL_BIN, "name", "[0x%s]:" % ngid, name])
|
||||
return (ngname, ngid)
|
||||
|
||||
def destroyngnode(name):
|
||||
''' Shutdown a Netgraph node having the given name.
|
||||
'''
|
||||
check_call([NGCTL_BIN, "shutdown", "%s:" % name])
|
||||
|
||||
def connectngnodes(name1, name2, hook1, hook2):
|
||||
''' Connect two hooks of two Netgraph nodes given by their names.
|
||||
'''
|
||||
node1 = "%s:" % name1
|
||||
node2 = "%s:" % name2
|
||||
check_call([NGCTL_BIN, "connect", node1, node2, hook1, hook2])
|
||||
|
||||
def ngmessage(name, msg):
|
||||
''' Send a Netgraph message to the node named name.
|
||||
'''
|
||||
cmd = [NGCTL_BIN, "msg", "%s:" % name] + msg
|
||||
check_call(cmd)
|
||||
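# Illustrative sketch (not part of the original file): wiring two persistent
# ng_hub nodes together with the helpers above. This requires FreeBSD with
# Netgraph and root privileges; the node names are arbitrary examples and the
# hook string matches the one used by HubNode in core.bsd.nodes.
if __name__ == "__main__":
    hooks = "link0 link0\nmsg .link0 setpersistent"
    createngnode(type="hub", hookstr=hooks, name="demo_a")
    createngnode(type="hub", hookstr=hooks, name="demo_b")
    # join the two hubs on a free hook of each
    connectngnodes("demo_a", "demo_b", "link1", "link1")
    # tear both nodes down again
    destroyngnode("demo_a")
    destroyngnode("demo_b")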
|
||||
def ngloadkernelmodule(name):
|
||||
''' Load a kernel module by invoking kldload. This is needed for the
|
||||
ng_ether module which automatically creates Netgraph nodes when loaded.
|
||||
'''
|
||||
mutecall(["kldload", name])
|
197
daemon/core/bsd/nodes.py
Normal file
|
@ -0,0 +1,197 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: core-dev@pf.itd.nrl.navy.mil
|
||||
#
|
||||
|
||||
'''
|
||||
nodes.py: definition of CoreNode classes and other node classes that inherit
|
||||
from the CoreNode, implementing specific node types.
|
||||
'''
|
||||
|
||||
from vnode import *
|
||||
from vnet import *
|
||||
from core.constants import *
|
||||
from core.misc.ipaddr import *
|
||||
from core.api import coreapi
|
||||
from core.bsd.netgraph import ngloadkernelmodule
|
||||
|
||||
checkexec([IFCONFIG_BIN])
|
||||
|
||||
class CoreNode(JailNode):
|
||||
apitype = coreapi.CORE_NODE_DEF
|
||||
|
||||
class PtpNet(NetgraphPipeNet):
|
||||
def tonodemsg(self, flags):
|
||||
''' Do not generate a Node Message for point-to-point links. They are
|
||||
built using a link message instead.
|
||||
'''
|
||||
pass
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
''' Build CORE API TLVs for a point-to-point link. One Link message
|
||||
describes this network.
|
||||
'''
|
||||
tlvdata = ""
|
||||
if len(self._netif) != 2:
|
||||
return tlvdata
|
||||
(if1, if2) = self._netif.items()
|
||||
if1 = if1[1]
|
||||
if2 = if2[1]
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER,
|
||||
if1.node.objid)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER,
|
||||
if2.node.objid)
|
||||
delay = if1.getparam('delay')
|
||||
bw = if1.getparam('bw')
|
||||
loss = if1.getparam('loss')
|
||||
duplicate = if1.getparam('duplicate')
|
||||
jitter = if1.getparam('jitter')
|
||||
if delay is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_DELAY,
|
||||
delay)
|
||||
if bw is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_BW, bw)
|
||||
if loss is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_PER,
|
||||
str(loss))
|
||||
if duplicate is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_DUP,
|
||||
str(duplicate))
|
||||
if jitter is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_JITTER,
|
||||
jitter)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_TYPE,
|
||||
self.linktype)
|
||||
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF1NUM, \
|
||||
if1.node.getifindex(if1))
|
||||
for addr in if1.addrlist:
|
||||
(ip, sep, mask) = addr.partition('/')
|
||||
mask = int(mask)
|
||||
if isIPv4Address(ip):
|
||||
family = AF_INET
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF1IP4
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF1IP4MASK
|
||||
else:
|
||||
family = AF_INET6
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF1IP6
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF1IP6MASK
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip,
|
||||
IPAddr(af=family, addr=ipl))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask)
|
||||
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2NUM, \
|
||||
if2.node.getifindex(if2))
|
||||
for addr in if2.addrlist:
|
||||
(ip, sep, mask) = addr.partition('/')
|
||||
mask = int(mask)
|
||||
if isIPv4Address(ip):
|
||||
family = AF_INET
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP4
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP4MASK
|
||||
else:
|
||||
family = AF_INET6
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP6
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP6MASK
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip,
|
||||
IPAddr(af=family, addr=ipl))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask)
|
||||
|
||||
msg = coreapi.CoreLinkMessage.pack(flags, tlvdata)
|
||||
return [msg,]
|
||||
|
||||
class SwitchNode(NetgraphNet):
|
||||
ngtype = "bridge"
|
||||
nghooks = "link0 link0\nmsg .link0 setpersistent"
|
||||
apitype = coreapi.CORE_NODE_SWITCH
|
||||
policy = "ACCEPT"
|
||||
|
||||
class HubNode(NetgraphNet):
|
||||
ngtype = "hub"
|
||||
nghooks = "link0 link0\nmsg .link0 setpersistent"
|
||||
apitype = coreapi.CORE_NODE_HUB
|
||||
policy = "ACCEPT"
|
||||
|
||||
class WlanNode(NetgraphNet):
|
||||
ngtype = "wlan"
|
||||
nghooks = "anchor anchor"
|
||||
apitype = coreapi.CORE_NODE_WLAN
|
||||
linktype = coreapi.CORE_LINK_WIRELESS
|
||||
policy = "DROP"
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True, policy = None):
|
||||
NetgraphNet.__init__(self, session, objid, name, verbose, start, policy)
|
||||
# wireless model such as basic range
|
||||
self.model = None
|
||||
# mobility model such as scripted
|
||||
self.mobility = None
|
||||
|
||||
def attach(self, netif):
|
||||
NetgraphNet.attach(self, netif)
|
||||
if self.model:
|
||||
netif.poshook = self.model._positioncallback
|
||||
if netif.node is None:
|
||||
return
|
||||
(x,y,z) = netif.node.position.get()
|
||||
netif.poshook(netif, x, y, z)
|
||||
|
||||
def setmodel(self, model, config):
|
||||
''' Set the wireless or mobility model for this WLAN.
|
||||
'''
|
||||
if (self.verbose):
|
||||
self.info("adding model %s" % model._name)
|
||||
if model._type == coreapi.CORE_TLV_REG_WIRELESS:
|
||||
self.model = model(session=self.session, objid=self.objid,
|
||||
verbose=self.verbose, values=config)
|
||||
if self.model._positioncallback:
|
||||
for netif in self.netifs():
|
||||
netif.poshook = self.model._positioncallback
|
||||
if netif.node is not None:
|
||||
(x,y,z) = netif.node.position.get()
|
||||
netif.poshook(netif, x, y, z)
|
||||
self.model.setlinkparams()
|
||||
elif model._type == coreapi.CORE_TLV_REG_MOBILITY:
|
||||
self.mobility = model(session=self.session, objid=self.objid,
|
||||
verbose=self.verbose, values=config)
|
||||
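# Illustrative sketch (not part of the original file): attaching a wireless
# model to a WlanNode. BasicRangeModel from core.mobility is assumed to be
# available as in other parts of CORE; any class exposing _type,
# getdefaultvalues() and _positioncallback would be handled the same way.
#
#   from core.mobility import BasicRangeModel
#   wlan = session.addobj(cls=WlanNode, name="wlan1")
#   wlan.setmodel(BasicRangeModel, list(BasicRangeModel.getdefaultvalues()))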
|
||||
|
||||
class RJ45Node(NetgraphPipeNet):
|
||||
apitype = coreapi.CORE_NODE_RJ45
|
||||
policy = "ACCEPT"
|
||||
|
||||
def __init__(self, session, objid, name, verbose, start = True):
|
||||
if start:
|
||||
ngloadkernelmodule("ng_ether")
|
||||
NetgraphPipeNet.__init__(self, session, objid, name, verbose, start)
|
||||
if start:
|
||||
self.setpromisc(True)
|
||||
|
||||
def shutdown(self):
|
||||
self.setpromisc(False)
|
||||
NetgraphPipeNet.shutdown(self)
|
||||
|
||||
def setpromisc(self, promisc):
|
||||
p = "promisc"
|
||||
if not promisc:
|
||||
p = "-" + p
|
||||
check_call([IFCONFIG_BIN, self.name, "up", p])
|
||||
|
||||
def attach(self, netif):
|
||||
if len(self._netif) > 0:
|
||||
raise ValueError, \
|
||||
"RJ45 networks support at most 1 network interface"
|
||||
NetgraphPipeNet.attach(self, netif)
|
||||
connectngnodes(self.ngname, self.name, self.gethook(), "lower")
|
||||
|
||||
class TunnelNode(NetgraphNet):
|
||||
ngtype = "pipe"
|
||||
nghooks = "upper lower"
|
||||
apitype = coreapi.CORE_NODE_TUNNEL
|
||||
policy = "ACCEPT"
|
||||
|
216
daemon/core/bsd/vnet.py
Normal file
|
@ -0,0 +1,216 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: core-dev@pf.itd.nrl.navy.mil
|
||||
#
|
||||
'''
|
||||
vnet.py: NetgraphNet and NetgraphPipeNet classes that implement virtual networks
|
||||
using the FreeBSD Netgraph subsystem.
|
||||
'''
|
||||
|
||||
import sys, threading
|
||||
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
from core.coreobj import PyCoreNet, PyCoreObj
|
||||
from core.bsd.netgraph import *
|
||||
from core.bsd.vnode import VEth
|
||||
|
||||
class NetgraphNet(PyCoreNet):
|
||||
ngtype = None
|
||||
nghooks = ()
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True, policy = None):
|
||||
PyCoreNet.__init__(self, session, objid, name)
|
||||
if name is None:
|
||||
name = str(self.objid)
|
||||
if policy is not None:
|
||||
self.policy = policy
|
||||
self.name = name
|
||||
self.ngname = "n_%s_%s" % (str(self.objid), self.session.sessionid)
|
||||
self.ngid = None
|
||||
self.verbose = verbose
|
||||
self._netif = {}
|
||||
self._linked = {}
|
||||
self.up = False
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def startup(self):
|
||||
tmp, self.ngid = createngnode(type=self.ngtype, hookstr=self.nghooks,
|
||||
name=self.ngname)
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
self.up = False
|
||||
while self._netif:
|
||||
k, netif = self._netif.popitem()
|
||||
if netif.pipe:
|
||||
pipe = netif.pipe
|
||||
netif.pipe = None
|
||||
pipe.shutdown()
|
||||
else:
|
||||
netif.shutdown()
|
||||
self._netif.clear()
|
||||
self._linked.clear()
|
||||
del self.session
|
||||
destroyngnode(self.ngname)
|
||||
|
||||
def attach(self, netif):
|
||||
''' Attach an interface to this netgraph node. Create a pipe between
|
||||
the interface and the hub/switch/wlan node.
|
||||
(Note that the PtpNet subclass overrides this method.)
|
||||
'''
|
||||
if self.up:
|
||||
pipe = self.session.addobj(cls = NetgraphPipeNet,
|
||||
verbose = self.verbose, start = True)
|
||||
pipe.attach(netif)
|
||||
hook = "link%d" % len(self._netif)
|
||||
pipe.attachnet(self, hook)
|
||||
PyCoreNet.attach(self, netif)
|
||||
|
||||
def detach(self, netif):
|
||||
if self.up:
|
||||
pass
|
||||
PyCoreNet.detach(self, netif)
|
||||
|
||||
def linked(self, netif1, netif2):
|
||||
# check if the network interfaces are attached to this network
|
||||
if self._netif[netif1] != netif1:
|
||||
raise ValueError, "inconsistency for netif %s" % netif1.name
|
||||
if self._netif[netif2] != netif2:
|
||||
raise ValueError, "inconsistency for netif %s" % netif2.name
|
||||
try:
|
||||
linked = self._linked[netif1][netif2]
|
||||
except KeyError:
|
||||
linked = False
|
||||
self._linked[netif1][netif2] = linked
|
||||
return linked
|
||||
|
||||
def unlink(self, netif1, netif2):
|
||||
if not self.linked(netif1, netif2):
|
||||
return
|
||||
msg = ["unlink", "{", "node1=0x%s" % netif1.pipe.ngid]
|
||||
msg += ["node2=0x%s" % netif2.pipe.ngid, "}"]
|
||||
ngmessage(self.ngname, msg)
|
||||
self._linked[netif1][netif2] = False
|
||||
|
||||
def link(self, netif1, netif2):
|
||||
if self.linked(netif1, netif2):
|
||||
return
|
||||
msg = ["link", "{", "node1=0x%s" % netif1.pipe.ngid]
|
||||
msg += ["node2=0x%s" % netif2.pipe.ngid, "}"]
|
||||
ngmessage(self.ngname, msg)
|
||||
self._linked[netif1][netif2] = True
|
||||
|
||||
def linknet(self, net):
|
||||
''' Link this bridge with another by creating a veth pair and installing
|
||||
each device into each bridge.
|
||||
'''
|
||||
raise NotImplementedError
|
||||
|
||||
def linkconfig(self, netif, bw = None, delay = None,
|
||||
loss = None, duplicate = None, jitter = None, netif2=None):
|
||||
''' Set link effects by modifying the pipe connected to an interface.
|
||||
'''
|
||||
if not netif.pipe:
|
||||
self.warn("linkconfig for %s but interface %s has no pipe" % \
|
||||
(self.name, netif.name))
|
||||
return
|
||||
return netif.pipe.linkconfig(netif, bw, delay, loss, duplicate, jitter,
|
||||
netif2)
|
||||
|
||||
class NetgraphPipeNet(NetgraphNet):
|
||||
ngtype = "pipe"
|
||||
nghooks = "upper lower"
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True, policy = None):
|
||||
NetgraphNet.__init__(self, session, objid, name, verbose, start, policy)
|
||||
if start:
|
||||
# account for Ethernet header
|
||||
ngmessage(self.ngname, ["setcfg", "{", "header_offset=14", "}"])
|
||||
|
||||
def attach(self, netif):
|
||||
''' Attach an interface to this pipe node.
|
||||
The first interface is connected to the "upper" hook, the second
|
||||
connected to the "lower" hook.
|
||||
'''
|
||||
if len(self._netif) > 1:
|
||||
raise ValueError, \
|
||||
"Netgraph pipes support at most 2 network interfaces"
|
||||
if self.up:
|
||||
hook = self.gethook()
|
||||
connectngnodes(self.ngname, netif.localname, hook, netif.hook)
|
||||
if netif.pipe:
|
||||
raise ValueError, \
|
||||
"Interface %s already attached to pipe %s" % \
|
||||
(netif.name, netif.pipe.name)
|
||||
netif.pipe = self
|
||||
self._netif[netif] = netif
|
||||
self._linked[netif] = {}
|
||||
|
||||
def attachnet(self, net, hook):
|
||||
''' Attach another NetgraphNet to this pipe node.
|
||||
'''
|
||||
localhook = self.gethook()
|
||||
connectngnodes(self.ngname, net.ngname, localhook, hook)
|
||||
|
||||
def gethook(self):
|
||||
''' Returns the first hook (e.g. "upper") then the second hook
|
||||
(e.g. "lower") based on the number of connections.
|
||||
'''
|
||||
hooks = self.nghooks.split()
|
||||
if len(self._netif) == 0:
|
||||
return hooks[0]
|
||||
else:
|
||||
return hooks[1]
|
||||
|
||||
def linkconfig(self, netif, bw = None, delay = None,
|
||||
loss = None, duplicate = None, jitter = None, netif2 = None):
|
||||
''' Set link effects by sending a Netgraph setcfg message to the pipe.
|
||||
'''
|
||||
netif.setparam('bw', bw)
|
||||
netif.setparam('delay', delay)
|
||||
netif.setparam('loss', loss)
|
||||
netif.setparam('duplicate', duplicate)
|
||||
netif.setparam('jitter', jitter)
|
||||
if not self.up:
|
||||
return
|
||||
params = []
|
||||
upstream = []
|
||||
downstream = []
|
||||
if bw is not None:
|
||||
if str(bw)=="0":
|
||||
bw="-1"
|
||||
params += ["bandwidth=%s" % bw,]
|
||||
if delay is not None:
|
||||
if str(delay)=="0":
|
||||
delay="-1"
|
||||
params += ["delay=%s" % delay,]
|
||||
if loss is not None:
|
||||
if str(loss)=="0":
|
||||
loss="-1"
|
||||
upstream += ["BER=%s" % loss,]
|
||||
downstream += ["BER=%s" % loss,]
|
||||
if duplicate is not None:
|
||||
if str(duplicate)=="0":
|
||||
duplicate="-1"
|
||||
upstream += ["duplicate=%s" % duplicate,]
|
||||
downstream += ["duplicate=%s" % duplicate,]
|
||||
if jitter:
|
||||
self.warn("jitter parameter ignored for link %s" % self.name)
|
||||
if len(params) > 0 or len(upstream) > 0 or len(downstream) > 0:
|
||||
setcfg = ["setcfg", "{",] + params
|
||||
if len(upstream) > 0:
|
||||
setcfg += ["upstream={",] + upstream + ["}",]
|
||||
if len(downstream) > 0:
|
||||
setcfg += ["downstream={",] + downstream + ["}",]
|
||||
setcfg += ["}",]
|
||||
ngmessage(self.ngname, setcfg)
|
||||
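# Illustrative note (not part of the original file): calling, for example,
#   pipe.linkconfig(netif, bw=54000000, delay=10000, loss=1.0)
# on a running pipe results in roughly this ngctl invocation (values are
# passed through as strings, with "0" mapped to "-1" to clear a setting):
#   ngctl msg n_7_54321: setcfg { bandwidth=54000000 delay=10000 \
#       upstream={ BER=1.0 } downstream={ BER=1.0 } }
# where n_7_54321 stands in for this pipe's generated ngname.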
|
393
daemon/core/bsd/vnode.py
Normal file
|
@ -0,0 +1,393 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: core-dev@pf.itd.nrl.navy.mil
|
||||
#
|
||||
'''
|
||||
vnode.py: SimpleJailNode and JailNode classes that implement the FreeBSD
|
||||
jail-based virtual node.
|
||||
'''
|
||||
|
||||
import os, signal, sys, subprocess, threading, string
|
||||
import random, time
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
from core.coreobj import PyCoreObj, PyCoreNode, PyCoreNetIf, Position
|
||||
from core.emane.nodes import EmaneNode
|
||||
from core.bsd.netgraph import *
|
||||
|
||||
checkexec([IFCONFIG_BIN, VIMAGE_BIN])
|
||||
|
||||
class VEth(PyCoreNetIf):
|
||||
def __init__(self, node, name, localname, mtu = 1500, net = None,
|
||||
start = True):
|
||||
PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu)
|
||||
# name is the device name (e.g. ngeth0, ngeth1, etc.) before it is
|
||||
# installed in a node; the Netgraph name is renamed to localname
|
||||
# e.g. before install: name = ngeth0 localname = n0_0_123
|
||||
# after install: name = eth0 localname = n0_0_123
|
||||
self.localname = localname
|
||||
self.ngid = None
|
||||
self.net = None
|
||||
self.pipe = None
|
||||
self.addrlist = []
|
||||
self.hwaddr = None
|
||||
self.up = False
|
||||
self.hook = "ether"
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def startup(self):
|
||||
hookstr = "%s %s" % (self.hook, self.hook)
|
||||
ngname, ngid = createngnode(type="eiface", hookstr=hookstr,
|
||||
name=self.localname)
|
||||
self.name = ngname
|
||||
self.ngid = ngid
|
||||
check_call([IFCONFIG_BIN, ngname, "up"])
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
destroyngnode(self.localname)
|
||||
self.up = False
|
||||
|
||||
def attachnet(self, net):
|
||||
if self.net:
|
||||
self.detachnet()
|
||||
self.net = None
|
||||
net.attach(self)
|
||||
self.net = net
|
||||
|
||||
def detachnet(self):
|
||||
if self.net is not None:
|
||||
self.net.detach(self)
|
||||
|
||||
def addaddr(self, addr):
|
||||
self.addrlist.append(addr)
|
||||
|
||||
def deladdr(self, addr):
|
||||
self.addrlist.remove(addr)
|
||||
|
||||
def sethwaddr(self, addr):
|
||||
self.hwaddr = addr
|
||||
|
||||
class TunTap(PyCoreNetIf):
|
||||
'''TUN/TAP virtual device in TAP mode'''
|
||||
def __init__(self, node, name, localname, mtu = None, net = None,
|
||||
start = True):
|
||||
raise NotImplementedError
|
||||
|
||||
class SimpleJailNode(PyCoreNode):
|
||||
def __init__(self, session, objid = None, name = None, nodedir = None,
|
||||
verbose = False):
|
||||
PyCoreNode.__init__(self, session, objid, name)
|
||||
self.nodedir = nodedir
|
||||
self.verbose = verbose
|
||||
self.pid = None
|
||||
self.up = False
|
||||
self.lock = threading.RLock()
|
||||
self._mounts = []
|
||||
|
||||
def startup(self):
|
||||
if self.up:
|
||||
raise Exception, "already up"
|
||||
vimg = [VIMAGE_BIN, "-c", self.name]
|
||||
try:
|
||||
os.spawnlp(os.P_WAIT, VIMAGE_BIN, *vimg)
|
||||
except OSError:
|
||||
raise Exception, ("vimage command not found while running: %s" % \
|
||||
vimg)
|
||||
self.info("bringing up loopback interface")
|
||||
self.cmd([IFCONFIG_BIN, "lo0", "127.0.0.1"])
|
||||
self.info("setting hostname: %s" % self.name)
|
||||
self.cmd(["hostname", self.name])
|
||||
self.cmd([SYSCTL_BIN, "vfs.morphing_symlinks=1"])
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
for netif in self.netifs():
|
||||
netif.shutdown()
|
||||
self._netif.clear()
|
||||
del self.session
|
||||
vimg = [VIMAGE_BIN, "-d", self.name]
|
||||
try:
|
||||
os.spawnlp(os.P_WAIT, VIMAGE_BIN, *vimg)
|
||||
except OSError:
|
||||
raise Exception, ("vimage command not found while running: %s" % \
|
||||
vimg)
|
||||
self.up = False
|
||||
|
||||
def cmd(self, args, wait = True):
|
||||
if wait:
|
||||
mode = os.P_WAIT
|
||||
else:
|
||||
mode = os.P_NOWAIT
|
||||
tmp = call([VIMAGE_BIN, self.name] + args, cwd=self.nodedir)
|
||||
if not wait:
|
||||
tmp = None
|
||||
if tmp:
|
||||
self.warn("cmd exited with status %s: %s" % (tmp, str(args)))
|
||||
return tmp
|
||||
|
||||
def cmdresult(self, args, wait = True):
|
||||
cmdid, cmdin, cmdout, cmderr = self.popen(args)
|
||||
result = cmdout.read()
|
||||
result += cmderr.read()
|
||||
cmdin.close()
|
||||
cmdout.close()
|
||||
cmderr.close()
|
||||
if wait:
|
||||
status = cmdid.wait()
|
||||
else:
|
||||
status = 0
|
||||
return (status, result)
|
||||
|
||||
def popen(self, args):
|
||||
cmd = [VIMAGE_BIN, self.name]
|
||||
cmd.extend(args)
|
||||
tmp = subprocess.Popen(cmd, stdin = subprocess.PIPE,
|
||||
stdout = subprocess.PIPE,
|
||||
stderr = subprocess.PIPE, cwd=self.nodedir)
|
||||
return tmp, tmp.stdin, tmp.stdout, tmp.stderr
|
||||
|
||||
def icmd(self, args):
|
||||
return os.spawnlp(os.P_WAIT, VIMAGE_BIN, VIMAGE_BIN, self.name, *args)
|
||||
|
||||
def term(self, sh = "/bin/sh"):
|
||||
return os.spawnlp(os.P_WAIT, "xterm", "xterm", "-ut",
|
||||
"-title", self.name, "-e", VIMAGE_BIN, self.name, sh)
|
||||
|
||||
def termcmdstring(self, sh = "/bin/sh"):
|
||||
''' We add 'sudo' to the command string because the GUI runs as a
|
||||
normal user.
|
||||
'''
|
||||
return "cd %s && sudo %s %s %s" % (self.nodedir, VIMAGE_BIN, self.name, sh)
|
||||
|
||||
def shcmd(self, cmdstr, sh = "/bin/sh"):
|
||||
return self.cmd([sh, "-c", cmdstr])
|
||||
|
||||
def boot(self):
|
||||
pass
|
||||
|
||||
def mount(self, source, target):
|
||||
source = os.path.abspath(source)
|
||||
self.info("mounting %s at %s" % (source, target))
|
||||
self.addsymlink(path=target, file=None)
|
||||
|
||||
def umount(self, target):
|
||||
self.info("unmounting '%s'" % target)
|
||||
|
||||
def newveth(self, ifindex = None, ifname = None, net = None):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if ifindex is None:
|
||||
ifindex = self.newifindex()
|
||||
if ifname is None:
|
||||
ifname = "eth%d" % ifindex
|
||||
sessionid = self.session.shortsessionid()
|
||||
name = "n%s_%s_%s" % (self.objid, ifindex, sessionid)
|
||||
localname = name
|
||||
ifclass = VEth
|
||||
veth = ifclass(node = self, name = name, localname = localname,
|
||||
mtu = 1500, net = net, start = self.up)
|
||||
if self.up:
|
||||
# install into jail
|
||||
check_call([IFCONFIG_BIN, veth.name, "vnet", self.name])
|
||||
# rename from "ngeth0" to "eth0"
|
||||
self.cmd([IFCONFIG_BIN, veth.name, "name", ifname])
|
||||
veth.name = ifname
|
||||
try:
|
||||
self.addnetif(veth, ifindex)
|
||||
except:
|
||||
veth.shutdown()
|
||||
del veth
|
||||
raise
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def sethwaddr(self, ifindex, addr):
|
||||
self._netif[ifindex].sethwaddr(addr)
|
||||
if self.up:
|
||||
self.cmd([IFCONFIG_BIN, self.ifname(ifindex), "link",
|
||||
str(addr)])
|
||||
|
||||
def addaddr(self, ifindex, addr):
|
||||
if self.up:
|
||||
if ':' in addr:
|
||||
family = "inet6"
|
||||
else:
|
||||
family = "inet"
|
||||
self.cmd([IFCONFIG_BIN, self.ifname(ifindex), family, "alias",
|
||||
str(addr)])
|
||||
self._netif[ifindex].addaddr(addr)
|
||||
|
||||
def deladdr(self, ifindex, addr):
|
||||
try:
|
||||
self._netif[ifindex].deladdr(addr)
|
||||
except ValueError:
|
||||
self.warn("trying to delete unknown address: %s" % addr)
|
||||
if self.up:
|
||||
if ':' in addr:
|
||||
family = "inet6"
|
||||
else:
|
||||
family = "inet"
|
||||
self.cmd([IFCONFIG_BIN, self.ifname(ifindex), family, "-alias",
|
||||
str(addr)])
|
||||
|
||||
valid_deladdrtype = ("inet", "inet6", "inet6link")
|
||||
def delalladdr(self, ifindex, addrtypes = valid_deladdrtype):
|
||||
addr = self.getaddr(self.ifname(ifindex), rescan = True)
|
||||
for t in addrtypes:
|
||||
if t not in self.valid_deladdrtype:
|
||||
raise ValueError, "addr type must be in: " + \
|
||||
" ".join(self.valid_deladdrtype)
|
||||
for a in addr[t]:
|
||||
self.deladdr(ifindex, a)
|
||||
# update cached information
|
||||
self.getaddr(self.ifname(ifindex), rescan = True)
|
||||
|
||||
def ifup(self, ifindex):
|
||||
if self.up:
|
||||
self.cmd([IFCONFIG_BIN, self.ifname(ifindex), "up"])
|
||||
|
||||
def newnetif(self, net = None, addrlist = [], hwaddr = None,
|
||||
ifindex = None, ifname = None):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
ifindex = self.newveth(ifindex = ifindex, ifname = ifname,
|
||||
net = net)
|
||||
if net is not None:
|
||||
self.attachnet(ifindex, net)
|
||||
if hwaddr:
|
||||
self.sethwaddr(ifindex, hwaddr)
|
||||
for addr in maketuple(addrlist):
|
||||
self.addaddr(ifindex, addr)
|
||||
self.ifup(ifindex)
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def attachnet(self, ifindex, net):
|
||||
self._netif[ifindex].attachnet(net)
|
||||
|
||||
def detachnet(self, ifindex):
|
||||
self._netif[ifindex].detachnet()
|
||||
|
||||
def addfile(self, srcname, filename):
|
||||
shcmd = "mkdir -p $(dirname '%s') && mv '%s' '%s' && sync" % \
|
||||
(filename, srcname, filename)
|
||||
self.shcmd(shcmd)
|
||||
|
||||
def getaddr(self, ifname, rescan = False):
|
||||
return None
|
||||
#return self.vnodeclient.getaddr(ifname = ifname, rescan = rescan)
|
||||
|
||||
def addsymlink(self, path, file):
|
||||
''' Create a symbolic link from /path/name/file ->
|
||||
/tmp/pycore.nnnnn/@.conf/path.name/file
|
||||
'''
|
||||
dirname = path
|
||||
if dirname and dirname[0] == "/":
|
||||
dirname = dirname[1:]
|
||||
dirname = dirname.replace("/", ".")
|
||||
if file:
|
||||
pathname = os.path.join(path, file)
|
||||
sym = os.path.join(self.session.sessiondir, "@.conf", dirname, file)
|
||||
else:
|
||||
pathname = path
|
||||
sym = os.path.join(self.session.sessiondir, "@.conf", dirname)
|
||||
|
||||
if os.path.islink(pathname):
|
||||
if os.readlink(pathname) == sym:
|
||||
# this link already exists - silently return
|
||||
return
|
||||
os.unlink(pathname)
|
||||
else:
|
||||
if os.path.exists(pathname):
|
||||
self.warn("did not create symlink for %s since path " \
|
||||
"exists on host" % pathname)
|
||||
return
|
||||
self.info("creating symlink %s -> %s" % (pathname, sym))
|
||||
os.symlink(sym, pathname)
|
||||
|
||||
class JailNode(SimpleJailNode):
|
||||
|
||||
def __init__(self, session, objid = None, name = None,
|
||||
nodedir = None, bootsh = "boot.sh", verbose = False,
|
||||
start = True):
|
||||
super(JailNode, self).__init__(session = session, objid = objid,
|
||||
name = name, nodedir = nodedir,
|
||||
verbose = verbose)
|
||||
self.bootsh = bootsh
|
||||
if not start:
|
||||
return
|
||||
# below here is considered node startup/instantiation code
|
||||
self.makenodedir()
|
||||
self.startup()
|
||||
|
||||
def boot(self):
|
||||
self.session.services.bootnodeservices(self)
|
||||
|
||||
def validate(self):
|
||||
self.session.services.validatenodeservices(self)
|
||||
|
||||
def startup(self):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
super(JailNode, self).startup()
|
||||
#self.privatedir("/var/run")
|
||||
#self.privatedir("/var/log")
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
self.lock.acquire()
|
||||
# services are instead stopped when session enters datacollect state
|
||||
#self.session.services.stopnodeservices(self)
|
||||
try:
|
||||
super(JailNode, self).shutdown()
|
||||
finally:
|
||||
self.rmnodedir()
|
||||
self.lock.release()
|
||||
|
||||
def privatedir(self, path):
|
||||
if path[0] != "/":
|
||||
raise ValueError, "path not fully qualified: " + path
|
||||
hostpath = os.path.join(self.nodedir, path[1:].replace("/", "."))
|
||||
try:
|
||||
os.mkdir(hostpath)
|
||||
except OSError:
|
||||
pass
|
||||
except Exception, e:
|
||||
raise Exception, e
|
||||
self.mount(hostpath, path)
|
||||
|
||||
def opennodefile(self, filename, mode = "w"):
|
||||
dirname, basename = os.path.split(filename)
|
||||
#self.addsymlink(path=dirname, file=basename)
|
||||
if not basename:
|
||||
raise ValueError, "no basename for filename: " + filename
|
||||
if dirname and dirname[0] == "/":
|
||||
dirname = dirname[1:]
|
||||
dirname = dirname.replace("/", ".")
|
||||
dirname = os.path.join(self.nodedir, dirname)
|
||||
if not os.path.isdir(dirname):
|
||||
os.makedirs(dirname, mode = 0755)
|
||||
hostfilename = os.path.join(dirname, basename)
|
||||
return open(hostfilename, mode)
|
||||
|
||||
def nodefile(self, filename, contents, mode = 0644):
|
||||
f = self.opennodefile(filename, "w")
|
||||
f.write(contents)
|
||||
os.chmod(f.name, mode)
|
||||
f.close()
|
||||
self.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode))
|
||||
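# Illustrative sketch (not part of the original file): pushing a file into a
# node's configuration directory with the helpers above. 'session' is assumed
# to be a running Session; the filename and contents are arbitrary examples.
#
#   n = JailNode(session, name="n1")
#   n.nodefile("startup.sh", "#!/bin/sh\nhostname\n", mode=0755)
#   # the file is written under /tmp/pycore.<sessionid>/n1.conf/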
|
373
daemon/core/conf.py
Normal file
|
@ -0,0 +1,373 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
conf.py: common support for configurable objects
|
||||
'''
|
||||
import string
|
||||
from core.api import coreapi
|
||||
|
||||
class ConfigurableManager(object):
|
||||
''' A generic class for managing Configurables. This class can register
|
||||
with a session to receive Config Messages for setting some parameters
|
||||
for itself or for the Configurables that it manages.
|
||||
'''
|
||||
# name corresponds to configuration object field
|
||||
_name = ""
|
||||
# type corresponds with register message types
|
||||
_type = None
|
||||
|
||||
def __init__(self, session=None):
|
||||
self.session = session
|
||||
self.session.addconfobj(self._name, self._type, self.configure)
|
||||
# Configurable key=values, indexed by node number
|
||||
self.configs = {}
|
||||
|
||||
|
||||
def configure(self, session, msg):
|
||||
''' Handle configure messages. The configuration message sent to a
|
||||
ConfigurableManager usually is used to:
|
||||
1. Request a list of Configurables (request flag)
|
||||
2. Reset manager and clear configs (reset flag)
|
||||
3. Send values that configure the manager or one of its
|
||||
Configurables
|
||||
|
||||
Returns any reply messages.
|
||||
'''
|
||||
objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ)
|
||||
conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE)
|
||||
if conftype == coreapi.CONF_TYPE_FLAGS_REQUEST:
|
||||
return self.configure_request(msg)
|
||||
elif conftype == coreapi.CONF_TYPE_FLAGS_RESET:
|
||||
if objname == "all" or objname == self._name:
|
||||
return self.configure_reset(msg)
|
||||
else:
|
||||
return self.configure_values(msg,
|
||||
msg.gettlv(coreapi.CORE_TLV_CONF_VALUES))
|
||||
|
||||
def configure_request(self, msg):
|
||||
''' Request configuration data.
|
||||
'''
|
||||
return None
|
||||
|
||||
def configure_reset(self, msg):
|
||||
''' By default, resets this manager to clear configs.
|
||||
'''
|
||||
return self.reset()
|
||||
|
||||
def configure_values(self, msg, values):
|
||||
''' Values have been sent to this manager.
|
||||
'''
|
||||
return None
|
||||
|
||||
def configure_values_keyvalues(self, msg, values, target, keys):
|
||||
''' Helper for configure_values() that parses
|
||||
'key=value' strings from a values field. The key name must be
|
||||
in the keys list, and target.key=value is set.
|
||||
'''
|
||||
if values is None:
|
||||
return None
|
||||
kvs = values.split('|')
|
||||
for kv in kvs:
|
||||
try:
|
||||
# key=value
|
||||
(key, value) = kv.split('=', 1)
|
||||
except ValueError:
|
||||
# value only
|
||||
key = keys[kvs.index(kv)]
|
||||
value = kv
|
||||
if key not in keys:
|
||||
raise ValueError, "invalid key: %s" % key
|
||||
setattr(target, key, value)
|
||||
return None
|
||||
|
||||
def reset(self):
|
||||
return None
|
||||
|
||||
def setconfig(self, nodenum, conftype, values):
|
||||
''' add configuration values for a node to a dictionary; values are
|
||||
usually received from a Configuration Message, and may refer to a
|
||||
node for which no object exists yet
|
||||
'''
|
||||
conflist = []
|
||||
if nodenum in self.configs:
|
||||
oldlist = self.configs[nodenum]
|
||||
found = False
|
||||
for (t, v) in oldlist:
|
||||
if (t == conftype):
|
||||
# replace existing config
|
||||
found = True
|
||||
conflist.append((conftype, values))
|
||||
else:
|
||||
conflist.append((t, v))
|
||||
if not found:
|
||||
conflist.append((conftype, values))
|
||||
else:
|
||||
conflist.append((conftype, values))
|
||||
self.configs[nodenum] = conflist
|
||||
|
||||
def getconfig(self, nodenum, conftype, defaultvalues):
|
||||
''' get configuration values for a node; if the values don't exist in
|
||||
our dictionary then return the default values supplied
|
||||
'''
|
||||
if nodenum in self.configs:
|
||||
# return configured values
|
||||
conflist = self.configs[nodenum]
|
||||
for (t, v) in conflist:
|
||||
if (conftype is None) or (t == conftype):
|
||||
return (t, v)
|
||||
# return default values provided (may be None)
|
||||
return (conftype, defaultvalues)
|
||||
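# Illustrative sketch (not part of the original file): the per-node store is a
# plain dict of (conftype, values) tuples, so a registered manager instance
# ('mgr', with an arbitrary example model name) behaves like this:
#
#   mgr.setconfig(5, "mymodel", ["1", "2"])
#   mgr.getconfig(5, "mymodel", None)   # -> ("mymodel", ["1", "2"])
#   mgr.getconfig(9, "mymodel", None)   # -> ("mymodel", None), defaults used
#   mgr.clearconfig(5)                  # drop node 5's stored values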
|
||||
def getallconfigs(self, use_clsmap=True):
|
||||
''' Return (nodenum, conftype, values) tuples for all stored configs.
|
||||
Used when reconnecting to a session.
|
||||
'''
|
||||
r = []
|
||||
for nodenum in self.configs:
|
||||
for (t, v) in self.configs[nodenum]:
|
||||
if use_clsmap:
|
||||
t = self._modelclsmap[t]
|
||||
r.append( (nodenum, t, v) )
|
||||
return r
|
||||
|
||||
def clearconfig(self, nodenum):
|
||||
''' remove configuration values for the specified node;
|
||||
when nodenum is None, remove all configuration values
|
||||
'''
|
||||
if nodenum is None:
|
||||
self.configs = {}
|
||||
return
|
||||
if nodenum in self.configs:
|
||||
self.configs.pop(nodenum)
|
||||
|
||||
def setconfig_keyvalues(self, nodenum, conftype, keyvalues):
|
||||
''' Set configuration values for a node from a list of (key, value) tuples.
|
||||
'''
|
||||
if conftype not in self._modelclsmap:
|
||||
self.warn("Unknown model type '%s'" % (conftype))
|
||||
return
|
||||
model = self._modelclsmap[conftype]
|
||||
keys = model.getnames()
|
||||
# defaults are merged with supplied values here
|
||||
values = list(model.getdefaultvalues())
|
||||
for key, value in keyvalues:
|
||||
if key not in keys:
|
||||
self.warn("Skipping unknown configuration key for %s: '%s'" % \
|
||||
(conftype, key))
|
||||
continue
|
||||
i = keys.index(key)
|
||||
values[i] = value
|
||||
self.setconfig(nodenum, conftype, values)
|
||||
|
||||
def getmodels(self, n):
|
||||
''' Return a list of model classes and values for a net if one has been
|
||||
configured. This is invoked when exporting a session to XML.
|
||||
This assumes self.configs contains an iterable of (model-names, values)
|
||||
and a self._modelclsmapdict exists.
|
||||
'''
|
||||
r = []
|
||||
if n.objid in self.configs:
|
||||
v = self.configs[n.objid]
|
||||
for model in v:
|
||||
cls = self._modelclsmap[model[0]]
|
||||
vals = model[1]
|
||||
r.append((cls, vals))
|
||||
return r
|
||||
|
||||
|
||||
def info(self, msg):
|
||||
self.session.info(msg)
|
||||
|
||||
def warn(self, msg):
|
||||
self.session.warn(msg)
|
||||
|
||||
|
||||
class Configurable(object):
|
||||
''' A generic class for managing configuration parameters.
|
||||
Parameters are sent via Configuration Messages, which allow the GUI
|
||||
to build dynamic dialogs depending on what is being configured.
|
||||
'''
|
||||
_name = ""
|
||||
# Configuration items:
|
||||
# ('name', 'type', 'default', 'possible-value-list', 'caption')
|
||||
_confmatrix = []
|
||||
_confgroups = None
|
||||
_bitmap = None
|
||||
|
||||
def __init__(self, session=None, objid=None):
|
||||
self.session = session
|
||||
self.objid = objid
|
||||
|
||||
def reset(self):
|
||||
pass
|
||||
|
||||
def register(self):
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def getdefaultvalues(cls):
|
||||
return tuple( map(lambda x: x[2], cls._confmatrix) )
|
||||
|
||||
@classmethod
|
||||
def getnames(cls):
|
||||
return tuple( map( lambda x: x[0], cls._confmatrix) )
|
||||
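# Illustrative sketch (not part of the original file): a minimal Configurable
# subclass showing how _confmatrix drives the classmethods above. The option
# names, captions and the CONF_DATA_TYPE_STRING constant are assumptions made
# for the example (only CONF_DATA_TYPE_BOOL appears elsewhere in this file).
class ExampleModel(Configurable):
    _name = "example"
    _confmatrix = [
        ("range", coreapi.CONF_DATA_TYPE_STRING, "275", "", "radio range"),
        ("enabled", coreapi.CONF_DATA_TYPE_BOOL, "1", "On,Off", "enabled"),
    ]

# ExampleModel.getnames()         -> ("range", "enabled")
# ExampleModel.getdefaultvalues() -> ("275", "1")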
|
||||
@classmethod
|
||||
def configure(cls, mgr, msg):
|
||||
''' Handle configuration messages for this object.
|
||||
'''
|
||||
reply = None
|
||||
nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE)
|
||||
objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ)
|
||||
conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE)
|
||||
|
||||
if mgr.verbose:
|
||||
mgr.info("received configure message for %s" % cls._name)
|
||||
if conftype == coreapi.CONF_TYPE_FLAGS_REQUEST:
|
||||
if mgr.verbose:
|
||||
mgr.info("replying to configure request for %s model" %
|
||||
cls._name)
|
||||
# when object name is "all", the reply to this request may be None
|
||||
# if this node has not been configured for this model; otherwise we
|
||||
# reply with the defaults for this model
|
||||
if objname == "all":
|
||||
defaults = None
|
||||
typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE
|
||||
else:
|
||||
defaults = cls.getdefaultvalues()
|
||||
typeflags = coreapi.CONF_TYPE_FLAGS_NONE
|
||||
values = mgr.getconfig(nodenum, cls._name, defaults)[1]
|
||||
if values is None:
|
||||
# node has no active config for this model (don't send defaults)
|
||||
return None
|
||||
# reply with config options
|
||||
reply = cls.toconfmsg(0, nodenum, typeflags, values)
|
||||
elif conftype == coreapi.CONF_TYPE_FLAGS_RESET:
|
||||
if objname == "all":
|
||||
mgr.clearconfig(nodenum)
|
||||
#elif conftype == coreapi.CONF_TYPE_FLAGS_UPDATE:
|
||||
else:
|
||||
# store the configuration values for later use, when the node
|
||||
# object has been created
|
||||
if objname is None:
|
||||
mgr.info("no configuration object for node %s" % nodenum)
|
||||
return None
|
||||
values_str = msg.gettlv(coreapi.CORE_TLV_CONF_VALUES)
|
||||
defaults = cls.getdefaultvalues()
|
||||
if values_str is None:
|
||||
# use default or preconfigured values
|
||||
values = mgr.getconfig(nodenum, cls._name, defaults)[1]
|
||||
else:
|
||||
# use new values supplied from the conf message
|
||||
values = values_str.split('|')
|
||||
# determine new or old style config
|
||||
new = cls.haskeyvalues(values)
|
||||
if new:
|
||||
new_values = list(defaults)
|
||||
keys = cls.getnames()
|
||||
for v in values:
|
||||
key, value = v.split('=', 1)
|
||||
try:
|
||||
new_values[keys.index(key)] = value
|
||||
except ValueError:
|
||||
mgr.info("warning: ignoring invalid key '%s'" % key)
|
||||
values = new_values
|
||||
mgr.setconfig(nodenum, objname, values)
|
||||
return reply
|
||||
|
||||
@classmethod
|
||||
def toconfmsg(cls, flags, nodenum, typeflags, values):
|
||||
''' Convert this class to a Config API message. Some TLVs are defined
|
||||
by the class, but node number, conf type flags, and values must
|
||||
be passed in.
|
||||
'''
|
||||
keys = cls.getnames()
|
||||
keyvalues = map(lambda a,b: "%s=%s" % (a,b), keys, values)
|
||||
values_str = string.join(keyvalues, '|')
|
||||
tlvdata = ""
|
||||
if nodenum is not None:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE,
|
||||
nodenum)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ,
|
||||
cls._name)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE,
|
||||
typeflags)
|
||||
datatypes = tuple( map(lambda x: x[1], cls._confmatrix) )
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_DATA_TYPES,
|
||||
datatypes)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES,
|
||||
values_str)
|
||||
captions = reduce( lambda a,b: a + '|' + b, \
|
||||
map(lambda x: x[4], cls._confmatrix))
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_CAPTIONS,
|
||||
captions)
|
||||
possiblevals = reduce( lambda a,b: a + '|' + b, \
|
||||
map(lambda x: x[3], cls._confmatrix))
|
||||
tlvdata += coreapi.CoreConfTlv.pack(
|
||||
coreapi.CORE_TLV_CONF_POSSIBLE_VALUES, possiblevals)
|
||||
if cls._bitmap is not None:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_BITMAP,
|
||||
cls._bitmap)
|
||||
if cls._confgroups is not None:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_GROUPS,
|
||||
cls._confgroups)
|
||||
msg = coreapi.CoreConfMessage.pack(flags, tlvdata)
|
||||
return msg
|
||||
|
||||
@staticmethod
|
||||
def booltooffon(value):
|
||||
''' Convenience helper turns bool into on (True) or off (False) string.
|
||||
'''
|
||||
if value == "1" or value == "true" or value == "on":
|
||||
return "on"
|
||||
else:
|
||||
return "off"
|
||||
|
||||
@staticmethod
|
||||
def offontobool(value):
|
||||
if type(value) == str:
|
||||
if value.lower() == "on":
|
||||
return 1
|
||||
elif value.lower() == "off":
|
||||
return 0
|
||||
return value
|
||||
|
||||
|
||||
def valueof(self, name, values):
|
||||
''' Helper to return a value by the name defined in confmatrix.
|
||||
Boolean values are converted to an on/off string.'''
|
||||
i = self.getnames().index(name)
|
||||
if self._confmatrix[i][1] == coreapi.CONF_DATA_TYPE_BOOL and \
|
||||
values[i] != "":
|
||||
return self.booltooffon( values[i] )
|
||||
else:
|
||||
return values[i]
|
||||
|
||||
@staticmethod
|
||||
def haskeyvalues(values):
|
||||
''' Helper to check for list of key=value pairs versus a plain old
|
||||
list of values. Returns True if all elements are "key=value".
|
||||
'''
|
||||
if len(values) == 0:
|
||||
return False
|
||||
for v in values:
|
||||
if "=" not in v:
|
||||
return False
|
||||
return True
|
||||
|
||||
def getkeyvaluelist(self):
|
||||
''' Helper to return a list of (key, value) tuples. Keys come from
|
||||
self._confmatrix and values are instance attributes.
|
||||
'''
|
||||
r = []
|
||||
for k in self.getnames():
|
||||
if hasattr(self, k):
|
||||
r.append((k, getattr(self, k)))
|
||||
return r
|
||||
|
||||
|
19
daemon/core/constants.py.in
Normal file
|
@ -0,0 +1,19 @@
|
|||
# Constants created by autoconf ./configure script
|
||||
COREDPY_VERSION = "@COREDPY_VERSION@"
|
||||
CORE_STATE_DIR = "@CORE_STATE_DIR@"
|
||||
CORE_CONF_DIR = "@CORE_CONF_DIR@"
|
||||
CORE_DATA_DIR = "@CORE_DATA_DIR@"
|
||||
CORE_LIB_DIR = "@CORE_LIB_DIR@"
|
||||
CORE_SBIN_DIR = "@SBINDIR@"
|
||||
|
||||
BRCTL_BIN = "@brctl_path@/brctl"
|
||||
SYSCTL_BIN = "@sysctl_path@/sysctl"
|
||||
IP_BIN = "@ip_path@/ip"
|
||||
TC_BIN = "@tc_path@/tc"
|
||||
EBTABLES_BIN = "@ebtables_path@/ebtables"
|
||||
IFCONFIG_BIN = "@ifconfig_path@/ifconfig"
|
||||
NGCTL_BIN = "@ngctl_path@/ngctl"
|
||||
VIMAGE_BIN = "@vimage_path@/vimage"
|
||||
QUAGGA_STATE_DIR = "@CORE_STATE_DIR@/run/quagga"
|
||||
MOUNT_BIN = "@mount_path@/mount"
|
||||
UMOUNT_BIN = "@umount_path@/umount"
|
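# Illustrative example (not part of the original file): after ./configure runs,
# the @...@ placeholders above are substituted into a plain constants.py; the
# exact paths depend on the platform and are shown here only as examples:
#
#   CORE_STATE_DIR = "/var"
#   CORE_CONF_DIR = "/etc/core"
#   NGCTL_BIN = "/usr/sbin/ngctl"
#   QUAGGA_STATE_DIR = "/var/run/quagga"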
445
daemon/core/coreobj.py
Normal file
|
@ -0,0 +1,445 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
coreobj.py: defines the basic objects for emulation: the PyCoreObj base class,
|
||||
along with PyCoreNode, PyCoreNet, and PyCoreNetIf
|
||||
'''
|
||||
import sys, threading, os, shutil
|
||||
|
||||
from core.api import coreapi
|
||||
from core.misc.ipaddr import *
|
||||
|
||||
class Position(object):
|
||||
''' Helper class for Cartesian coordinate position
|
||||
'''
|
||||
def __init__(self, x = None, y = None, z = None):
|
||||
self.x = None
|
||||
self.y = None
|
||||
self.z = None
|
||||
self.set(x, y, z)
|
||||
|
||||
def set(self, x = None, y = None, z = None):
|
||||
''' Returns True if the position has actually changed.
|
||||
'''
|
||||
if self.x == x and self.y == y and self.z == z:
|
||||
return False
|
||||
self.x = x
|
||||
self.y = y
|
||||
self.z = z
|
||||
return True
|
||||
|
||||
def get(self):
|
||||
''' Fetch the (x,y,z) position tuple.
|
||||
'''
|
||||
return (self.x, self.y, self.z)
|
||||
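# Illustrative sketch (not part of the original file): Position.set() reports
# whether anything actually changed, which PyCoreNode.setposition() below uses
# to skip redundant per-interface updates.
if __name__ == "__main__":
    p = Position(x=10, y=20)
    print p.set(10, 20)   # False - nothing moved
    print p.set(10, 25)   # True  - y changed
    print p.get()         # (10, 25, None)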
|
||||
class PyCoreObj(object):
|
||||
''' Base class for pycore objects (nodes and nets)
|
||||
'''
|
||||
apitype = None
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True):
|
||||
self.session = session
|
||||
if objid is None:
|
||||
objid = session.getobjid()
|
||||
self.objid = objid
|
||||
if name is None:
|
||||
name = "o%s" % self.objid
|
||||
self.name = name
|
||||
# ifindex is key, PyCoreNetIf instance is value
|
||||
self._netif = {}
|
||||
self.ifindex = 0
|
||||
self.canvas = None
|
||||
self.icon = None
|
||||
self.opaque = None
|
||||
self.verbose = verbose
|
||||
self.position = Position()
|
||||
|
||||
def startup(self):
|
||||
''' Each object implements its own startup method.
|
||||
'''
|
||||
raise NotImplementedError
|
||||
|
||||
def shutdown(self):
|
||||
''' Each object implements its own shutdown method.
|
||||
'''
|
||||
raise NotImplementedError
|
||||
|
||||
def setposition(self, x = None, y = None, z = None):
|
||||
''' Set the (x,y,z) position of the object.
|
||||
'''
|
||||
return self.position.set(x = x, y = y, z = z)
|
||||
|
||||
def getposition(self):
|
||||
''' Return an (x,y,z) tuple representing this object's position.
|
||||
'''
|
||||
return self.position.get()
|
||||
|
||||
def ifname(self, ifindex):
|
||||
return self.netif(ifindex).name
|
||||
|
||||
def netifs(self, sort=False):
|
||||
''' Iterate over attached network interfaces.
|
||||
'''
|
||||
if sort:
|
||||
return map(lambda k: self._netif[k], sorted(self._netif.keys()))
|
||||
else:
|
||||
return self._netif.itervalues()
|
||||
|
||||
def numnetif(self):
|
||||
''' Return the attached interface count.
|
||||
'''
|
||||
return len(self._netif)
|
||||
|
||||
def getifindex(self, netif):
|
||||
for ifindex in self._netif:
|
||||
if self._netif[ifindex] is netif:
|
||||
return ifindex
|
||||
return -1
|
||||
|
||||
def newifindex(self):
|
||||
while self.ifindex in self._netif:
|
||||
self.ifindex += 1
|
        ifindex = self.ifindex
        self.ifindex += 1
        return ifindex

    def tonodemsg(self, flags):
        ''' Build a CORE API Node Message for this object. Both nodes and
            networks can be represented by a Node Message.
        '''
        if self.apitype is None:
            return None
        tlvdata = ""
        (x, y, z) = self.getposition()
        tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_NUMBER,
                                            self.objid)
        tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_TYPE,
                                            self.apitype)
        tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_NAME,
                                            self.name)
        if hasattr(self, "type") and self.type is not None:
            tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_MODEL,
                                                self.type)

        if x is not None:
            tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_XPOS, x)
        if y is not None:
            tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_YPOS, y)
        if self.canvas is not None:
            tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_CANVAS,
                                                self.canvas)
        tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_EMUID,
                                            self.objid)
        if self.icon is not None:
            tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_ICON,
                                                self.icon)
        if self.opaque is not None:
            tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_OPAQUE,
                                                self.opaque)
        msg = coreapi.CoreNodeMessage.pack(flags, tlvdata)
        return msg

    def tolinkmsgs(self, flags):
        ''' Build CORE API Link Messages for this object. There is no default
            method for PyCoreObjs as PyCoreNodes do not implement this but
            PyCoreNets do.
        '''
        return []

    def info(self, msg):
        ''' Utility method for printing informational messages when verbose
            is turned on.
        '''
        if self.verbose:
            print "%s: %s" % (self.name, msg)
            sys.stdout.flush()

    def warn(self, msg):
        ''' Utility method for printing warning/error messages.
        '''
        print >> sys.stderr, "%s: %s" % (self.name, msg)
        sys.stderr.flush()

    def exception(self, level, source, text):
        ''' Generate an Exception Message for this session, providing this
            object number.
        '''
        if self.session:
            id = None
            if isinstance(self.objid, int):
                id = self.objid
            elif isinstance(self.objid, str) and self.objid.isdigit():
                id = int(self.objid)
            self.session.exception(level, source, id, text)


class PyCoreNode(PyCoreObj):
    ''' Base class for nodes
    '''
    def __init__(self, session, objid = None, name = None, verbose = False,
                 start = True):
        ''' Initialization for node objects.
        '''
        PyCoreObj.__init__(self, session, objid, name, verbose=verbose,
                           start=start)
        self.services = []
        self.type = None
        self.nodedir = None

    def nodeid(self):
        return self.objid

    def addservice(self, service):
        if service is not None:
            self.services.append(service)

    def makenodedir(self):
        if self.nodedir is None:
            self.nodedir = \
                os.path.join(self.session.sessiondir, self.name + ".conf")
            os.makedirs(self.nodedir)
            self.tmpnodedir = True
        else:
            self.tmpnodedir = False

    def rmnodedir(self):
        if hasattr(self.session.options, 'preservedir'):
            if self.session.options.preservedir == '1':
                return
        if self.tmpnodedir:
            shutil.rmtree(self.nodedir, ignore_errors = True)

    def addnetif(self, netif, ifindex):
        if ifindex in self._netif:
            raise ValueError, "ifindex %s already exists" % ifindex
        self._netif[ifindex] = netif

    def delnetif(self, ifindex):
        if ifindex not in self._netif:
            raise ValueError, "ifindex %s does not exist" % ifindex
        netif = self._netif.pop(ifindex)
        netif.shutdown()
        del netif

    def netif(self, ifindex, net = None):
        if ifindex in self._netif:
            return self._netif[ifindex]
        else:
            return None

    def attachnet(self, ifindex, net):
        if ifindex not in self._netif:
            raise ValueError, "ifindex %s does not exist" % ifindex
        self._netif[ifindex].attachnet(net)

    def detachnet(self, ifindex):
        if ifindex not in self._netif:
            raise ValueError, "ifindex %s does not exist" % ifindex
        self._netif[ifindex].detachnet()

    def setposition(self, x = None, y = None, z = None):
        changed = PyCoreObj.setposition(self, x = x, y = y, z = z)
        if not changed:
            # save extra interface range calculations
            return
        for netif in self.netifs(sort=True):
            netif.setposition(x, y, z)

    def commonnets(self, obj, want_ctrl=False):
        ''' Given another node or net object, return common networks between
            this node and that object. A list of tuples is returned, with each
            tuple consisting of (network, interface1, interface2).
        '''
        r = []
        for netif1 in self.netifs():
            if not want_ctrl and hasattr(netif1, 'control'):
                continue
            for netif2 in obj.netifs():
                if netif1.net == netif2.net:
                    r += (netif1.net, netif1, netif2),
        return r

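# A minimal usage sketch (illustrative only; the surrounding Session object
# and node wiring are assumed and not shown in this file):
#
#     n1 = PyCoreNode(session, name="n1")
#     n2 = PyCoreNode(session, name="n2")
#     # ... interfaces are created on both nodes and attached to the same
#     #     PyCoreNet via attachnet() ...
#     for (net, if1, if2) in n1.commonnets(n2):
#         n1.info("share %s via %s and %s" % (net.name, if1.name, if2.name))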

class PyCoreNet(PyCoreObj):
    ''' Base class for networks
    '''
    linktype = coreapi.CORE_LINK_WIRED

    def __init__(self, session, objid, name, verbose = False, start = True):
        ''' Initialization for network objects.
        '''
        PyCoreObj.__init__(self, session, objid, name, verbose=verbose,
                           start=start)
        self._linked = {}
        self._linked_lock = threading.Lock()

    def attach(self, netif):
        i = self.newifindex()
        self._netif[i] = netif
        netif.netifi = i
        with self._linked_lock:
            self._linked[netif] = {}

    def detach(self, netif):
        del self._netif[netif.netifi]
        netif.netifi = None
        with self._linked_lock:
            del self._linked[netif]

    def tolinkmsgs(self, flags):
        ''' Build CORE API Link Messages for this network. Each link message
            describes a link between this network and a node.
        '''
        msgs = []
        # build a link message from this network node to each node having a
        # connected interface
        for netif in self.netifs(sort=True):
            if not hasattr(netif, "node"):
                continue
            otherobj = netif.node
            if otherobj is None:
                # two layer-2 switches/hubs linked together via linknet()
                if not hasattr(netif, "othernet"):
                    continue
                otherobj = netif.othernet
                if otherobj.objid == self.objid:
                    continue

            tlvdata = ""
            tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER,
                                                self.objid)
            tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER,
                                                otherobj.objid)
            delay = netif.getparam('delay')
            bw = netif.getparam('bw')
            loss = netif.getparam('loss')
            duplicate = netif.getparam('duplicate')
            jitter = netif.getparam('jitter')
            if delay is not None:
                tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_DELAY,
                                                    delay)
            if bw is not None:
                tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_BW,
                                                    bw)
            if loss is not None:
                tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_PER,
                                                    str(loss))
            if duplicate is not None:
                tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_DUP,
                                                    str(duplicate))
            if jitter is not None:
                tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_JITTER,
                                                    jitter)
            tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_TYPE,
                                                self.linktype)
            tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2NUM,
                                                otherobj.getifindex(netif))
            for addr in netif.addrlist:
                (ip, sep, mask) = addr.partition('/')
                mask = int(mask)
                if isIPv4Address(ip):
                    family = AF_INET
                    tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP4
                    tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP4MASK
                else:
                    family = AF_INET6
                    tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP6
                    tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP6MASK
                ipl = socket.inet_pton(family, ip)
                tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip, \
                    IPAddr(af=family, addr=ipl))
                tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask)

            msg = coreapi.CoreLinkMessage.pack(flags, tlvdata)
            msgs.append(msg)
        return msgs


class PyCoreNetIf(object):
    ''' Base class for interfaces.
    '''
    def __init__(self, node, name, mtu):
        self.node = node
        self.name = name
        if not isinstance(mtu, (int, long)):
            raise ValueError
        self.mtu = mtu
        self.net = None
        self._params = {}
        self.addrlist = []
        self.hwaddr = None
        self.poshook = None
        # used with EMANE
        self.transport_type = None
        # interface index on the network
        self.netindex = None

    def startup(self):
        pass

    def shutdown(self):
        pass

    def attachnet(self, net):
        if self.net:
            self.detachnet()
            self.net = None
        net.attach(self)
        self.net = net

    def detachnet(self):
        if self.net is not None:
            self.net.detach(self)

    def addaddr(self, addr):
        self.addrlist.append(addr)

    def deladdr(self, addr):
        self.addrlist.remove(addr)

    def sethwaddr(self, addr):
        self.hwaddr = addr

    def getparam(self, key):
        ''' Retrieve a parameter from the _params dict,
            or None if the parameter does not exist.
        '''
        if key not in self._params:
            return None
        return self._params[key]

    def getparams(self):
        ''' Return (key, value) pairs from the _params dict.
        '''
        r = []
        for k in sorted(self._params.keys()):
            r.append((k, self._params[k]))
        return r

    def setparam(self, key, value):
        ''' Set a parameter in the _params dict.
            Returns True if the parameter has changed.
        '''
        if key in self._params:
            if self._params[key] == value:
                return False
            elif self._params[key] <= 0 and value <= 0:
                # treat None and 0 as unchanged values
                return False
        self._params[key] = value
        return True

    def setposition(self, x, y, z):
        ''' Dispatch to any position hook (self.poshook) handler.
        '''
        if self.poshook is not None:
            self.poshook(self, x, y, z)

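# A brief sketch of how PyCoreNetIf parameters and addresses are typically
# populated before a PyCoreNet builds Link Messages from them (hypothetical
# values, illustrative only):
#
#     netif = PyCoreNetIf(node=n1, name="eth0", mtu=1500)
#     netif.addaddr("10.0.0.1/24")
#     netif.setparam("bw", 54000000)     # True: new value recorded
#     netif.setparam("bw", 54000000)     # False: unchanged
#     netif.getparam("delay")            # None: parameter never set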
0
daemon/core/emane/__init__.py
Normal file

65
daemon/core/emane/bypass.py
Normal file

@@ -0,0 +1,65 @@
#
# CORE
# Copyright (c)2011-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
bypass.py: EMANE Bypass model for CORE
'''

import sys
import string
from core.api import coreapi

from core.constants import *
from emane import EmaneModel

class EmaneBypassModel(EmaneModel):
    def __init__(self, session, objid = None, verbose = False):
        EmaneModel.__init__(self, session, objid, verbose)

    _name = "emane_bypass"
    _confmatrix = [
        ("none", coreapi.CONF_DATA_TYPE_BOOL, '0',
         'True,False', 'There are no parameters for the bypass model.'),
    ]

    # value groupings
    _confgroups = "Bypass Parameters:1-1"

    def buildnemxmlfiles(self, e, ifc):
        ''' Build the necessary nem, mac, and phy XMLs in the given path.
            If an individual NEM has a nonstandard config, we need to build
            that file also. Otherwise the WLAN-wide nXXemane_bypassnem.xml,
            nXXemane_bypassmac.xml, nXXemane_bypassphy.xml are used.
        '''
        values = e.getifcconfig(self.objid, self._name,
                                self.getdefaultvalues(), ifc)
        if values is None:
            return
        nemdoc = e.xmldoc("nem")
        nem = nemdoc.getElementsByTagName("nem").pop()
        nem.setAttribute("name", "BYPASS NEM")
        mactag = nemdoc.createElement("mac")
        mactag.setAttribute("definition", self.macxmlname(ifc))
        nem.appendChild(mactag)
        phytag = nemdoc.createElement("phy")
        phytag.setAttribute("definition", self.phyxmlname(ifc))
        nem.appendChild(phytag)
        e.xmlwrite(nemdoc, self.nemxmlname(ifc))

        macdoc = e.xmldoc("mac")
        mac = macdoc.getElementsByTagName("mac").pop()
        mac.setAttribute("name", "BYPASS MAC")
        mac.setAttribute("library", "bypassmaclayer")
        e.xmlwrite(macdoc, self.macxmlname(ifc))

        phydoc = e.xmldoc("phy")
        phy = phydoc.getElementsByTagName("phy").pop()
        phy.setAttribute("name", "BYPASS PHY")
        phy.setAttribute("library", "bypassphylayer")
        e.xmlwrite(phydoc, self.phyxmlname(ifc))

124
daemon/core/emane/commeffect.py
Executable file

@@ -0,0 +1,124 @@
#
# CORE
# Copyright (c)2010-2013 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# authors: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#          Randy Charland <rcharland@ll.mit.edu>
#
'''
commeffect.py: EMANE CommEffect model for CORE
'''

import sys
import string
from core.api import coreapi

from core.constants import *
from emane import EmaneModel

try:
    import emaneeventservice
    import emaneeventcommeffect
except Exception, e:
    pass

def z(x):
    ''' Helper to use 0 for None values. '''
    if x is None:
        return 0
    else:
        return x

class EmaneCommEffectModel(EmaneModel):
    def __init__(self, session, objid = None, verbose = False):
        EmaneModel.__init__(self, session, objid, verbose)

    # model name
    _name = "emane_commeffect"
    # CommEffect parameters
    _confmatrix_shim = [
        ("defaultconnectivity", coreapi.CONF_DATA_TYPE_BOOL, '0',
         'On,Off', 'defaultconnectivity'),
        ("filterfile", coreapi.CONF_DATA_TYPE_STRING, '',
         '', 'filter file'),
        ("groupid", coreapi.CONF_DATA_TYPE_UINT32, '0',
         '', 'NEM Group ID'),
        ("enablepromiscuousmode", coreapi.CONF_DATA_TYPE_BOOL, '0',
         'On,Off', 'enable promiscuous mode'),
        ("enabletighttimingmode", coreapi.CONF_DATA_TYPE_BOOL, '0',
         'On,Off', 'enable tight timing mode'),
        ("receivebufferperiod", coreapi.CONF_DATA_TYPE_FLOAT, '1.0',
         '', 'receivebufferperiod'),
    ]

    _confmatrix = _confmatrix_shim
    # value groupings
    _confgroups = "CommEffect SHIM Parameters:1-%d" \
                  % len(_confmatrix_shim)

    def buildnemxmlfiles(self, e, ifc):
        ''' Build the necessary nem and commeffect XMLs in the given path.
            If an individual NEM has a nonstandard config, we need to build
            that file also. Otherwise the WLAN-wide
            nXXemane_commeffectnem.xml, nXXemane_commeffectshim.xml are used.
        '''
        values = e.getifcconfig(self.objid, self._name,
                                self.getdefaultvalues(), ifc)
        if values is None:
            return
        shimdoc = e.xmldoc("shim")
        shim = shimdoc.getElementsByTagName("shim").pop()
        shim.setAttribute("name", "commeffect SHIM")
        shim.setAttribute("library", "commeffectshim")

        names = self.getnames()
        shimnames = list(names[:len(self._confmatrix_shim)])
        shimnames.remove("filterfile")

        # append all shim options (except filterfile) to shimdoc
        map( lambda n: shim.appendChild(e.xmlparam(shimdoc, n, \
                       self.valueof(n, values))), shimnames)
        # empty filterfile is not allowed
        ff = self.valueof("filterfile", values)
        if ff.strip() != '':
            shim.appendChild(e.xmlparam(shimdoc, "filterfile", ff))
        e.xmlwrite(shimdoc, self.shimxmlname(ifc))

        nemdoc = e.xmldoc("nem")
        nem = nemdoc.getElementsByTagName("nem").pop()
        nem.setAttribute("name", "commeffect NEM")
        nem.setAttribute("type", "unstructured")
        nem.appendChild(e.xmlshimdefinition(nemdoc, self.shimxmlname(ifc)))
        e.xmlwrite(nemdoc, self.nemxmlname(ifc))

    def linkconfig(self, netif, bw = None, delay = None,
                   loss = None, duplicate = None, jitter = None, netif2 = None):
        ''' Generate CommEffect events when a Link Message is received having
            link parameters.
        '''
        service = self.session.emane.service
        if service is None:
            self.session.warn("%s: EMANE event service unavailable" % \
                              self._name)
            return
        if netif is None or netif2 is None:
            self.session.warn("%s: missing NEM information" % self._name)
            return
        # TODO: batch these into multiple events per transmission
        event = emaneeventcommeffect.EventCommEffect(1)
        index = 0
        e = self.session.obj(self.objid)
        nemid = e.getnemid(netif)
        nemid2 = e.getnemid(netif2)
        mbw = bw

        event.set(index, nemid, 0, z(delay), 0, z(jitter), z(loss),
                  z(duplicate), long(z(bw)), long(z(mbw)))
        service.publish(emaneeventcommeffect.EVENT_ID,
                        emaneeventservice.PLATFORMID_ANY,
                        nemid2, emaneeventservice.COMPONENTID_ANY,
                        event.export())

844
daemon/core/emane/emane.py
Normal file

@@ -0,0 +1,844 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
emane.py: definition of an Emane class for implementing configuration
|
||||
control of an EMANE emulation.
|
||||
'''
|
||||
|
||||
import sys, os, threading, subprocess, time, string
|
||||
from xml.dom.minidom import parseString, Document
|
||||
from core.constants import *
|
||||
from core.api import coreapi
|
||||
from core.misc.ipaddr import MacAddr
|
||||
from core.conf import ConfigurableManager, Configurable
|
||||
from core.mobility import WirelessModel
|
||||
from core.emane.nodes import EmaneNode
|
||||
try:
|
||||
import emaneeventservice
|
||||
import emaneeventlocation
|
||||
except Exception, e:
|
||||
pass
|
||||
|
||||
class Emane(ConfigurableManager):
|
||||
''' EMANE controller object. Lives in a Session instance and is used for
|
||||
building EMANE config files from all of the EmaneNode objects in this
|
||||
emulation, and for controlling the EMANE daemons.
|
||||
'''
|
||||
_name = "emane"
|
||||
_type = coreapi.CORE_TLV_REG_EMULSRV
|
||||
_hwaddr_prefix = "02:02"
|
||||
|
||||
def __init__(self, session):
|
||||
ConfigurableManager.__init__(self, session)
|
||||
self.verbose = self.session.getcfgitembool('verbose', False)
|
||||
self._objs = {}
|
||||
self._objslock = threading.Lock()
|
||||
self._ifccounts = {}
|
||||
self._ifccountslock = threading.Lock()
|
||||
self._modelclsmap = {}
|
||||
# Port numbers are allocated from these counters
|
||||
self.platformport = self.session.getcfgitemint('emane_platform_port',
|
||||
8100)
|
||||
self.transformport = self.session.getcfgitemint('emane_transform_port',
|
||||
8200)
|
||||
# model for global EMANE configuration options
|
||||
self.emane_config = EmaneGlobalModel(session, None, self.verbose)
|
||||
session.broker.handlers += (self.handledistributed, )
|
||||
self.loadmodels()
|
||||
# this allows the event service Python bindings to be absent
|
||||
try:
|
||||
self.service = emaneeventservice.EventService()
|
||||
except:
|
||||
self.service = None
|
||||
self.doeventloop = False
|
||||
self.eventmonthread = None
|
||||
# EMANE 0.7.4 support -- to be removed when 0.7.4 support is deprecated
|
||||
self.emane074 = False
|
||||
try:
|
||||
tmp = emaneeventlocation.EventLocation(1)
|
||||
# check if yaw parameter is supported by Location Events
|
||||
# if so, we have EMANE 0.8.1+; if not, we have EMANE 0.7.4/earlier
|
||||
tmp.set(0, 1, 2, 2, 2, 3)
|
||||
except TypeError:
|
||||
self.emane074 = True
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
def loadmodels(self):
|
||||
''' dynamically load EMANE models that were specified in the config file
|
||||
'''
|
||||
self._modelclsmap.clear()
|
||||
self._modelclsmap[self.emane_config._name] = self.emane_config
|
||||
emane_models = self.session.getcfgitem('emane_models')
|
||||
if emane_models is None:
|
||||
return
|
||||
emane_models = emane_models.split(',')
|
||||
for model in emane_models:
|
||||
model = model.strip()
|
||||
try:
|
||||
modelfile = "%s" % model.lower()
|
||||
clsname = "Emane%sModel" % model
|
||||
importcmd = "from %s import %s" % (modelfile, clsname)
|
||||
exec(importcmd)
|
||||
except Exception, e:
|
||||
warntxt = "unable to load the EMANE model '%s'" % modelfile
|
||||
warntxt += " specified in the config file (%s)" % e
|
||||
self.session.exception(coreapi.CORE_EXCP_LEVEL_WARNING, "emane",
|
||||
None, warntxt)
|
||||
self.warn(warntxt)
|
||||
continue
|
||||
# record the model name to class name mapping
|
||||
# this should match clsname._name
|
||||
confname = "emane_%s" % model.lower()
|
||||
self._modelclsmap[confname] = eval(clsname)
|
||||
# each EmaneModel must have ModelName.configure() defined
|
||||
confmethod = eval("%s.configure_emane" % clsname)
|
||||
self.session.addconfobj(confname, coreapi.CORE_TLV_REG_WIRELESS,
|
||||
confmethod)
|
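# Illustrative note (a sketch, not additional emane.py code): given a
# configuration entry such as "emane_models = Ieee80211abg", loadmodels()
# effectively performs
#     from ieee80211abg import EmaneIeee80211abgModel
# and registers the class under the config name "emane_ieee80211abg",
# which must match that class's _name attribute.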
||||
|
||||
def addobj(self, obj):
|
||||
''' add a new EmaneNode object to this Emane controller object
|
||||
'''
|
||||
self._objslock.acquire()
|
||||
if obj.objid in self._objs:
|
||||
self._objslock.release()
|
||||
raise KeyError, "non-unique EMANE object id %s for %s" % \
|
||||
(obj.objid, obj)
|
||||
self._objs[obj.objid] = obj
|
||||
self._objslock.release()
|
||||
|
||||
def getmodels(self, n):
|
||||
''' Used with XML export; see ConfigurableManager.getmodels()
|
||||
'''
|
||||
r = ConfigurableManager.getmodels(self, n)
|
||||
# EMANE global params are stored with first EMANE node (if non-default
|
||||
# values are configured)
|
||||
sorted_ids = sorted(self.configs.keys())
|
||||
if None in self.configs and len(sorted_ids) > 1 and \
|
||||
n.objid == sorted_ids[1]:
|
||||
v = self.configs[None]
|
||||
for model in v:
|
||||
cls = self._modelclsmap[model[0]]
|
||||
vals = model[1]
|
||||
r.append((cls, vals))
|
||||
return r
|
||||
|
||||
def getifcconfig(self, nodenum, conftype, defaultvalues, ifc):
|
||||
# use the network-wide config values or interface(NEM)-specific values?
|
||||
if ifc is None:
|
||||
return self.getconfig(nodenum, conftype, defaultvalues)[1]
|
||||
else:
|
||||
# don't use default values when interface config is the same as net
|
||||
# note here that using ifc.node.objid as key allows for only one type
|
||||
# of each model per node; TODO: use both node and interface as key
|
||||
return self.getconfig(ifc.node.objid, conftype, None)[1]
|
||||
|
||||
def setup(self):
|
||||
''' Populate self._objs with EmaneNodes; perform distributed setup;
|
||||
associate models with EmaneNodes from self.config.
|
||||
'''
|
||||
with self.session._objslock:
|
||||
for obj in self.session.objs():
|
||||
if isinstance(obj, EmaneNode):
|
||||
self.addobj(obj)
|
||||
if len(self._objs) == 0:
|
||||
return False
|
||||
if self.checkdistributed():
|
||||
# we are slave, but haven't received a platformid yet
|
||||
cfgval = self.getconfig(None, self.emane_config._name,
|
||||
self.emane_config.getdefaultvalues())[1]
|
||||
i = self.emane_config.getnames().index('platform_id_start')
|
||||
if cfgval[i] == self.emane_config.getdefaultvalues()[i]:
|
||||
return False
|
||||
self.setnodemodels()
|
||||
return True
|
||||
|
||||
def startup(self):
|
||||
''' after all the EmaneNode objects have been added, build XML files
|
||||
and start the daemons
|
||||
'''
|
||||
self.reset()
|
||||
if not self.setup():
|
||||
return
|
||||
with self._objslock:
|
||||
self.buildxml()
|
||||
self.starteventmonitor()
|
||||
if self.numnems() > 0:
|
||||
self.startdaemons()
|
||||
self.installnetifs()
|
||||
|
||||
def poststartup(self):
|
||||
''' Retransmit location events now that all NEMs are active.
|
||||
'''
|
||||
if self.doeventmonitor():
|
||||
return
|
||||
with self._objslock:
|
||||
for n in sorted(self._objs.keys()):
|
||||
e = self._objs[n]
|
||||
for netif in e.netifs():
|
||||
(x, y, z) = netif.node.position.get()
|
||||
e.setnemposition(netif, x, y, z)
|
||||
|
||||
def reset(self):
|
||||
''' remove all EmaneNode objects from the dictionary,
|
||||
reset port numbers and nem id counters
|
||||
'''
|
||||
with self._objslock:
|
||||
self._objs.clear()
|
||||
# don't clear self._ifccounts here; NEM counts are needed for buildxml
|
||||
self.platformport = self.session.getcfgitemint('emane_platform_port',
|
||||
8100)
|
||||
self.transformport = self.session.getcfgitemint('emane_transform_port',
|
||||
8200)
|
||||
|
||||
def shutdown(self):
|
||||
''' stop all EMANE daemons
|
||||
'''
|
||||
self._ifccountslock.acquire()
|
||||
self._ifccounts.clear()
|
||||
self._ifccountslock.release()
|
||||
self._objslock.acquire()
|
||||
if len(self._objs) == 0:
|
||||
self._objslock.release()
|
||||
return
|
||||
self.info("Stopping EMANE daemons.")
|
||||
self.deinstallnetifs()
|
||||
self.stopdaemons()
|
||||
self.stopeventmonitor()
|
||||
self._objslock.release()
|
||||
|
||||
def handledistributed(self, msg):
|
||||
''' Broker handler for processing CORE API messages as they are
|
||||
received. This is used to snoop the Link add messages to get NEM
|
||||
counts of NEMs that exist on other servers.
|
||||
'''
|
||||
if msg.msgtype == coreapi.CORE_API_LINK_MSG and \
|
||||
msg.flags & coreapi.CORE_API_ADD_FLAG:
|
||||
nn = msg.nodenumbers()
|
||||
# first node is always link layer node in Link add message
|
||||
if nn[0] in self.session.broker.nets:
|
||||
serverlist = self.session.broker.getserversbynode(nn[1])
|
||||
for server in serverlist:
|
||||
self._ifccountslock.acquire()
|
||||
if server not in self._ifccounts:
|
||||
self._ifccounts[server] = 1
|
||||
else:
|
||||
self._ifccounts[server] += 1
|
||||
self._ifccountslock.release()
|
||||
|
||||
def checkdistributed(self):
|
||||
''' Check for EMANE nodes that exist on multiple emulation servers and
|
||||
coordinate the NEM id and port number space.
|
||||
If we are the master EMANE node, return False so initialization will
|
||||
proceed as normal; otherwise slaves return True here and
|
||||
initialization is deferred.
|
||||
'''
|
||||
# check with the session if we are the "master" Emane object?
|
||||
master = False
|
||||
self._objslock.acquire()
|
||||
if len(self._objs) > 0:
|
||||
master = self.session.master
|
||||
self.info("Setup EMANE with master=%s." % master)
|
||||
self._objslock.release()
|
||||
|
||||
# we are not the master Emane object, wait for nem id and ports
|
||||
if not master:
|
||||
return True
|
||||
|
||||
cfgval = self.getconfig(None, self.emane_config._name,
|
||||
self.emane_config.getdefaultvalues())[1]
|
||||
values = list(cfgval)
|
||||
|
||||
nemcount = 0
|
||||
self._objslock.acquire()
|
||||
for n in self._objs:
|
||||
emanenode = self._objs[n]
|
||||
nemcount += emanenode.numnetif()
|
||||
nemid = int(self.emane_config.valueof("nem_id_start", values))
|
||||
nemid += nemcount
|
||||
platformid = int(self.emane_config.valueof("platform_id_start", values))
|
||||
names = list(self.emane_config.getnames())
|
||||
|
||||
# build an ordered list of servers so platform ID is deterministic
|
||||
servers = []
|
||||
for n in sorted(self._objs):
|
||||
for s in self.session.broker.getserversbynode(n):
|
||||
if s not in servers:
|
||||
servers.append(s)
|
||||
self._objslock.release()
|
||||
|
||||
for server in servers:
|
||||
if server == "localhost":
|
||||
continue
|
||||
(host, port, sock) = self.session.broker.getserver(server)
|
||||
if sock is None:
|
||||
continue
|
||||
platformid += 1
|
||||
typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE
|
||||
values[names.index("platform_id_start")] = str(platformid)
|
||||
values[names.index("nem_id_start")] = str(nemid)
|
||||
msg = EmaneGlobalModel.toconfmsg(flags=0, nodenum=None,
|
||||
typeflags=typeflags, values=values)
|
||||
sock.send(msg)
|
||||
# increment nemid for next server by number of interfaces
|
||||
self._ifccountslock.acquire()
|
||||
if server in self._ifccounts:
|
||||
nemid += self._ifccounts[server]
|
||||
self._ifccountslock.release()
|
||||
|
||||
return False
|
||||
|
||||
def buildxml(self):
|
||||
''' Build all of the XML files required to run EMANE.
|
||||
'''
|
||||
# assume self._objslock is already held here
|
||||
if self.verbose:
|
||||
self.info("Emane.buildxml()")
|
||||
self.buildplatformxml()
|
||||
self.buildnemxml()
|
||||
self.buildtransportxml()
|
||||
|
||||
def xmldoc(self, doctype):
|
||||
''' Returns an XML xml.minidom.Document with a DOCTYPE tag set to the
|
||||
provided doctype string, and an initial element having the same
|
||||
name.
|
||||
'''
|
||||
# we hack in the DOCTYPE using the parser
|
||||
docstr = """<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE %s SYSTEM "file:///usr/share/emane/dtd/%s.dtd">
|
||||
<%s/>""" % (doctype, doctype, doctype)
|
||||
# normally this would be: doc = Document()
|
||||
return parseString(docstr)
|
||||
|
||||
def xmlparam(self, doc, name, value):
|
||||
''' Convenience function for building a parameter tag of the format:
|
||||
<param name="name" value="value" />
|
||||
'''
|
||||
p = doc.createElement("param")
|
||||
p.setAttribute("name", name)
|
||||
p.setAttribute("value", value)
|
||||
return p
|
||||
|
||||
def xmlshimdefinition(self, doc, name):
|
||||
''' Convenience function for building a definition tag of the format:
|
||||
<shim definition="name" />
|
||||
'''
|
||||
p = doc.createElement("shim")
|
||||
p.setAttribute("definition", name)
|
||||
return p
|
||||
|
||||
def xmlwrite(self, doc, filename):
|
||||
''' Write the given XML document to the specified filename.
|
||||
'''
|
||||
#self.info("%s" % doc.toprettyxml(indent=" "))
|
||||
pathname = os.path.join(self.session.sessiondir, filename)
|
||||
f = open(pathname, "w")
|
||||
doc.writexml(writer=f, indent="", addindent=" ", newl="\n", \
|
||||
encoding="UTF-8")
|
||||
f.close()
|
||||
|
||||
def setnodemodels(self):
|
||||
''' Associate EmaneModel classes with EmaneNode nodes. The model
|
||||
configurations are stored in self.configs.
|
||||
'''
|
||||
for n in self._objs:
|
||||
self.setnodemodel(n)
|
||||
|
||||
def setnodemodel(self, n):
|
||||
emanenode = self._objs[n]
|
||||
for (t, v) in self.configs[n]:
|
||||
if t is None:
|
||||
continue
|
||||
if t == self.emane_config._name:
|
||||
continue
|
||||
# only use the first valid EmaneModel
|
||||
# convert model name to class (e.g. emane_rfpipe -> EmaneRfPipe)
|
||||
cls = self._modelclsmap[t]
|
||||
emanenode.setmodel(cls, v)
|
||||
return True
|
||||
# no model has been configured for this EmaneNode
|
||||
return False
|
||||
|
||||
def nemlookup(self, nemid):
|
||||
''' Look for the given numerical NEM ID and return the first matching
|
||||
EmaneNode and NEM interface.
|
||||
'''
|
||||
emanenode = None
|
||||
netif = None
|
||||
|
||||
for n in self._objs:
|
||||
emanenode = self._objs[n]
|
||||
netif = emanenode.getnemnetif(nemid)
|
||||
if netif is not None:
|
||||
break
|
||||
else:
|
||||
emanenode = None
|
||||
return (emanenode, netif)
|
||||
|
||||
def numnems(self):
|
||||
''' Return the number of NEMs emulated locally.
|
||||
'''
|
||||
count = 0
|
||||
for o in self._objs.values():
|
||||
count += len(o.netifs())
|
||||
return count
|
||||
|
||||
def buildplatformxml(self):
|
||||
''' Build a platform.xml file now that all nodes are configured.
|
||||
'''
|
||||
values = self.getconfig(None, "emane",
|
||||
self.emane_config.getdefaultvalues())[1]
|
||||
doc = self.xmldoc("platform")
|
||||
plat = doc.getElementsByTagName("platform").pop()
|
||||
platformid = self.emane_config.valueof("platform_id_start", values)
|
||||
plat.setAttribute("name", "Platform %s" % platformid)
|
||||
plat.setAttribute("id", platformid)
|
||||
|
||||
names = list(self.emane_config.getnames())
|
||||
platform_names = names[:len(self.emane_config._confmatrix_platform)]
|
||||
platform_names.remove('platform_id_start')
|
||||
|
||||
# append all platform options (except starting id) to doc
|
||||
map( lambda n: plat.appendChild(self.xmlparam(doc, n, \
|
||||
self.emane_config.valueof(n, values))), platform_names)
|
||||
|
||||
nemid = int(self.emane_config.valueof("nem_id_start", values))
|
||||
# assume self._objslock is already held here
|
||||
for n in sorted(self._objs.keys()):
|
||||
emanenode = self._objs[n]
|
||||
nems = emanenode.buildplatformxmlentry(doc)
|
||||
for netif in sorted(nems, key=lambda n: n.node.objid):
|
||||
# set ID, endpoints here
|
||||
nementry = nems[netif]
|
||||
nementry.setAttribute("id", "%d" % nemid)
|
||||
# insert nem options (except nem id) to doc
|
||||
trans_addr = self.emane_config.valueof("transportendpoint", \
|
||||
values)
|
||||
nementry.insertBefore(self.xmlparam(doc, "transportendpoint", \
|
||||
"%s:%d" % (trans_addr, self.transformport)),
|
||||
nementry.firstChild)
|
||||
platform_addr = self.emane_config.valueof("platformendpoint", \
|
||||
values)
|
||||
nementry.insertBefore(self.xmlparam(doc, "platformendpoint", \
|
||||
"%s:%d" % (platform_addr, self.platformport)),
|
||||
nementry.firstChild)
|
||||
plat.appendChild(nementry)
|
||||
emanenode.setnemid(netif, nemid)
|
||||
# NOTE: MAC address set before here is incorrect, including the one
|
||||
# sent from the GUI via link message
|
||||
# MAC address determined by NEM ID: 02:02:00:00:nn:nn"
|
||||
macstr = self._hwaddr_prefix + ":00:00:"
|
||||
macstr += "%02X:%02X" % ((nemid >> 8) & 0xFF, nemid & 0xFF)
|
||||
netif.sethwaddr(MacAddr.fromstring(macstr))
|
||||
# increment counters used to manage IDs, endpoint port numbers
|
||||
nemid += 1
|
||||
self.platformport += 1
|
||||
self.transformport += 1
|
||||
self.xmlwrite(doc, "platform.xml")
|
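# Worked example (illustrative only) of the NEM-ID-to-MAC mapping used in
# buildplatformxml() above:
#     nemid = 259  ->  (259 >> 8) & 0xFF == 1 and 259 & 0xFF == 3,
#     so the TAP device hardware address becomes "02:02:00:00:01:03".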
||||
|
||||
def buildnemxml(self):
|
||||
''' Builds the xxxnem.xml, xxxmac.xml, and xxxphy.xml files which
|
||||
are defined on a per-EmaneNode basis.
|
||||
'''
|
||||
for n in sorted(self._objs.keys()):
|
||||
emanenode = self._objs[n]
|
||||
nems = emanenode.buildnemxmlfiles(self)
|
||||
|
||||
def buildtransportxml(self):
|
||||
''' Calls emanegentransportxml using a platform.xml file to build
|
||||
the transportdaemon*.xml.
|
||||
'''
|
||||
try:
|
||||
subprocess.check_call(["emanegentransportxml", "platform.xml"], \
|
||||
cwd=self.session.sessiondir)
|
||||
except Exception, e:
|
||||
self.info("error running emanegentransportxml: %s" % e)
|
||||
|
||||
def startdaemons(self):
|
||||
''' Start the appropriate EMANE daemons. The transport daemon will
|
||||
bind to the TAP interfaces.
|
||||
'''
|
||||
if self.verbose:
|
||||
self.info("Emane.startdaemons()")
|
||||
path = self.session.sessiondir
|
||||
loglevel = "2"
|
||||
cfgloglevel = self.session.getcfgitemint("emane_log_level")
|
||||
realtime = self.session.getcfgitembool("emane_realtime", True)
|
||||
if cfgloglevel:
|
||||
self.info("setting user-defined EMANE log level: %d" % cfgloglevel)
|
||||
loglevel = str(cfgloglevel)
|
||||
emanecmd = ["emane", "-d", "--logl", loglevel, "-f", \
|
||||
os.path.join(path, "emane.log")]
|
||||
if realtime:
|
||||
emanecmd += "-r",
|
||||
try:
|
||||
cmd = emanecmd + [os.path.join(path, "platform.xml")]
|
||||
if self.verbose:
|
||||
self.info("Emane.startdaemons() running %s" % str(cmd))
|
||||
subprocess.check_call(cmd, cwd=path)
|
||||
except Exception, e:
|
||||
errmsg = "error starting emane: %s" % e
|
||||
self.session.exception(coreapi.CORE_EXCP_LEVEL_FATAL, "emane",
|
||||
None, errmsg)
|
||||
self.info(errmsg)
|
||||
|
||||
# start one transport daemon per transportdaemon*.xml file
|
||||
transcmd = ["emanetransportd", "-d", "--logl", loglevel, "-f", \
|
||||
os.path.join(path, "emanetransportd.log")]
|
||||
if realtime:
|
||||
transcmd += "-r",
|
||||
files = os.listdir(path)
|
||||
for file in files:
|
||||
if file[-3:] == "xml" and file[:15] == "transportdaemon":
|
||||
cmd = transcmd + [os.path.join(path, file)]
|
||||
try:
|
||||
if self.verbose:
|
||||
self.info("Emane.startdaemons() running %s" % str(cmd))
|
||||
subprocess.check_call(cmd, cwd=path)
|
||||
except Exception, e:
|
||||
errmsg = "error starting emanetransportd: %s" % e
|
||||
self.session.exception(coreapi.CORE_EXCP_LEVEL_FATAL, "emane",
|
||||
None, errmsg)
|
||||
self.info(errmsg)
|
||||
|
||||
def stopdaemons(self):
|
||||
''' Kill the appropriate EMANE daemons.
|
||||
'''
|
||||
# TODO: we may want to improve this if we had the PIDs from the
|
||||
# specific EMANE daemons that we've started
|
||||
subprocess.call(["killall", "-q", "emane"])
|
||||
subprocess.call(["killall", "-q", "emanetransportd"])
|
||||
|
||||
def installnetifs(self):
|
||||
''' Install TUN/TAP virtual interfaces into their proper namespaces
|
||||
now that the EMANE daemons are running.
|
||||
'''
|
||||
for n in sorted(self._objs.keys()):
|
||||
emanenode = self._objs[n]
|
||||
if self.verbose:
|
||||
self.info("Emane.installnetifs() for node %d" % n)
|
||||
emanenode.installnetifs()
|
||||
|
||||
def deinstallnetifs(self):
|
||||
''' Uninstall TUN/TAP virtual interfaces.
|
||||
'''
|
||||
for n in sorted(self._objs.keys()):
|
||||
emanenode = self._objs[n]
|
||||
emanenode.deinstallnetifs()
|
||||
|
||||
def configure(self, session, msg):
|
||||
''' Handle configuration messages for global EMANE config.
|
||||
'''
|
||||
r = self.emane_config.configure_emane(session, msg)
|
||||
|
||||
# extra logic to start slave Emane object after nemid has been
|
||||
# configured from the master
|
||||
conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE)
|
||||
if conftype == coreapi.CONF_TYPE_FLAGS_UPDATE and \
|
||||
self.session.master == False:
|
||||
self.startup()
|
||||
|
||||
return r
|
||||
|
||||
def doeventmonitor(self):
|
||||
''' Returns boolean whether or not EMANE events will be monitored.
|
||||
'''
|
||||
# this support must be explicitly turned on; by default, CORE will
|
||||
# generate the EMANE events when nodes are moved
|
||||
return self.session.getcfgitembool('emane_event_monitor', False)
|
||||
|
||||
def starteventmonitor(self):
|
||||
''' Start monitoring EMANE location events if configured to do so.
|
||||
'''
|
||||
if self.verbose:
|
||||
self.info("Emane.starteventmonitor()")
|
||||
if not self.doeventmonitor():
|
||||
return
|
||||
if self.service is None:
|
||||
errmsg = "Warning: EMANE events will not be generated " \
|
||||
"because the emaneeventservice\n binding was " \
|
||||
"unable to load " \
|
||||
"(install the python-emaneeventservice bindings)"
|
||||
self.session.exception(coreapi.CORE_EXCP_LEVEL_WARNING, "emane",
|
||||
None, errmsg)
|
||||
self.warn(errmsg)
|
||||
|
||||
return
|
||||
self.doeventloop = True
|
||||
self.eventmonthread = threading.Thread(target = self.eventmonitorloop)
|
||||
self.eventmonthread.daemon = True
|
||||
self.eventmonthread.start()
|
||||
|
||||
|
||||
def stopeventmonitor(self):
|
||||
''' Stop monitoring EMANE location events.
|
||||
'''
|
||||
self.doeventloop = False
|
||||
if self.service is not None:
|
||||
self.service.breakloop()
|
||||
# reset the service, otherwise nextEvent won't work
|
||||
del self.service
|
||||
self.service = emaneeventservice.EventService()
|
||||
if self.eventmonthread is not None:
|
||||
self.eventmonthread.join()
|
||||
self.eventmonthread = None
|
||||
|
||||
def eventmonitorloop(self):
|
||||
''' Thread target that monitors EMANE location events.
|
||||
'''
|
||||
if self.service is None:
|
||||
return
|
||||
self.info("subscribing to EMANE location events")
|
||||
#self.service.subscribe(emaneeventlocation.EVENT_ID,
|
||||
# self.handlelocationevent)
|
||||
#self.service.loop()
|
||||
#self.service.subscribe(emaneeventlocation.EVENT_ID, None)
|
||||
while self.doeventloop is True:
|
||||
(event, platform, nem, component, data) = self.service.nextEvent()
|
||||
if event == emaneeventlocation.EVENT_ID:
|
||||
self.handlelocationevent(event, platform, nem, component, data)
|
||||
|
||||
self.info("unsubscribing from EMANE location events")
|
||||
#self.service.unsubscribe(emaneeventlocation.EVENT_ID)
|
||||
|
||||
def handlelocationevent(self, event, platform, nem, component, data):
|
||||
''' Handle an EMANE location event.
|
||||
'''
|
||||
event = emaneeventlocation.EventLocation(data)
|
||||
entries = event.entries()
|
||||
for e in entries.values():
|
||||
# yaw,pitch,roll,azimuth,elevation,velocity are unhandled
|
||||
(nemid, lat, long, alt) = e[:4]
|
||||
# convert nemid to node number
|
||||
(emanenode, netif) = self.nemlookup(nemid)
|
||||
if netif is None:
|
||||
if self.verbose:
|
||||
self.info("location event for unknown NEM %s" % nemid)
|
||||
continue
|
||||
n = netif.node.objid
|
||||
# convert from lat/long/alt to x,y,z coordinates
|
||||
(x, y, z) = self.session.location.getxyz(lat, long, alt)
|
||||
x = int(x)
|
||||
y = int(y)
|
||||
z = int(z)
|
||||
if self.verbose:
|
||||
self.info("location event NEM %s (%s, %s, %s) -> (%s, %s, %s)" \
|
||||
% (nemid, lat, long, alt, x, y, z))
|
||||
try:
|
||||
if (x.bit_length() > 16) or (y.bit_length() > 16) or \
|
||||
(z.bit_length() > 16) or (x < 0) or (y < 0) or (z < 0):
|
||||
warntxt = "Unable to build node location message since " \
|
||||
"received lat/long/alt exceeds coordinate " \
|
||||
"space: NEM %s (%d, %d, %d)" % (nemid, x, y, z)
|
||||
self.info(warntxt)
|
||||
self.session.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
|
||||
"emane", None, warntxt)
|
||||
continue
|
||||
except AttributeError:
|
||||
# int.bit_length() not present on Python 2.6
|
||||
pass
|
||||
|
||||
# generate a node message for this location update
|
||||
try:
|
||||
node = self.session.obj(n)
|
||||
except KeyError:
|
||||
self.warn("location event NEM %s has no corresponding node %s" \
|
||||
% (nemid, n))
|
||||
continue
|
||||
# don't use node.setposition(x,y,z) which generates an event
|
||||
node.position.set(x,y,z)
|
||||
msg = node.tonodemsg(flags=0)
|
||||
self.session.broadcastraw(None, msg)
|
||||
self.session.sdt.updatenodegeo(node, lat, long, alt)
|
||||
|
||||
|
||||
class EmaneModel(WirelessModel):
|
||||
''' EMANE models inherit from this parent class, which takes care of
|
||||
handling configuration messages based on the _confmatrix list of
|
||||
configurable parameters. Helper functions also live here.
|
||||
'''
|
||||
_prefix = {'y': 1e-24, # yocto
|
||||
'z': 1e-21, # zepto
|
||||
'a': 1e-18, # atto
|
||||
'f': 1e-15, # femto
|
||||
'p': 1e-12, # pico
|
||||
'n': 1e-9, # nano
|
||||
'u': 1e-6, # micro
|
||||
'm': 1e-3, # mili
|
||||
'c': 1e-2, # centi
|
||||
'd': 1e-1, # deci
|
||||
'k': 1e3, # kilo
|
||||
'M': 1e6, # mega
|
||||
'G': 1e9, # giga
|
||||
'T': 1e12, # tera
|
||||
'P': 1e15, # peta
|
||||
'E': 1e18, # exa
|
||||
'Z': 1e21, # zetta
|
||||
'Y': 1e24, # yotta
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def configure_emane(cls, session, msg):
|
||||
''' Handle configuration messages for setting up a model.
|
||||
Pass the Emane object as the manager object.
|
||||
'''
|
||||
return cls.configure(session.emane, msg)
|
||||
|
||||
@classmethod
|
||||
def emane074_fixup(cls, value, div=1.0):
|
||||
''' Helper for converting 0.8.1 and newer values to EMANE 0.7.4
|
||||
compatible values.
|
||||
NOTE: This should be removed when support for 0.7.4 has been
|
||||
deprecated.
|
||||
'''
|
||||
if div == 0:
|
||||
return "0"
|
||||
if type(value) is not str:
|
||||
return str(value / div)
|
||||
if value.endswith(tuple(cls._prefix.keys())):
|
||||
suffix = value[-1]
|
||||
value = float(value[:-1]) * cls._prefix[suffix]
|
||||
return str(int(value / div))
|
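# Worked example (illustrative only): emane074_fixup("10M", div=1000)
# expands the 'M' prefix to 10 * 1e6 = 1e7, divides by 1000, and returns
# the string "10000" for use with EMANE 0.7.4 configuration values.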
||||
|
||||
def buildnemxmlfiles(self, e, ifc):
|
||||
''' Build the necessary nem, mac, and phy XMLs in the given path.
|
||||
'''
|
||||
raise NotImplementedError
|
||||
|
||||
def buildplatformxmlnementry(self, doc, n, ifc):
|
||||
''' Build the NEM definition that goes into the platform.xml file.
|
||||
This returns an XML element that will be added to the <platform/> element.
|
||||
This default method supports per-interface config
|
||||
(e.g. <nem definition="n2_0_63emane_rfpipe.xml" id="1"> or per-EmaneNode
|
||||
config (e.g. <nem definition="n1emane_rfpipe.xml" id="1">.
|
||||
This can be overriden by a model for NEM flexibility; n is the EmaneNode.
|
||||
'''
|
||||
nem = doc.createElement("nem")
|
||||
nem.setAttribute("name", ifc.localname)
|
||||
# if this netif contains a non-standard (per-interface) config,
|
||||
# then we need to use a more specific xml file here
|
||||
nem.setAttribute("definition", self.nemxmlname(ifc))
|
||||
return nem
|
||||
|
||||
def buildplatformxmltransportentry(self, doc, n, ifc):
|
||||
''' Build the transport definition that goes into the platform.xml file.
|
||||
This returns an XML element that will added to the nem definition.
|
||||
This default method supports raw and virtual transport types, but may be
|
||||
overriden by a model to support the e.g. pluggable virtual transport.
|
||||
n is the EmaneNode.
|
||||
'''
|
||||
type = ifc.transport_type
|
||||
if not type:
|
||||
e.info("warning: %s interface type unsupported!" % ifc.name)
|
||||
type = "raw"
|
||||
trans = doc.createElement("transport")
|
||||
trans.setAttribute("definition", n.transportxmlname(type))
|
||||
trans.setAttribute("group", "1")
|
||||
param = doc.createElement("param")
|
||||
param.setAttribute("name", "device")
|
||||
if type == "raw":
|
||||
# raw RJ45 name e.g. 'eth0'
|
||||
param.setAttribute("value", ifc.name)
|
||||
else:
|
||||
# virtual TAP name e.g. 'n3.0.17'
|
||||
param.setAttribute("value", ifc.localname)
|
||||
trans.appendChild(param)
|
||||
return trans
|
||||
|
||||
def basename(self, ifc = None):
|
||||
''' Return the string that other names are based on.
|
||||
If a specific config is stored for a node's interface, a unique
|
||||
filename is needed; otherwise the name of the EmaneNode is used.
|
||||
'''
|
||||
emane = self.session.emane
|
||||
name = "n%s" % self.objid
|
||||
if ifc is not None:
|
||||
nodenum = ifc.node.objid
|
||||
if emane.getconfig(nodenum, self._name, None)[1] is not None:
|
||||
name = ifc.localname.replace('.','_')
|
||||
return "%s%s" % (name, self._name)
|
||||
|
||||
def nemxmlname(self, ifc = None):
|
||||
''' Return the string name for the NEM XML file, e.g. 'n3rfpipenem.xml'
|
||||
'''
|
||||
return "%snem.xml" % self.basename(ifc)
|
||||
|
||||
def shimxmlname(self, ifc = None):
|
||||
''' Return the string name for the SHIM XML file, e.g. 'commeffectshim.xml'
|
||||
'''
|
||||
return "%sshim.xml" % self.basename(ifc)
|
||||
|
||||
def macxmlname(self, ifc = None):
|
||||
''' Return the string name for the MAC XML file, e.g. 'n3rfpipemac.xml'
|
||||
'''
|
||||
return "%smac.xml" % self.basename(ifc)
|
||||
|
||||
def phyxmlname(self, ifc = None):
|
||||
''' Return the string name for the PHY XML file, e.g. 'n3rfpipephy.xml'
|
||||
'''
|
||||
return "%sphy.xml" % self.basename(ifc)
|
||||
|
||||
def update(self, moved, moved_netifs):
|
||||
''' invoked from MobilityModel when nodes are moved; this causes
|
||||
EMANE location events to be generated for the nodes in the moved
|
||||
list, making EmaneModels compatible with Ns2ScriptedMobility
|
||||
'''
|
||||
try:
|
||||
wlan = self.session.obj(self.objid)
|
||||
except KeyError:
|
||||
return
|
||||
wlan.setnempositions(moved_netifs)
|
||||
|
||||
def linkconfig(self, netif, bw = None, delay = None,
|
||||
loss = None, duplicate = None, jitter = None, netif2 = None):
|
||||
''' Invoked when a Link Message is received. Default is unimplemented.
|
||||
'''
|
||||
warntxt = "EMANE model %s does not support link " % self._name
|
||||
warntxt += "configuration, dropping Link Message"
|
||||
self.session.warn(warntxt)
|
||||
|
||||
|
||||
class EmaneGlobalModel(EmaneModel):
|
||||
''' Global EMANE configuration options.
|
||||
'''
|
||||
def __init__(self, session, objid = None, verbose = False):
|
||||
EmaneModel.__init__(self, session, objid, verbose)
|
||||
|
||||
_name = "emane"
|
||||
_confmatrix_platform = [
|
||||
("otamanagerchannelenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'on,off', 'enable OTA Manager channel'),
|
||||
("otamanagergroup", coreapi.CONF_DATA_TYPE_STRING, '224.1.2.8:45702',
|
||||
'', 'OTA Manager group'),
|
||||
("otamanagerdevice", coreapi.CONF_DATA_TYPE_STRING, 'lo',
|
||||
'', 'OTA Manager device'),
|
||||
("eventservicegroup", coreapi.CONF_DATA_TYPE_STRING, '224.1.2.8:45703',
|
||||
'', 'Event Service group'),
|
||||
("eventservicedevice", coreapi.CONF_DATA_TYPE_STRING, 'lo',
|
||||
'', 'Event Service device'),
|
||||
("platform_id_start", coreapi.CONF_DATA_TYPE_INT32, '1',
|
||||
'', 'starting Platform ID'),
|
||||
("debugportenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'on,off', 'enable debug port'),
|
||||
("debugport", coreapi.CONF_DATA_TYPE_UINT16, '47000',
|
||||
'', 'debug port number'),
|
||||
]
|
||||
_confmatrix_nem = [
|
||||
("transportendpoint", coreapi.CONF_DATA_TYPE_STRING, 'localhost',
|
||||
'', 'Transport endpoint address (port is automatic)'),
|
||||
("platformendpoint", coreapi.CONF_DATA_TYPE_STRING, 'localhost',
|
||||
'', 'Platform endpoint address (port is automatic)'),
|
||||
("nem_id_start", coreapi.CONF_DATA_TYPE_INT32, '1',
|
||||
'', 'starting NEM ID'),
|
||||
]
|
||||
_confmatrix = _confmatrix_platform + _confmatrix_nem
|
||||
_confgroups = "Platform Attributes:1-%d|NEM Parameters:%d-%d" % \
|
||||
(len(_confmatrix_platform), len(_confmatrix_platform) + 1,
|
||||
len(_confmatrix))
|
||||
|
119
daemon/core/emane/ieee80211abg.py
Normal file

@@ -0,0 +1,119 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
ieee80211abg.py: EMANE IEEE 802.11abg model for CORE
|
||||
'''
|
||||
|
||||
import sys
|
||||
import string
|
||||
from core.api import coreapi
|
||||
|
||||
from core.constants import *
|
||||
from emane import EmaneModel
|
||||
from universal import EmaneUniversalModel
|
||||
|
||||
class EmaneIeee80211abgModel(EmaneModel):
|
||||
def __init__(self, session, objid = None, verbose = False):
|
||||
EmaneModel.__init__(self, session, objid, verbose)
|
||||
|
||||
# model name
|
||||
_name = "emane_ieee80211abg"
|
||||
_80211rates = '1 1 Mbps,2 2 Mbps,3 5.5 Mbps,4 11 Mbps,5 6 Mbps,' + \
|
||||
'6 9 Mbps,7 12 Mbps,8 18 Mbps,9 24 Mbps,10 36 Mbps,11 48 Mbps,' + \
|
||||
'12 54 Mbps'
|
||||
# MAC parameters
|
||||
_confmatrix_mac = [
|
||||
("mode", coreapi.CONF_DATA_TYPE_UINT8, '0',
|
||||
'0 802.11b (DSSS only),1 802.11b (DSSS only),' + \
|
||||
'2 802.11a or g (OFDM),3 802.11b/g (DSSS and OFDM)', 'mode'),
|
||||
("enablepromiscuousmode", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'On,Off', 'enable promiscuous mode'),
|
||||
("distance", coreapi.CONF_DATA_TYPE_UINT32, '1000',
|
||||
'', 'max distance (m)'),
|
||||
("unicastrate", coreapi.CONF_DATA_TYPE_UINT8, '4', _80211rates,
|
||||
'unicast rate (Mbps)'),
|
||||
("multicastrate", coreapi.CONF_DATA_TYPE_UINT8, '1', _80211rates,
|
||||
'multicast rate (Mbps)'),
|
||||
("rtsthreshold", coreapi.CONF_DATA_TYPE_UINT16, '0',
|
||||
'', 'RTS threshold (bytes)'),
|
||||
("wmmenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'On,Off', 'WiFi Multimedia (WMM)'),
|
||||
("pcrcurveuri", coreapi.CONF_DATA_TYPE_STRING,
|
||||
'/usr/share/emane/models/ieee80211abg/xml/ieee80211pcr.xml',
|
||||
'', 'SINR/PCR curve file'),
|
||||
("flowcontrolenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'On,Off', 'enable traffic flow control'),
|
||||
("flowcontroltokens", coreapi.CONF_DATA_TYPE_UINT16, '10',
|
||||
'', 'number of flow control tokens'),
|
||||
("queuesize", coreapi.CONF_DATA_TYPE_STRING, '0:255 1:255 2:255 3:255',
|
||||
'', 'queue size (0-4:size)'),
|
||||
("cwmin", coreapi.CONF_DATA_TYPE_STRING, '0:32 1:32 2:16 3:8',
|
||||
'', 'min contention window (0-4:minw)'),
|
||||
("cwmax", coreapi.CONF_DATA_TYPE_STRING, '0:1024 1:1024 2:64 3:16',
|
||||
'', 'max contention window (0-4:maxw)'),
|
||||
("aifs", coreapi.CONF_DATA_TYPE_STRING, '0:2 1:2 2:2 3:1',
|
||||
'', 'arbitration inter frame space (0-4:aifs)'),
|
||||
("txop", coreapi.CONF_DATA_TYPE_STRING, '0:0 1:0 2:0 3:0',
|
||||
'', 'txop (0-4:usec)'),
|
||||
("retrylimit", coreapi.CONF_DATA_TYPE_STRING, '0:3 1:3 2:3 3:3',
|
||||
'', 'retry limit (0-4:numretries)'),
|
||||
]
|
||||
# PHY parameters from Universal PHY
|
||||
_confmatrix_phy = EmaneUniversalModel._confmatrix
|
||||
|
||||
_confmatrix = _confmatrix_mac + _confmatrix_phy
|
||||
# value groupings
|
||||
_confgroups = "802.11 MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" \
|
||||
% (len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(_confmatrix))
|
||||
|
||||
def buildnemxmlfiles(self, e, ifc):
|
||||
''' Build the necessary nem, mac, and phy XMLs in the given path.
|
||||
If an individual NEM has a nonstandard config, we need to build
|
||||
that file also. Otherwise the WLAN-wide
|
||||
nXXemane_ieee80211abgnem.xml, nXXemane_ieee80211abgemac.xml,
|
||||
nXXemane_ieee80211abgphy.xml are used.
|
||||
'''
|
||||
# use the network-wide config values or interface(NEM)-specific values?
|
||||
if ifc is None:
|
||||
values = e.getconfig(self.objid, self._name,
|
||||
self.getdefaultvalues())[1]
|
||||
else:
|
||||
nodenum = ifc.node.objid
|
||||
values = e.getconfig(nodenum, self._name, None)[1]
|
||||
if values is None:
|
||||
# do not build specific files for this NEM when config is same
|
||||
# as the network
|
||||
return
|
||||
nemdoc = e.xmldoc("nem")
|
||||
nem = nemdoc.getElementsByTagName("nem").pop()
|
||||
nem.setAttribute("name", "ieee80211abg NEM")
|
||||
mactag = nemdoc.createElement("mac")
|
||||
mactag.setAttribute("definition", self.macxmlname(ifc))
|
||||
nem.appendChild(mactag)
|
||||
phytag = nemdoc.createElement("phy")
|
||||
phytag.setAttribute("definition", self.phyxmlname(ifc))
|
||||
nem.appendChild(phytag)
|
||||
e.xmlwrite(nemdoc, self.nemxmlname(ifc))
|
||||
|
||||
macdoc = e.xmldoc("mac")
|
||||
mac = macdoc.getElementsByTagName("mac").pop()
|
||||
mac.setAttribute("name", "ieee80211abg MAC")
|
||||
mac.setAttribute("library", "ieee80211abgmaclayer")
|
||||
|
||||
names = self.getnames()
|
||||
macnames = names[:len(self._confmatrix_mac)]
|
||||
phynames = names[len(self._confmatrix_mac):]
|
||||
|
||||
# append all MAC options to macdoc
|
||||
map( lambda n: mac.appendChild(e.xmlparam(macdoc, n, \
|
||||
self.valueof(n, values))), macnames)
|
||||
e.xmlwrite(macdoc, self.macxmlname(ifc))
|
||||
|
||||
phydoc = EmaneUniversalModel.getphydoc(e, self, values, phynames)
|
||||
e.xmlwrite(phydoc, self.phyxmlname(ifc))
|
||||
|
281
daemon/core/emane/nodes.py
Normal file

@@ -0,0 +1,281 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
nodes.py: definition of an EmaneNode class for implementing configuration
|
||||
control of an EMANE emulation. An EmaneNode has several attached NEMs that
|
||||
share the same MAC+PHY model.
|
||||
'''
|
||||
|
||||
import sys
|
||||
|
||||
from core.api import coreapi
|
||||
from core.coreobj import PyCoreNet
|
||||
try:
|
||||
import emaneeventservice
|
||||
import emaneeventlocation
|
||||
except Exception, e:
|
||||
''' Don't require all CORE users to have EMANE libeventservice and its
|
||||
Python bindings installed.
|
||||
'''
|
||||
pass
|
||||
|
||||
class EmaneNet(PyCoreNet):
|
||||
''' EMANE network base class.
|
||||
'''
|
||||
apitype = coreapi.CORE_NODE_EMANE
|
||||
linktype = coreapi.CORE_LINK_WIRELESS
|
||||
type = "wlan" # icon used
|
||||
|
||||
class EmaneNode(EmaneNet):
|
||||
''' EMANE node contains NEM configuration and causes connected nodes
|
||||
to have TAP interfaces (instead of VEth). These are managed by the
|
||||
Emane controller object that exists in a session.
|
||||
'''
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True):
|
||||
PyCoreNet.__init__(self, session, objid, name, verbose, start)
|
||||
self.verbose = verbose
|
||||
self.conf = ""
|
||||
self.up = False
|
||||
self.nemidmap = {}
|
||||
self.model = None
|
||||
self.mobility = None
|
||||
|
||||
def linkconfig(self, netif, bw = None, delay = None,
|
||||
loss = None, duplicate = None, jitter = None, netif2 = None):
|
||||
''' The CommEffect model supports link configuration.
|
||||
'''
|
||||
if not self.model:
|
||||
return
|
||||
return self.model.linkconfig(netif=netif, bw=bw, delay=delay, loss=loss,
|
||||
duplicate=duplicate, jitter=jitter, netif2=netif2)
|
||||
|
||||
def config(self, conf):
|
||||
#print "emane", self.name, "got config:", conf
|
||||
self.conf = conf
|
||||
|
||||
def shutdown(self):
|
||||
pass
|
||||
|
||||
def link(self, netif1, netif2):
|
||||
pass
|
||||
|
||||
def unlink(self, netif1, netif2):
|
||||
pass
|
||||
|
||||
def setmodel(self, model, config):
|
||||
''' set the EmaneModel associated with this node
|
||||
'''
|
||||
if (self.verbose):
|
||||
self.info("adding model %s" % model._name)
|
||||
if model._type == coreapi.CORE_TLV_REG_WIRELESS:
|
||||
# EmaneModel really uses values from ConfigurableManager
|
||||
# when buildnemxml() is called, not during init()
|
||||
self.model = model(session=self.session, objid=self.objid,
|
||||
verbose=self.verbose)
|
||||
elif model._type == coreapi.CORE_TLV_REG_MOBILITY:
|
||||
self.mobility = model(session=self.session, objid=self.objid,
|
||||
verbose=self.verbose, values=config)
|
||||
|
||||
def setnemid(self, netif, nemid):
|
||||
''' Record an interface to numerical ID mapping. The Emane controller
|
||||
object manages and assigns these IDs for all NEMs.
|
||||
'''
|
||||
self.nemidmap[netif] = nemid
|
||||
|
||||
def getnemid(self, netif):
|
||||
''' Given an interface, return its numerical ID.
|
||||
'''
|
||||
if netif not in self.nemidmap:
|
||||
return None
|
||||
else:
|
||||
return self.nemidmap[netif]
|
||||
|
||||
def getnemnetif(self, nemid):
|
||||
''' Given a numerical NEM ID, return its interface. This returns the
|
||||
first interface that matches the given NEM ID.
|
||||
'''
|
||||
for netif in self.nemidmap:
|
||||
if self.nemidmap[netif] == nemid:
|
||||
return netif
|
||||
return None
|
||||
|
||||
def netifs(self, sort=True):
|
||||
''' Retrieve list of linked interfaces sorted by node number.
|
||||
'''
|
||||
return sorted(self._netif.values(), key=lambda ifc: ifc.node.objid)
|
||||
|
||||
def buildplatformxmlentry(self, doc):
|
||||
''' Return a dictionary of XML elements describing the NEMs
|
||||
connected to this EmaneNode for inclusion in the platform.xml file.
|
||||
'''
|
||||
ret = {}
|
||||
if self.model is None:
|
||||
self.info("warning: EmaneNode %s has no associated model" % \
|
||||
self.name)
|
||||
return ret
|
||||
for netif in self.netifs():
|
||||
# <nem name="NODE-001" definition="rfpipenem.xml">
|
||||
nementry = self.model.buildplatformxmlnementry(doc, self, netif)
|
||||
# <transport definition="transvirtual.xml" group="1">
|
||||
# <param name="device" value="n1.0.158" />
|
||||
# </transport>
|
||||
trans = self.model.buildplatformxmltransportentry(doc, self, netif)
|
||||
nementry.appendChild(trans)
|
||||
ret[netif] = nementry
|
||||
|
||||
return ret
|
||||
|
||||
def buildnemxmlfiles(self, emane):
|
||||
''' Let the configured model build the necessary nem, mac, and phy
|
||||
XMLs.
|
||||
'''
|
||||
if self.model is None:
|
||||
return
|
||||
# build XML for overall network (EmaneNode) configs
|
||||
self.model.buildnemxmlfiles(emane, ifc=None)
|
||||
# build XML for specific interface (NEM) configs
|
||||
need_virtual = False
|
||||
need_raw = False
|
||||
vtype = "virtual"
|
||||
rtype = "raw"
|
||||
for netif in self.netifs():
|
||||
self.model.buildnemxmlfiles(emane, netif)
|
||||
if "virtual" in netif.transport_type:
|
||||
need_virtual = True
|
||||
vtype = netif.transport_type
|
||||
else:
|
||||
need_raw = True
|
||||
rtype = netif.transport_type
|
||||
# build transport XML files depending on type of interfaces involved
|
||||
if need_virtual:
|
||||
self.buildtransportxml(emane, vtype)
|
||||
if need_raw:
|
||||
self.buildtransportxml(emane, rtype)
|
||||
|
||||
def buildtransportxml(self, emane, type):
|
||||
''' Write a transport XML file for the Virtual or Raw Transport.
|
||||
'''
|
||||
transdoc = emane.xmldoc("transport")
|
||||
trans = transdoc.getElementsByTagName("transport").pop()
|
||||
trans.setAttribute("name", "%s Transport" % type.capitalize())
|
||||
trans.setAttribute("library", "trans%s" % type.lower())
|
||||
trans.appendChild(emane.xmlparam(transdoc, "bitrate", "0"))
|
||||
if "virtual" in type.lower():
|
||||
trans.appendChild(emane.xmlparam(transdoc, "devicepath",
|
||||
"/dev/net/tun"))
|
||||
emane.xmlwrite(transdoc, self.transportxmlname(type.lower()))
|
||||
|
||||
def transportxmlname(self, type):
|
||||
''' Return the string name for the Transport XML file,
|
||||
e.g. 'n3transvirtual.xml'
|
||||
'''
|
||||
return "n%strans%s.xml" % (self.objid, type)
|
||||
|
||||
|
||||
def installnetifs(self):
|
||||
''' Install TAP devices into their namespaces. This is done after
|
||||
EMANE daemons have been started, because that is their only chance
|
||||
to bind to the TAPs.
|
||||
'''
|
||||
if not self.session.emane.doeventmonitor() and \
|
||||
self.session.emane.service is None:
|
||||
warntxt = "unable to publish EMANE events because the eventservice "
|
||||
warntxt += "Python bindings failed to load"
|
||||
self.session.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.name,
|
||||
self.objid, warntxt)
|
||||
|
||||
for netif in self.netifs():
|
||||
if "virtual" in netif.transport_type.lower():
|
||||
netif.install()
|
||||
# if we are listening for EMANE events, don't generate them
|
||||
if self.session.emane.doeventmonitor():
|
||||
netif.poshook = None
|
||||
continue
|
||||
# at this point we register location handlers for generating
|
||||
# EMANE location events
|
||||
netif.poshook = self.setnemposition
|
||||
(x,y,z) = netif.node.position.get()
|
||||
self.setnemposition(netif, x, y, z)
|
||||
|
||||
def deinstallnetifs(self):
|
||||
''' Uninstall TAP devices. This invokes their shutdown method for
|
||||
any required cleanup; the device may be actually removed when
|
||||
emanetransportd terminates.
|
||||
'''
|
||||
for netif in self.netifs():
|
||||
if "virtual" in netif.transport_type.lower():
|
||||
netif.shutdown()
|
||||
netif.poshook = None
|
||||
|
||||
def setnemposition(self, netif, x, y, z):
|
||||
''' Publish a NEM location change event using the EMANE event service.
|
||||
'''
|
||||
if self.session.emane.service is None:
|
||||
if self.verbose:
|
||||
self.info("position service not available")
|
||||
return
|
||||
nemid = self.getnemid(netif)
|
||||
ifname = netif.localname
|
||||
if nemid is None:
|
||||
self.info("nemid for %s is unknown" % ifname)
|
||||
return
|
||||
(lat, long, alt) = self.session.location.getgeo(x, y, z)
|
||||
if self.verbose:
|
||||
self.info("setnemposition %s (%s) x,y,z=(%d,%d,%s)"
|
||||
"(%.6f,%.6f,%.6f)" % \
|
||||
(ifname, nemid, x, y, z, lat, long, alt))
|
||||
event = emaneeventlocation.EventLocation(1)
|
||||
# altitude must be an integer or a warning is printed
|
||||
# unused: yaw, pitch, roll, azimuth, elevation, velocity
|
||||
alt = int(round(alt))
|
||||
event.set(0, nemid, lat, long, alt)
|
||||
self.session.emane.service.publish(emaneeventlocation.EVENT_ID,
|
||||
emaneeventservice.PLATFORMID_ANY,
|
||||
emaneeventservice.NEMID_ANY,
|
||||
emaneeventservice.COMPONENTID_ANY,
|
||||
event.export())
|
||||
|
||||
def setnempositions(self, moved_netifs):
|
||||
''' Several NEMs have moved, from e.g. a WaypointMobilityModel
|
||||
calculation. Generate an EMANE Location Event having several
|
||||
entries for each netif that has moved.
|
||||
'''
|
||||
if len(moved_netifs) == 0:
|
||||
return
|
||||
if self.session.emane.service is None:
|
||||
if self.verbose:
|
||||
self.info("position service not available")
|
||||
return
|
||||
|
||||
event = emaneeventlocation.EventLocation(len(moved_netifs))
|
||||
i = 0
|
||||
for netif in moved_netifs:
|
||||
nemid = self.getnemid(netif)
|
||||
ifname = netif.localname
|
||||
if nemid is None:
|
||||
self.info("nemid for %s is unknown" % ifname)
|
||||
continue
|
||||
(x, y, z) = netif.node.getposition()
|
||||
(lat, long, alt) = self.session.location.getgeo(x, y, z)
|
||||
if self.verbose:
|
||||
self.info("setnempositions %d %s (%s) x,y,z=(%d,%d,%s)"
|
||||
"(%.6f,%.6f,%.6f)" % \
|
||||
(i, ifname, nemid, x, y, z, lat, long, alt))
|
||||
# altitude must be an integer or a warning is printed
|
||||
alt = int(round(alt))
|
||||
event.set(i, nemid, lat, long, alt)
|
||||
i += 1
|
||||
|
||||
self.session.emane.service.publish(emaneeventlocation.EVENT_ID,
|
||||
emaneeventservice.PLATFORMID_ANY,
|
||||
emaneeventservice.NEMID_ANY,
|
||||
emaneeventservice.COMPONENTID_ANY,
|
||||
event.export())
|
||||
|
||||
|
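The setnemid()/getnemid()/getnemnetif() docstrings above describe how an EmaneNode maps interfaces to numerical NEM IDs. The following is a hedged, standalone sketch of that bookkeeping with plain strings standing in for netif objects; the TAP names are hypothetical, patterned after the n1.0.158 device shown in the platform.xml comment earlier.

# hypothetical TAP device names stand in for real netif objects
nemidmap = {}
nemidmap["n1.0.158"] = 1          # what setnemid(netif, 1) records
nemidmap["n2.0.158"] = 2

def getnemnetif(nemid):
    # reverse lookup: first interface whose recorded NEM ID matches
    for netif in nemidmap:
        if nemidmap[netif] == nemid:
            return netif
    return None

print nemidmap.get("n1.0.158")    # 1, mirroring getnemid()
print getnemnetif(2)              # n2.0.158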
106
daemon/core/emane/rfpipe.py
Normal file
|
@@ -0,0 +1,106 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
# author: Harry Bullen <hbullen@i-a-i.com>
|
||||
#
|
||||
'''
|
||||
rfpipe.py: EMANE RF-PIPE model for CORE
|
||||
'''
|
||||
|
||||
import sys
|
||||
import string
|
||||
from core.api import coreapi
|
||||
|
||||
from core.constants import *
|
||||
from emane import EmaneModel
|
||||
from universal import EmaneUniversalModel
|
||||
|
||||
class EmaneRfPipeModel(EmaneModel):
|
||||
def __init__(self, session, objid = None, verbose = False):
|
||||
EmaneModel.__init__(self, session, objid, verbose)
|
||||
|
||||
# model name
|
||||
_name = "emane_rfpipe"
|
||||
|
||||
# configuration parameters are
|
||||
# ( 'name', 'type', 'default', 'possible-value-list', 'caption')
|
||||
# MAC parameters
|
||||
_confmatrix_mac = [
|
||||
("enablepromiscuousmode", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'True,False', 'enable promiscuous mode'),
|
||||
("datarate", coreapi.CONF_DATA_TYPE_UINT32, '1M',
|
||||
'', 'data rate (bps)'),
|
||||
("jitter", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
'', 'transmission jitter (usec)'),
|
||||
("delay", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
'', 'transmission delay (usec)'),
|
||||
("flowcontrolenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'On,Off', 'enable traffic flow control'),
|
||||
("flowcontroltokens", coreapi.CONF_DATA_TYPE_UINT16, '10',
|
||||
'', 'number of flow control tokens'),
|
||||
("enabletighttiming", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'On,Off', 'enable tight timing for pkt delay'),
|
||||
("pcrcurveuri", coreapi.CONF_DATA_TYPE_STRING,
|
||||
'/usr/share/emane/models/rfpipe/xml/rfpipepcr.xml',
|
||||
'', 'SINR/PCR curve file'),
|
||||
("transmissioncontrolmap", coreapi.CONF_DATA_TYPE_STRING, '',
|
||||
'', 'tx control map (nem:rate:freq:tx_dBm)'),
|
||||
]
|
||||
|
||||
# PHY parameters from Universal PHY
|
||||
_confmatrix_phy = EmaneUniversalModel._confmatrix
|
||||
|
||||
_confmatrix = _confmatrix_mac + _confmatrix_phy
|
||||
|
||||
# value groupings
|
||||
_confgroups = "RF-PIPE MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" \
|
||||
% ( len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(_confmatrix))
|
||||
|
||||
def buildnemxmlfiles(self, e, ifc):
|
||||
''' Build the necessary nem, mac, and phy XMLs in the given path.
|
||||
If an individual NEM has a nonstandard config, we need to build
|
||||
that file also. Otherwise the WLAN-wide nXXemane_rfpipenem.xml,
|
||||
nXXemane_rfpipemac.xml, nXXemane_rfpipephy.xml are used.
|
||||
'''
|
||||
values = e.getifcconfig(self.objid, self._name,
|
||||
self.getdefaultvalues(), ifc)
|
||||
if values is None:
|
||||
return
|
||||
nemdoc = e.xmldoc("nem")
|
||||
nem = nemdoc.getElementsByTagName("nem").pop()
|
||||
nem.setAttribute("name", "RF-PIPE NEM")
|
||||
mactag = nemdoc.createElement("mac")
|
||||
mactag.setAttribute("definition", self.macxmlname(ifc))
|
||||
nem.appendChild(mactag)
|
||||
phytag = nemdoc.createElement("phy")
|
||||
phytag.setAttribute("definition", self.phyxmlname(ifc))
|
||||
nem.appendChild(phytag)
|
||||
e.xmlwrite(nemdoc, self.nemxmlname(ifc))
|
||||
|
||||
names = list(self.getnames())
|
||||
macnames = names[:len(self._confmatrix_mac)]
|
||||
phynames = names[len(self._confmatrix_mac):]
|
||||
|
||||
macdoc = e.xmldoc("mac")
|
||||
mac = macdoc.getElementsByTagName("mac").pop()
|
||||
mac.setAttribute("name", "RF-PIPE MAC")
|
||||
mac.setAttribute("library", "rfpipemaclayer")
|
||||
if self.valueof("transmissioncontrolmap", values) is "":
|
||||
macnames.remove("transmissioncontrolmap")
|
||||
# EMANE 0.7.4 support
|
||||
if e.emane074:
|
||||
# convert datarate from bps to kbps
|
||||
i = names.index('datarate')
|
||||
values = list(values)
|
||||
values[i] = self.emane074_fixup(values[i], 1000)
|
||||
# append MAC options to macdoc
|
||||
map( lambda n: mac.appendChild(e.xmlparam(macdoc, n, \
|
||||
self.valueof(n, values))), macnames)
|
||||
e.xmlwrite(macdoc, self.macxmlname(ifc))
|
||||
|
||||
phydoc = EmaneUniversalModel.getphydoc(e, self, values, phynames)
|
||||
e.xmlwrite(phydoc, self.phyxmlname(ifc))
|
||||
|
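As a quick worked example of the _confgroups string built above, assuming the counts in this file (9 MAC entries and 16 Universal PHY entries), the 1-based ranges come out as follows; this only illustrates the arithmetic, not any additional model configuration.

n_mac = 9                  # len(_confmatrix_mac), counted from the list above
n_all = n_mac + 16         # plus the Universal PHY entries
print "RF-PIPE MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" % \
    (n_mac, n_mac + 1, n_all)
# RF-PIPE MAC Parameters:1-9|Universal PHY Parameters:10-25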
113
daemon/core/emane/universal.py
Normal file
|
@@ -0,0 +1,113 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
universal.py: EMANE Universal PHY model for CORE. Enumerates configuration items
|
||||
used for the Universal PHY.
|
||||
'''
|
||||
|
||||
import sys
|
||||
import string
|
||||
from core.api import coreapi
|
||||
|
||||
from core.constants import *
|
||||
from emane import EmaneModel
|
||||
|
||||
class EmaneUniversalModel(EmaneModel):
|
||||
''' This Universal PHY model is meant to be imported by other models,
|
||||
not instantiated.
|
||||
'''
|
||||
def __init__(self, session, objid = None, verbose = False):
|
||||
raise SyntaxError
|
||||
|
||||
_name = "emane_universal"
|
||||
_xmlname = "universalphy"
|
||||
_xmllibrary = "universalphylayer"
|
||||
|
||||
# universal PHY parameters
|
||||
_confmatrix = [
|
||||
("antennagain", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
'','antenna gain (dBi)'),
|
||||
("antennaazimuth", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
'','antenna azimuth (deg)'),
|
||||
("antennaelevation", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
'','antenna elevation (deg)'),
|
||||
("antennaprofileid", coreapi.CONF_DATA_TYPE_STRING, '1',
|
||||
'','antenna profile ID'),
|
||||
("antennaprofilemanifesturi", coreapi.CONF_DATA_TYPE_STRING, '',
|
||||
'','antenna profile manifest URI'),
|
||||
("antennaprofileenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'On,Off','antenna profile mode'),
|
||||
("bandwidth", coreapi.CONF_DATA_TYPE_UINT64, '1M',
|
||||
'', 'rf bandwidth (hz)'),
|
||||
("defaultconnectivitymode", coreapi.CONF_DATA_TYPE_BOOL, '1',
|
||||
'On,Off','default connectivity'),
|
||||
("frequency", coreapi.CONF_DATA_TYPE_UINT64, '2.347G',
|
||||
'','frequency (Hz)'),
|
||||
("frequencyofinterest", coreapi.CONF_DATA_TYPE_UINT64, '2.347G',
|
||||
'','frequency of interest (Hz)'),
|
||||
("frequencyofinterestfilterenable", coreapi.CONF_DATA_TYPE_BOOL, '1',
|
||||
'On,Off','frequency of interest filter enable'),
|
||||
("noiseprocessingmode", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'On,Off','enable noise processing'),
|
||||
("pathlossmode", coreapi.CONF_DATA_TYPE_STRING, '2ray',
|
||||
'pathloss,2ray,freespace','path loss mode'),
|
||||
("subid", coreapi.CONF_DATA_TYPE_UINT16, '1',
|
||||
'','subid'),
|
||||
("systemnoisefigure", coreapi.CONF_DATA_TYPE_FLOAT, '4.0',
|
||||
'','system noise figure (dB)'),
|
||||
("txpower", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
'','transmit power (dBm)'),
|
||||
]
|
||||
|
||||
# old parameters
|
||||
_confmatrix_ver074 = [
|
||||
("antennaazimuthbeamwidth", coreapi.CONF_DATA_TYPE_FLOAT, '360.0',
|
||||
'','azimuth beam width (deg)'),
|
||||
("antennaelevationbeamwidth", coreapi.CONF_DATA_TYPE_FLOAT, '180.0',
|
||||
'','elevation beam width (deg)'),
|
||||
("antennatype", coreapi.CONF_DATA_TYPE_STRING, 'omnidirectional',
|
||||
'omnidirectional,unidirectional','antenna type'),
|
||||
]
|
||||
|
||||
# parameters that require unit conversion for 0.7.4
|
||||
_update_ver074 = ("bandwidth", "frequency", "frequencyofinterest")
|
||||
# parameters that should be removed for 0.7.4
|
||||
_remove_ver074 = ("antennaprofileenable", "antennaprofileid",
|
||||
"antennaprofilemanifesturi",
|
||||
"frequencyofinterestfilterenable")
|
||||
|
||||
|
||||
@classmethod
|
||||
def getphydoc(cls, e, mac, values, phynames):
|
||||
phydoc = e.xmldoc("phy")
|
||||
phy = phydoc.getElementsByTagName("phy").pop()
|
||||
phy.setAttribute("name", cls._xmlname)
|
||||
phy.setAttribute("library", cls._xmllibrary)
|
||||
# EMANE 0.7.4 support - to be removed when 0.7.4 support is deprecated
|
||||
if e.emane074:
|
||||
names = mac.getnames()
|
||||
values = list(values)
|
||||
phynames = list(phynames)
|
||||
# update units for some parameters
|
||||
for p in cls._update_ver074:
|
||||
i = names.index(p)
|
||||
# these all happen to be KHz, so 1000 is used
|
||||
values[i] = cls.emane074_fixup(values[i], 1000)
|
||||
# remove new incompatible options
|
||||
for p in cls._remove_ver074:
|
||||
phynames.remove(p)
|
||||
# insert old options with their default values
|
||||
for old in cls._confmatrix_ver074:
|
||||
phy.appendChild(e.xmlparam(phydoc, old[0], old[2]))
|
||||
|
||||
# append all PHY options to phydoc
|
||||
map( lambda n: phy.appendChild(e.xmlparam(phydoc, n, \
|
||||
mac.valueof(n, values))), phynames)
|
||||
return phydoc
|
||||
|
||||
|
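The 0.7.4 branch of getphydoc() above prunes options that older EMANE releases do not understand before the PHY XML is written. A plain-list sketch of that pruning, using a hypothetical subset of the parameter names:

phynames = ["antennagain", "antennaprofileenable", "antennaprofileid",
            "antennaprofilemanifesturi", "frequencyofinterestfilterenable",
            "txpower"]
remove_ver074 = ("antennaprofileenable", "antennaprofileid",
                 "antennaprofilemanifesturi", "frequencyofinterestfilterenable")
# drop the options listed in _remove_ver074, as getphydoc() does
phynames = [n for n in phynames if n not in remove_ver074]
print phynames    # ['antennagain', 'txpower']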
246
daemon/core/location.py
Normal file
|
@@ -0,0 +1,246 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
location.py: definition of CoreLocation class that is a member of the
|
||||
Session object. Provides conversions between Cartesian and geographic coordinate
|
||||
systems. Depends on utm contributed module, from
|
||||
https://pypi.python.org/pypi/utm (version 0.3.0).
|
||||
'''
|
||||
|
||||
from core.conf import ConfigurableManager
|
||||
from core.api import coreapi
|
||||
from core.misc import utm
|
||||
|
||||
class CoreLocation(ConfigurableManager):
|
||||
''' Member of session class for handling global location data. This keeps
|
||||
track of a latitude/longitude/altitude reference point and scale in
|
||||
order to convert between X,Y and geo coordinates.
|
||||
|
||||
TODO: this could be updated to use more generic
|
||||
Configurable/ConfigurableManager code like other Session objects
|
||||
'''
|
||||
_name = "location"
|
||||
_type = coreapi.CORE_TLV_REG_UTILITY
|
||||
|
||||
def __init__(self, session):
|
||||
ConfigurableManager.__init__(self, session)
|
||||
self.reset()
|
||||
self.zonemap = {}
|
||||
for n, l in utm.ZONE_LETTERS:
|
||||
self.zonemap[l] = n
|
||||
|
||||
def reset(self):
|
||||
''' Reset to initial state.
|
||||
'''
|
||||
# (x, y, z) coordinates of the point given by self.refgeo
|
||||
self.refxyz = (0.0, 0.0, 0.0)
|
||||
# decimal latitude, longitude, and altitude at the point (x, y, z)
|
||||
self.setrefgeo(0.0, 0.0, 0.0)
|
||||
# 100 pixels equals this many meters
|
||||
self.refscale = 1.0
|
||||
# cached distance to refpt in other zones
|
||||
self.zoneshifts = {}
|
||||
|
||||
def configure_values(self, msg, values):
|
||||
''' Receive configuration message for setting the reference point
|
||||
and scale.
|
||||
'''
|
||||
if values is None:
|
||||
self.session.info("location data missing")
|
||||
return None
|
||||
values = values.split('|')
|
||||
# Cartesian coordinate reference point
|
||||
refx,refy = map(lambda x: float(x), values[0:2])
|
||||
refz = 0.0
|
||||
self.refxyz = (refx, refy, refz)
|
||||
# Geographic reference point
|
||||
lat,long,alt = map(lambda x: float(x), values[2:5])
|
||||
self.setrefgeo(lat, long, alt)
|
||||
self.refscale = float(values[5])
|
||||
self.session.info("location configured: (%.2f,%.2f,%.2f) = "
|
||||
"(%.5f,%.5f,%.5f) scale=%.2f" %
|
||||
(self.refxyz[0], self.refxyz[1], self.refxyz[2], self.refgeo[0],
|
||||
self.refgeo[1], self.refgeo[2], self.refscale))
|
||||
self.session.info("location configured: UTM(%.5f,%.5f,%.5f)" %
|
||||
(self.refutm[1], self.refutm[2], self.refutm[3]))
|
||||
|
||||
def px2m(self, val):
|
||||
''' Convert the specified value in pixels to meters using the
|
||||
configured scale. The scale is given as s, where
|
||||
100 pixels = s meters.
|
||||
'''
|
||||
return (val / 100.0) * self.refscale
|
||||
|
||||
def m2px(self, val):
|
||||
''' Convert the specified value in meters to pixels using the
|
||||
configured scale. The scale is given as s, where
|
||||
100 pixels = s meters.
|
||||
'''
|
||||
if self.refscale == 0.0:
|
||||
return 0.0
|
||||
return 100.0 * (val / self.refscale)
|
||||
|
||||
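A minimal standalone check of the 100-pixel scaling described in the two docstrings above, assuming a hypothetical scale of 150 (100 pixels = 150 meters); it mirrors px2m()/m2px() without needing a Session or CoreLocation instance.

refscale = 150.0            # hypothetical: 100 canvas pixels == 150 meters

def px2m(val):
    # same formula as CoreLocation.px2m()
    return (val / 100.0) * refscale

def m2px(val):
    # same formula as CoreLocation.m2px(), guarding against a zero scale
    if refscale == 0.0:
        return 0.0
    return 100.0 * (val / refscale)

print px2m(100)     # 150.0 -- one full 100-pixel length
print m2px(75.0)    # 50.0  -- 75 meters drawn as half that length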
def setrefgeo(self, lat, lon, alt):
|
||||
''' Record the geographical reference point decimal (lat, lon, alt)
|
||||
and convert and store its UTM equivalent for later use.
|
||||
'''
|
||||
self.refgeo = (lat, lon, alt)
|
||||
# easting, northing, zone
|
||||
(e, n, zonen, zonel) = utm.from_latlon(lat, lon)
|
||||
self.refutm = ( (zonen, zonel), e, n, alt)
|
||||
|
||||
def getgeo(self, x, y, z):
|
||||
''' Given (x, y, z) Cartesian coordinates, convert them to latitude,
|
||||
longitude, and altitude based on the configured reference point
|
||||
and scale.
|
||||
'''
|
||||
# shift (x,y,z) over to reference point (x,y,z)
|
||||
x = x - self.refxyz[0]
|
||||
y = -(y - self.refxyz[1])
|
||||
if z is None:
|
||||
z = self.refxyz[2]
|
||||
else:
|
||||
z = z - self.refxyz[2]
|
||||
# use UTM coordinates since unit is meters
|
||||
zone = self.refutm[0]
|
||||
if zone == "":
|
||||
raise ValueError, "reference point not configured"
|
||||
e = self.refutm[1] + self.px2m(x)
|
||||
n = self.refutm[2] + self.px2m(y)
|
||||
alt = self.refutm[3] + self.px2m(z)
|
||||
(e, n, zone) = self.getutmzoneshift(e, n)
|
||||
try:
|
||||
lat, lon = utm.to_latlon(e, n, zone[0], zone[1])
|
||||
except utm.OutOfRangeError:
|
||||
self.info("UTM out of range error for e=%s n=%s zone=%s" \
|
||||
"xyz=(%s,%s,%s)" % (e, n, zone, x, y, z))
|
||||
(lat, lon) = self.refgeo[:2]
|
||||
#self.info("getgeo(%s,%s,%s) e=%s n=%s zone=%s lat,lon,alt=" \
|
||||
# "%.3f,%.3f,%.3f" % (x, y, z, e, n, zone, lat, lon, alt))
|
||||
return (lat, lon, alt)
|
||||
|
||||
def getxyz(self, lat, lon, alt):
|
||||
''' Given latitude, longitude, and altitude location data, convert them
|
||||
to (x, y, z) Cartesian coordinates based on the configured
|
||||
reference point and scale. Lat/lon is converted to UTM meter
|
||||
coordinates, UTM zones are accounted for, and the scale turns
|
||||
meters to pixels.
|
||||
'''
|
||||
# convert lat/lon to UTM coordinates in meters
|
||||
(e, n, zonen, zonel) = utm.from_latlon(lat, lon)
|
||||
(rlat, rlon, ralt) = self.refgeo
|
||||
xshift = self.geteastingshift(zonen, zonel)
|
||||
if xshift is None:
|
||||
xm = e - self.refutm[1]
|
||||
else:
|
||||
xm = e + xshift
|
||||
yshift = self.getnorthingshift(zonen, zonel)
|
||||
if yshift is None:
|
||||
ym = n - self.refutm[2]
|
||||
else:
|
||||
ym = n + yshift
|
||||
zm = alt - ralt
|
||||
|
||||
# shift (x,y,z) over to reference point (x,y,z)
|
||||
x = self.m2px(xm) + self.refxyz[0]
|
||||
y = -(self.m2px(ym) + self.refxyz[1])
|
||||
z = self.m2px(zm) + self.refxyz[2]
|
||||
return (x, y, z)
|
||||
|
||||
def geteastingshift(self, zonen, zonel):
|
||||
''' If the lat, lon coordinates being converted are located in a
|
||||
different UTM zone than the canvas reference point, the UTM meters
|
||||
may need to be shifted.
|
||||
This picks a reference point in the same longitudinal band
|
||||
(UTM zone number) as the provided zone, to calculate the shift in
|
||||
meters for the x coordinate.
|
||||
'''
|
||||
rzonen = int(self.refutm[0][0])
|
||||
if zonen == rzonen:
|
||||
return None # same zone number, no x shift required
|
||||
z = (zonen, zonel)
|
||||
if z in self.zoneshifts and self.zoneshifts[z][0] is not None:
|
||||
return self.zoneshifts[z][0] # x shift already calculated, cached
|
||||
|
||||
(rlat, rlon, ralt) = self.refgeo
|
||||
lon2 = rlon + 6*(zonen - rzonen) # ea. zone is 6deg band
|
||||
(e2, n2, zonen2, zonel2) = utm.from_latlon(rlat, lon2) # ignore northing
|
||||
# NOTE: great circle distance used here, not reference ellipsoid!
|
||||
xshift = utm.haversine(rlon, rlat, lon2, rlat) - e2
|
||||
# cache the return value
|
||||
yshift = None
|
||||
if z in self.zoneshifts:
|
||||
yshift = self.zoneshifts[z][1]
|
||||
self.zoneshifts[z] = (xshift, yshift)
|
||||
return xshift
|
||||
|
||||
def getnorthingshift(self, zonen, zonel):
|
||||
''' If the lat, lon coordinates being converted are located in a
|
||||
different UTM zone than the canvas reference point, the UTM meters
|
||||
may need to be shifted.
|
||||
This picks a reference point in the same latitude band (UTM zone letter)
|
||||
as the provided zone, to calculate the shift in meters for the
|
||||
y coordinate.
|
||||
'''
|
||||
rzonel = self.refutm[0][1]
|
||||
if zonel == rzonel:
|
||||
return None # same zone letter, no y shift required
|
||||
z = (zonen, zonel)
|
||||
if z in self.zoneshifts and self.zoneshifts[z][1] is not None:
|
||||
return self.zoneshifts[z][1] # y shift already calculated, cached
|
||||
|
||||
(rlat, rlon, ralt) = self.refgeo
|
||||
# zonemap is used to calculate degrees difference between zone letters
|
||||
latshift = self.zonemap[zonel] - self.zonemap[rzonel]
|
||||
lat2 = rlat + latshift # ea. latitude band is 8deg high
|
||||
(e2, n2, zonen2, zonel2) = utm.from_latlon(lat2, rlon)
|
||||
# NOTE: great circle distance used here, not reference ellipsoid
|
||||
yshift = -(utm.haversine(rlon, rlat, rlon, lat2) + n2)
|
||||
# cache the return value
|
||||
xshift = None
|
||||
if z in self.zoneshifts:
|
||||
xshift = self.zoneshifts[z][0]
|
||||
self.zoneshifts[z] = (xshift, yshift)
|
||||
return yshift
|
||||
|
||||
def getutmzoneshift(self, e, n):
|
||||
''' Given UTM easting and northing values, check if they fall outside
|
||||
the reference point's zone boundary. Return the UTM coordinates in a
|
||||
different zone and the new zone if they do. Zone lettering is only
|
||||
changed when the reference point is in the opposite hemisphere.
|
||||
'''
|
||||
zone = self.refutm[0]
|
||||
(rlat, rlon, ralt) = self.refgeo
|
||||
if e > 834000 or e < 166000:
|
||||
num_zones = (int(e) - 166000) / (utm.R/10)
|
||||
# estimate number of zones to shift, E (positive) or W (negative)
|
||||
rlon2 = self.refgeo[1] + (num_zones * 6)
|
||||
(e2, n2, zonen2, zonel2) = utm.from_latlon(rlat, rlon2)
|
||||
xshift = utm.haversine(rlon, rlat, rlon2, rlat)
|
||||
# after >3 zones away from refpt, the above estimate won't work
|
||||
# (the above estimate could be improved)
|
||||
if not 100000 <= (e - xshift) < 1000000:
|
||||
# move one more zone away
|
||||
num_zones = (abs(num_zones)+1) * (abs(num_zones)/num_zones)
|
||||
rlon2 = self.refgeo[1] + (num_zones * 6)
|
||||
(e2, n2, zonen2, zonel2) = utm.from_latlon(rlat, rlon2)
|
||||
xshift = utm.haversine(rlon, rlat, rlon2, rlat)
|
||||
e = e - xshift
|
||||
zone = (zonen2, zonel2)
|
||||
if n < 0:
|
||||
# refpt in northern hemisphere and we crossed south of equator
|
||||
n += 10000000
|
||||
zone = (zone[0], 'M')
|
||||
elif n > 10000000:
|
||||
# refpt in southern hemisphere and we crossed north of equator
|
||||
n -= 10000000
|
||||
zone = (zone[0], 'N')
|
||||
return (e, n, zone)
|
||||
|
||||
|
||||
|
216
daemon/core/misc/LatLongUTMconversion.py
Executable file
|
@@ -0,0 +1,216 @@
|
|||
#!/usr/bin/env python
|
||||
# this file is from http://pygps.org/
|
||||
|
||||
# Lat Long - UTM, UTM - Lat Long conversions
|
||||
|
||||
from math import pi, sin, cos, tan, sqrt
|
||||
|
||||
#LatLong- UTM conversion..h
|
||||
#definitions for lat/long to UTM and UTM to lat/long conversions
|
||||
#include <string.h>
|
||||
|
||||
_deg2rad = pi / 180.0
|
||||
_rad2deg = 180.0 / pi
|
||||
|
||||
_EquatorialRadius = 2
|
||||
_eccentricitySquared = 3
|
||||
|
||||
_ellipsoid = [
|
||||
# id, Ellipsoid name, Equatorial Radius, square of eccentricity
|
||||
# first one is a placeholder only, to allow array indices to match id numbers
|
||||
[ -1, "Placeholder", 0, 0],
|
||||
[ 1, "Airy", 6377563, 0.00667054],
|
||||
[ 2, "Australian National", 6378160, 0.006694542],
|
||||
[ 3, "Bessel 1841", 6377397, 0.006674372],
|
||||
[ 4, "Bessel 1841 (Nambia] ", 6377484, 0.006674372],
|
||||
[ 5, "Clarke 1866", 6378206, 0.006768658],
|
||||
[ 6, "Clarke 1880", 6378249, 0.006803511],
|
||||
[ 7, "Everest", 6377276, 0.006637847],
|
||||
[ 8, "Fischer 1960 (Mercury] ", 6378166, 0.006693422],
|
||||
[ 9, "Fischer 1968", 6378150, 0.006693422],
|
||||
[ 10, "GRS 1967", 6378160, 0.006694605],
|
||||
[ 11, "GRS 1980", 6378137, 0.00669438],
|
||||
[ 12, "Helmert 1906", 6378200, 0.006693422],
|
||||
[ 13, "Hough", 6378270, 0.00672267],
|
||||
[ 14, "International", 6378388, 0.00672267],
|
||||
[ 15, "Krassovsky", 6378245, 0.006693422],
|
||||
[ 16, "Modified Airy", 6377340, 0.00667054],
|
||||
[ 17, "Modified Everest", 6377304, 0.006637847],
|
||||
[ 18, "Modified Fischer 1960", 6378155, 0.006693422],
|
||||
[ 19, "South American 1969", 6378160, 0.006694542],
|
||||
[ 20, "WGS 60", 6378165, 0.006693422],
|
||||
[ 21, "WGS 66", 6378145, 0.006694542],
|
||||
[ 22, "WGS-72", 6378135, 0.006694318],
|
||||
[ 23, "WGS-84", 6378137, 0.00669438]
|
||||
]
|
||||
|
||||
#Reference ellipsoids derived from Peter H. Dana's website-
|
||||
#http://www.utexas.edu/depts/grg/gcraft/notes/datum/elist.html
|
||||
#Department of Geography, University of Texas at Austin
|
||||
#Internet: pdana@mail.utexas.edu
|
||||
#3/22/95
|
||||
|
||||
#Source
|
||||
#Defense Mapping Agency. 1987b. DMA Technical Report: Supplement to Department of Defense World Geodetic System
|
||||
#1984 Technical Report. Part I and II. Washington, DC: Defense Mapping Agency
|
||||
|
||||
#def LLtoUTM(int ReferenceEllipsoid, const double Lat, const double Long,
|
||||
# double &UTMNorthing, double &UTMEasting, char* UTMZone)
|
||||
|
||||
def LLtoUTM(ReferenceEllipsoid, Lat, Long, zone = None):
|
||||
"""converts lat/long to UTM coords. Equations from USGS Bulletin 1532
|
||||
East Longitudes are positive, West longitudes are negative.
|
||||
North latitudes are positive, South latitudes are negative
|
||||
Lat and Long are in decimal degrees
|
||||
Written by Chuck Gantz- chuck.gantz@globalstar.com"""
|
||||
|
||||
a = _ellipsoid[ReferenceEllipsoid][_EquatorialRadius]
|
||||
eccSquared = _ellipsoid[ReferenceEllipsoid][_eccentricitySquared]
|
||||
k0 = 0.9996
|
||||
|
||||
#Make sure the longitude is between -180.00 .. 179.9
|
||||
LongTemp = (Long+180)-int((Long+180)/360)*360-180 # -180.00 .. 179.9
|
||||
|
||||
LatRad = Lat*_deg2rad
|
||||
LongRad = LongTemp*_deg2rad
|
||||
|
||||
if zone is None:
|
||||
ZoneNumber = int((LongTemp + 180)/6) + 1
|
||||
else:
|
||||
ZoneNumber = zone
|
||||
|
||||
if Lat >= 56.0 and Lat < 64.0 and LongTemp >= 3.0 and LongTemp < 12.0:
|
||||
ZoneNumber = 32
|
||||
|
||||
# Special zones for Svalbard
|
||||
if Lat >= 72.0 and Lat < 84.0:
|
||||
if LongTemp >= 0.0 and LongTemp < 9.0:ZoneNumber = 31
|
||||
elif LongTemp >= 9.0 and LongTemp < 21.0: ZoneNumber = 33
|
||||
elif LongTemp >= 21.0 and LongTemp < 33.0: ZoneNumber = 35
|
||||
elif LongTemp >= 33.0 and LongTemp < 42.0: ZoneNumber = 37
|
||||
|
||||
LongOrigin = (ZoneNumber - 1)*6 - 180 + 3 #+3 puts origin in middle of zone
|
||||
LongOriginRad = LongOrigin * _deg2rad
|
||||
|
||||
#compute the UTM Zone from the latitude and longitude
|
||||
UTMZone = "%d%c" % (ZoneNumber, _UTMLetterDesignator(Lat))
|
||||
|
||||
eccPrimeSquared = (eccSquared)/(1-eccSquared)
|
||||
N = a/sqrt(1-eccSquared*sin(LatRad)*sin(LatRad))
|
||||
T = tan(LatRad)*tan(LatRad)
|
||||
C = eccPrimeSquared*cos(LatRad)*cos(LatRad)
|
||||
A = cos(LatRad)*(LongRad-LongOriginRad)
|
||||
|
||||
M = a*((1
|
||||
- eccSquared/4
|
||||
- 3*eccSquared*eccSquared/64
|
||||
- 5*eccSquared*eccSquared*eccSquared/256)*LatRad
|
||||
- (3*eccSquared/8
|
||||
+ 3*eccSquared*eccSquared/32
|
||||
+ 45*eccSquared*eccSquared*eccSquared/1024)*sin(2*LatRad)
|
||||
+ (15*eccSquared*eccSquared/256 + 45*eccSquared*eccSquared*eccSquared/1024)*sin(4*LatRad)
|
||||
- (35*eccSquared*eccSquared*eccSquared/3072)*sin(6*LatRad))
|
||||
|
||||
UTMEasting = (k0*N*(A+(1-T+C)*A*A*A/6
|
||||
+ (5-18*T+T*T+72*C-58*eccPrimeSquared)*A*A*A*A*A/120)
|
||||
+ 500000.0)
|
||||
|
||||
UTMNorthing = (k0*(M+N*tan(LatRad)*(A*A/2+(5-T+9*C+4*C*C)*A*A*A*A/24
|
||||
+ (61
|
||||
-58*T
|
||||
+T*T
|
||||
+600*C
|
||||
-330*eccPrimeSquared)*A*A*A*A*A*A/720)))
|
||||
|
||||
if Lat < 0:
|
||||
UTMNorthing = UTMNorthing + 10000000.0; #10000000 meter offset for southern hemisphere
|
||||
return (UTMZone, UTMEasting, UTMNorthing)
|
||||
|
||||
|
||||
def _UTMLetterDesignator(Lat):
|
||||
"""This routine determines the correct UTM letter designator for the given
|
||||
latitude; returns 'Z' if latitude is outside the UTM limits of 84N to 80S.
|
||||
Written by Chuck Gantz- chuck.gantz@globalstar.com"""
|
||||
|
||||
if 84 >= Lat >= 72: return 'X'
|
||||
elif 72 > Lat >= 64: return 'W'
|
||||
elif 64 > Lat >= 56: return 'V'
|
||||
elif 56 > Lat >= 48: return 'U'
|
||||
elif 48 > Lat >= 40: return 'T'
|
||||
elif 40 > Lat >= 32: return 'S'
|
||||
elif 32 > Lat >= 24: return 'R'
|
||||
elif 24 > Lat >= 16: return 'Q'
|
||||
elif 16 > Lat >= 8: return 'P'
|
||||
elif 8 > Lat >= 0: return 'N'
|
||||
elif 0 > Lat >= -8: return 'M'
|
||||
elif -8> Lat >= -16: return 'L'
|
||||
elif -16 > Lat >= -24: return 'K'
|
||||
elif -24 > Lat >= -32: return 'J'
|
||||
elif -32 > Lat >= -40: return 'H'
|
||||
elif -40 > Lat >= -48: return 'G'
|
||||
elif -48 > Lat >= -56: return 'F'
|
||||
elif -56 > Lat >= -64: return 'E'
|
||||
elif -64 > Lat >= -72: return 'D'
|
||||
elif -72 > Lat >= -80: return 'C'
|
||||
else: return 'Z' # if the Latitude is outside the UTM limits
|
||||
|
||||
#void UTMtoLL(int ReferenceEllipsoid, const double UTMNorthing, const double UTMEasting, const char* UTMZone,
|
||||
# double& Lat, double& Long )
|
||||
|
||||
def UTMtoLL(ReferenceEllipsoid, northing, easting, zone):
|
||||
"""converts UTM coords to lat/long. Equations from USGS Bulletin 1532
|
||||
East Longitudes are positive, West longitudes are negative.
|
||||
North latitudes are positive, South latitudes are negative
|
||||
Lat and Long are in decimal degrees.
|
||||
Written by Chuck Gantz- chuck.gantz@globalstar.com
|
||||
Converted to Python by Russ Nelson <nelson@crynwr.com>"""
|
||||
|
||||
k0 = 0.9996
|
||||
a = _ellipsoid[ReferenceEllipsoid][_EquatorialRadius]
|
||||
eccSquared = _ellipsoid[ReferenceEllipsoid][_eccentricitySquared]
|
||||
e1 = (1-sqrt(1-eccSquared))/(1+sqrt(1-eccSquared))
|
||||
#NorthernHemisphere; //1 for northern hemisphere, 0 for southern
|
||||
|
||||
x = easting - 500000.0 #remove 500,000 meter offset for longitude
|
||||
y = northing
|
||||
|
||||
ZoneLetter = zone[-1]
|
||||
ZoneNumber = int(zone[:-1])
|
||||
if ZoneLetter >= 'N':
|
||||
NorthernHemisphere = 1 # point is in northern hemisphere
|
||||
else:
|
||||
NorthernHemisphere = 0 # point is in southern hemisphere
|
||||
y -= 10000000.0 # remove 10,000,000 meter offset used for southern hemisphere
|
||||
|
||||
LongOrigin = (ZoneNumber - 1)*6 - 180 + 3 # +3 puts origin in middle of zone
|
||||
|
||||
eccPrimeSquared = (eccSquared)/(1-eccSquared)
|
||||
|
||||
M = y / k0
|
||||
mu = M/(a*(1-eccSquared/4-3*eccSquared*eccSquared/64-5*eccSquared*eccSquared*eccSquared/256))
|
||||
|
||||
phi1Rad = (mu + (3*e1/2-27*e1*e1*e1/32)*sin(2*mu)
|
||||
+ (21*e1*e1/16-55*e1*e1*e1*e1/32)*sin(4*mu)
|
||||
+(151*e1*e1*e1/96)*sin(6*mu))
|
||||
phi1 = phi1Rad*_rad2deg;
|
||||
|
||||
N1 = a/sqrt(1-eccSquared*sin(phi1Rad)*sin(phi1Rad))
|
||||
T1 = tan(phi1Rad)*tan(phi1Rad)
|
||||
C1 = eccPrimeSquared*cos(phi1Rad)*cos(phi1Rad)
|
||||
R1 = a*(1-eccSquared)/pow(1-eccSquared*sin(phi1Rad)*sin(phi1Rad), 1.5)
|
||||
D = x/(N1*k0)
|
||||
|
||||
Lat = phi1Rad - (N1*tan(phi1Rad)/R1)*(D*D/2-(5+3*T1+10*C1-4*C1*C1-9*eccPrimeSquared)*D*D*D*D/24
|
||||
+(61+90*T1+298*C1+45*T1*T1-252*eccPrimeSquared-3*C1*C1)*D*D*D*D*D*D/720)
|
||||
Lat = Lat * _rad2deg
|
||||
|
||||
Long = (D-(1+2*T1+C1)*D*D*D/6+(5-2*C1+28*T1-3*C1*C1+8*eccPrimeSquared+24*T1*T1)
|
||||
*D*D*D*D*D/120)/cos(phi1Rad)
|
||||
Long = LongOrigin + Long * _rad2deg
|
||||
return (Lat, Long)
|
||||
|
||||
if __name__ == '__main__':
|
||||
(z, e, n) = LLtoUTM(23, 45.00, -75.00)
|
||||
print z, e, n
|
||||
print UTMtoLL(23, n, e, z)
|
||||
|
0
daemon/core/misc/__init__.py
Normal file
160
daemon/core/misc/event.py
Normal file
|
@@ -0,0 +1,160 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
#
|
||||
'''
|
||||
event.py: event loop implementation using a heap queue and threads.
|
||||
'''
|
||||
import time
|
||||
import threading
|
||||
import heapq
|
||||
|
||||
class EventLoop(object):
|
||||
|
||||
class Event(object):
|
||||
def __init__(self, eventnum, time, func, *args, **kwds):
|
||||
self.eventnum = eventnum
|
||||
self.time = time
|
||||
self.func = func
|
||||
self.args = args
|
||||
self.kwds = kwds
|
||||
self.canceled = False
|
||||
|
||||
def __cmp__(self, other):
|
||||
tmp = cmp(self.time, other.time)
|
||||
if tmp == 0:
|
||||
tmp = cmp(self.eventnum, other.eventnum)
|
||||
return tmp
|
||||
|
||||
def run(self):
|
||||
if self.canceled:
|
||||
return
|
||||
self.func(*self.args, **self.kwds)
|
||||
|
||||
def cancel(self):
|
||||
self.canceled = True # XXX not thread-safe
|
||||
|
||||
def __init__(self):
|
||||
self.lock = threading.RLock()
|
||||
self.queue = []
|
||||
self.eventnum = 0
|
||||
self.timer = None
|
||||
self.running = False
|
||||
self.start = None
|
||||
|
||||
def __del__(self):
|
||||
self.stop()
|
||||
|
||||
def __run_events(self):
|
||||
schedule = False
|
||||
while True:
|
||||
with self.lock:
|
||||
if not self.running or not self.queue:
|
||||
break
|
||||
now = time.time()
|
||||
if self.queue[0].time > now:
|
||||
schedule = True
|
||||
break
|
||||
event = heapq.heappop(self.queue)
|
||||
assert event.time <= now
|
||||
event.run()
|
||||
with self.lock:
|
||||
self.timer = None
|
||||
if schedule:
|
||||
self.__schedule_event()
|
||||
|
||||
def __schedule_event(self):
|
||||
with self.lock:
|
||||
assert self.running
|
||||
if not self.queue:
|
||||
return
|
||||
delay = self.queue[0].time - time.time()
|
||||
assert self.timer is None
|
||||
self.timer = threading.Timer(delay, self.__run_events)
|
||||
self.timer.daemon = True
|
||||
self.timer.start()
|
||||
|
||||
def run(self):
|
||||
with self.lock:
|
||||
if self.running:
|
||||
return
|
||||
self.running = True
|
||||
self.start = time.time()
|
||||
for event in self.queue:
|
||||
event.time += self.start
|
||||
self.__schedule_event()
|
||||
|
||||
def stop(self):
|
||||
with self.lock:
|
||||
if not self.running:
|
||||
return
|
||||
self.queue = []
|
||||
self.eventnum = 0
|
||||
if self.timer is not None:
|
||||
self.timer.cancel()
|
||||
self.timer = None
|
||||
self.running = False
|
||||
self.start = None
|
||||
|
||||
def add_event(self, delaysec, func, *args, **kwds):
|
||||
with self.lock:
|
||||
eventnum = self.eventnum
|
||||
self.eventnum += 1
|
||||
evtime = float(delaysec)
|
||||
if self.running:
|
||||
evtime += time.time()
|
||||
event = self.Event(eventnum, evtime, func, *args, **kwds)
|
||||
|
||||
if self.queue:
|
||||
prevhead = self.queue[0]
|
||||
else:
|
||||
prevhead = None
|
||||
|
||||
heapq.heappush(self.queue, event)
|
||||
head = self.queue[0]
|
||||
if prevhead is not None and prevhead != head:
|
||||
if self.timer is not None and not self.timer.is_alive():
|
||||
self.timer.cancel()
|
||||
self.timer = None
|
||||
|
||||
if self.running and self.timer is None:
|
||||
self.__schedule_event()
|
||||
return event
|
||||
|
||||
def example():
|
||||
loop = EventLoop()
|
||||
|
||||
def msg(arg):
|
||||
delta = time.time() - loop.start
|
||||
print delta, 'arg:', arg
|
||||
|
||||
def repeat(interval, count):
|
||||
count -= 1
|
||||
msg('repeat: interval: %s; remaining: %s' % (interval, count))
|
||||
if count > 0:
|
||||
loop.add_event(interval, repeat, interval, count)
|
||||
|
||||
def sleep(delay):
|
||||
msg('sleep %s' % delay)
|
||||
time.sleep(delay)
|
||||
msg('sleep done')
|
||||
|
||||
def stop(arg):
|
||||
msg(arg)
|
||||
loop.stop()
|
||||
|
||||
loop.add_event(0, msg, 'start')
|
||||
loop.add_event(0, msg, 'time zero')
|
||||
|
||||
for delay in 5, 4, 10, -1, 0, 9, 3, 7, 3.14:
|
||||
loop.add_event(delay, msg, 'time %s' % delay)
|
||||
|
||||
loop.run()
|
||||
|
||||
loop.add_event(0, repeat, 1, 5)
|
||||
loop.add_event(12, sleep, 10)
|
||||
|
||||
loop.add_event(15.75, stop, 'stop time: 15.75')
|
230
daemon/core/misc/ipaddr.py
Normal file
|
@@ -0,0 +1,230 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Tom Goff <thomas.goff@boeing.com>
|
||||
#
|
||||
'''
|
||||
ipaddr.py: helper objects for dealing with IPv4/v6 addresses.
|
||||
'''
|
||||
|
||||
import socket
|
||||
import struct
|
||||
import random
|
||||
|
||||
AF_INET = socket.AF_INET
|
||||
AF_INET6 = socket.AF_INET6
|
||||
|
||||
class MacAddr(object):
|
||||
def __init__(self, addr):
|
||||
self.addr = addr
|
||||
|
||||
def __str__(self):
|
||||
return ":".join(map(lambda x: ("%02x" % ord(x)), self.addr))
|
||||
|
||||
def tolinklocal(self):
|
||||
''' Convert the MAC address to an IPv6 link-local address, using EUI 48
|
||||
to EUI 64 conversion process per RFC 5342.
|
||||
'''
|
||||
if not self.addr:
|
||||
return IPAddr.fromstring("::")
|
||||
tmp = struct.unpack("!Q", '\x00\x00' + self.addr)[0]
|
||||
nic = long(tmp) & 0x000000FFFFFFL
|
||||
oui = long(tmp) & 0xFFFFFF000000L
|
||||
# toggle U/L bit
|
||||
oui ^= 0x020000000000L
|
||||
# append EUI-48 octets
|
||||
oui = (oui << 16) | 0xFFFE000000L
|
||||
return IPAddr(AF_INET6, struct.pack("!QQ", 0xfe80 << 48, oui | nic))
|
||||
|
||||
@classmethod
|
||||
def fromstring(cls, s):
|
||||
addr = "".join(map(lambda x: chr(int(x, 16)), s.split(":")))
|
||||
return cls(addr)
|
||||
|
||||
@classmethod
|
||||
def random(cls):
|
||||
tmp = random.randint(0, 0xFFFFFF)
|
||||
tmp |= 0x00163E << 24 # use the Xen OID 00:16:3E
|
||||
tmpbytes = struct.pack("!Q", tmp)
|
||||
return cls(tmpbytes[2:])
|
||||
|
||||
class IPAddr(object):
|
||||
def __init__(self, af, addr):
|
||||
# check if (af, addr) is valid
|
||||
if not socket.inet_ntop(af, addr):
|
||||
raise ValueError, "invalid af/addr"
|
||||
self.af = af
|
||||
self.addr = addr
|
||||
|
||||
def isIPv4(self):
|
||||
return self.af == AF_INET
|
||||
|
||||
def isIPv6(self):
|
||||
return self.af == AF_INET6
|
||||
|
||||
def __str__(self):
|
||||
return socket.inet_ntop(self.af, self.addr)
|
||||
|
||||
def __eq__(self, other):
|
||||
try:
|
||||
return other.af == self.af and other.addr == self.addr
|
||||
except:
|
||||
return False
|
||||
|
||||
def __add__(self, other):
|
||||
try:
|
||||
carry = int(other)
|
||||
except:
|
||||
return NotImplemented
|
||||
tmp = map(lambda x: ord(x), self.addr)
|
||||
for i in xrange(len(tmp) - 1, -1, -1):
|
||||
x = tmp[i] + carry
|
||||
tmp[i] = x & 0xff
|
||||
carry = x >> 8
|
||||
if carry == 0:
|
||||
break
|
||||
addr = "".join(map(lambda x: chr(x), tmp))
|
||||
return self.__class__(self.af, addr)
|
||||
|
||||
def __sub__(self, other):
|
||||
try:
|
||||
tmp = -int(other)
|
||||
except:
|
||||
return NotImplemented
|
||||
return self.__add__(tmp)
|
||||
|
||||
@classmethod
|
||||
def fromstring(cls, s):
|
||||
for af in AF_INET, AF_INET6:
|
||||
try:
|
||||
return cls(af, socket.inet_pton(af, s))
|
||||
except Exception, e:
|
||||
pass
|
||||
raise e
|
||||
|
||||
@staticmethod
|
||||
def toint(s):
|
||||
''' convert IPv4 string to 32-bit integer
|
||||
'''
|
||||
bin = socket.inet_pton(AF_INET, s)
|
||||
return(struct.unpack('!I', bin)[0])
|
||||
|
||||
class IPPrefix(object):
|
||||
def __init__(self, af, prefixstr):
|
||||
"prefixstr format: address/prefixlen"
|
||||
tmp = prefixstr.split("/")
|
||||
if len(tmp) > 2:
|
||||
raise ValueError, "invalid prefix: '%s'" % prefixstr
|
||||
self.af = af
|
||||
if self.af == AF_INET:
|
||||
self.addrlen = 32
|
||||
elif self.af == AF_INET6:
|
||||
self.addrlen = 128
|
||||
else:
|
||||
raise ValueError, "invalid address family: '%s'" % self.af
|
||||
if len(tmp) == 2:
|
||||
self.prefixlen = int(tmp[1])
|
||||
else:
|
||||
self.prefixlen = self.addrlen
|
||||
self.prefix = socket.inet_pton(self.af, tmp[0])
|
||||
if self.addrlen > self.prefixlen:
|
||||
addrbits = self.addrlen - self.prefixlen
|
||||
netmask = ((1L << self.prefixlen) - 1) << addrbits
|
||||
prefix = ""
|
||||
for i in xrange(-1, -(addrbits >> 3) - 2, -1):
|
||||
prefix = chr(ord(self.prefix[i]) & (netmask & 0xff)) + prefix
|
||||
netmask >>= 8
|
||||
self.prefix = self.prefix[:i] + prefix
|
||||
|
||||
def __str__(self):
|
||||
return "%s/%s" % (socket.inet_ntop(self.af, self.prefix),
|
||||
self.prefixlen)
|
||||
|
||||
def __eq__(self, other):
|
||||
try:
|
||||
return other.af == self.af and \
|
||||
other.prefixlen == self.prefixlen and \
|
||||
other.prefix == self.prefix
|
||||
except:
|
||||
return False
|
||||
|
||||
def __add__(self, other):
|
||||
try:
|
||||
tmp = int(other)
|
||||
except:
|
||||
return NotImplemented
|
||||
a = IPAddr(self.af, self.prefix) + \
|
||||
(tmp << (self.addrlen - self.prefixlen))
|
||||
prefixstr = "%s/%s" % (a, self.prefixlen)
|
||||
if self.__class__ == IPPrefix:
|
||||
return self.__class__(self.af, prefixstr)
|
||||
else:
|
||||
return self.__class__(prefixstr)
|
||||
|
||||
def __sub__(self, other):
|
||||
try:
|
||||
tmp = -int(other)
|
||||
except:
|
||||
return NotImplemented
|
||||
return self.__add__(tmp)
|
||||
|
||||
def addr(self, hostid):
|
||||
tmp = int(hostid)
|
||||
if (tmp == 1 or tmp == 0 or tmp == -1) and self.addrlen == self.prefixlen:
|
||||
return IPAddr(self.af, self.prefix)
|
||||
if tmp == 0 or \
|
||||
tmp > (1 << (self.addrlen - self.prefixlen)) - 1 or \
|
||||
(self.af == AF_INET and tmp == (1 << (self.addrlen - self.prefixlen)) - 1):
|
||||
raise ValueError, "invalid hostid for prefix %s: %s" % (self, hostid)
|
||||
addr = ""
|
||||
for i in xrange(-1, -(self.addrlen >> 3) - 1, -1):
|
||||
addr = chr(ord(self.prefix[i]) | (tmp & 0xff)) + addr
|
||||
tmp >>= 8
|
||||
if not tmp:
|
||||
break
|
||||
addr = self.prefix[:i] + addr
|
||||
return IPAddr(self.af, addr)
|
||||
|
||||
def minaddr(self):
|
||||
return self.addr(1)
|
||||
|
||||
def maxaddr(self):
|
||||
if self.af == AF_INET:
|
||||
return self.addr((1 << (self.addrlen - self.prefixlen)) - 2)
|
||||
else:
|
||||
return self.addr((1 << (self.addrlen - self.prefixlen)) - 1)
|
||||
|
||||
def numaddr(self):
|
||||
return max(0, (1 << (self.addrlen - self.prefixlen)) - 2)
|
||||
|
||||
def prefixstr(self):
|
||||
return "%s" % socket.inet_ntop(self.af, self.prefix)
|
||||
|
||||
def netmaskstr(self):
|
||||
addrbits = self.addrlen - self.prefixlen
|
||||
netmask = ((1L << self.prefixlen) - 1) << addrbits
|
||||
netmaskbytes = struct.pack("!L", netmask)
|
||||
return IPAddr(af=AF_INET, addr=netmaskbytes).__str__()
|
||||
|
||||
class IPv4Prefix(IPPrefix):
|
||||
def __init__(self, prefixstr):
|
||||
IPPrefix.__init__(self, AF_INET, prefixstr)
|
||||
|
||||
class IPv6Prefix(IPPrefix):
|
||||
def __init__(self, prefixstr):
|
||||
IPPrefix.__init__(self, AF_INET6, prefixstr)
|
||||
|
||||
def isIPAddress(af, addrstr):
|
||||
try:
|
||||
tmp = socket.inet_pton(af, addrstr)
|
||||
return True
|
||||
except:
|
||||
return False
|
||||
|
||||
def isIPv4Address(addrstr):
|
||||
return isIPAddress(AF_INET, addrstr)
|
||||
|
||||
def isIPv6Address(addrstr):
|
||||
return isIPAddress(AF_INET6, addrstr)
|
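A hedged usage sketch for the helper classes above, assuming the daemon's core package is on the Python path; the MAC address and prefix values are invented for illustration.

from core.misc.ipaddr import MacAddr, IPv4Prefix

mac = MacAddr.fromstring("00:16:3e:aa:bb:cc")
print mac.tolinklocal()           # fe80::216:3eff:feaa:bbcc (EUI-64 derivation)

p = IPv4Prefix("10.0.0.0/24")
print p.minaddr(), p.maxaddr()    # 10.0.0.1 10.0.0.254
print p.netmaskstr()              # 255.255.255.0
print p + 1                       # 10.0.1.0/24, the next /24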
116
daemon/core/misc/quagga.py
Normal file
|
@@ -0,0 +1,116 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Tom Goff <thomas.goff@boeing.com>
|
||||
#
|
||||
'''
|
||||
quagga.py: helper class for generating Quagga configuration.
|
||||
'''
|
||||
|
||||
import os.path
|
||||
from string import Template
|
||||
|
||||
def maketuple(obj):
|
||||
if hasattr(obj, "__iter__"):
|
||||
return tuple(obj)
|
||||
else:
|
||||
return (obj,)
|
||||
|
||||
class NetIf(object):
|
||||
def __init__(self, name, addrlist = []):
|
||||
self.name = name
|
||||
self.addrlist = addrlist
|
||||
|
||||
class Conf(object):
|
||||
def __init__(self, **kwds):
|
||||
self.kwds = kwds
|
||||
|
||||
def __str__(self):
|
||||
tmp = self.template.substitute(**self.kwds)
|
||||
if tmp[-1] == '\n':
|
||||
tmp = tmp[:-1]
|
||||
return tmp
|
||||
|
||||
class QuaggaOSPF6Interface(Conf):
|
||||
AF_IPV6_ID = 0
|
||||
AF_IPV4_ID = 65
|
||||
|
||||
template = Template("""\
|
||||
interface $interface
|
||||
$addr
|
||||
ipv6 ospf6 instance-id $instanceid
|
||||
ipv6 ospf6 hello-interval 2
|
||||
ipv6 ospf6 dead-interval 11
|
||||
ipv6 ospf6 retransmit-interval 5
|
||||
ipv6 ospf6 network $network
|
||||
ipv6 ospf6 diffhellos
|
||||
ipv6 ospf6 adjacencyconnectivity uniconnected
|
||||
ipv6 ospf6 lsafullness mincostlsa
|
||||
""")
|
||||
|
||||
# ip address $ipaddr/32
|
||||
# ipv6 ospf6 simhelloLLtoULRecv :$simhelloport
|
||||
# !$ipaddr:$simhelloport
|
||||
|
||||
def __init__(self, netif, instanceid = AF_IPV4_ID,
|
||||
network = "manet-designated-router", **kwds):
|
||||
self.netif = netif
|
||||
def addrstr(x):
|
||||
if x.find(".") >= 0:
|
||||
return "ip address %s" % x
|
||||
elif x.find(":") >= 0:
|
||||
return "ipv6 address %s" % x
|
||||
else:
|
||||
raise ValueError, "invalid address: %s" % x
|
||||
addr = "\n ".join(map(addrstr, netif.addrlist))
|
||||
|
||||
self.instanceid = instanceid
|
||||
self.network = network
|
||||
Conf.__init__(self, interface = netif.name, addr = addr,
|
||||
instanceid = instanceid, network = network, **kwds)
|
||||
|
||||
def name(self):
|
||||
return self.netif.name
|
||||
|
||||
class QuaggaOSPF6(Conf):
|
||||
|
||||
template = Template("""\
|
||||
$interfaces
|
||||
!
|
||||
router ospf6
|
||||
router-id $routerid
|
||||
$ospfifs
|
||||
$redistribute
|
||||
""")
|
||||
|
||||
def __init__(self, ospf6ifs, area, routerid,
|
||||
redistribute = "! no redistribute"):
|
||||
ospf6ifs = maketuple(ospf6ifs)
|
||||
interfaces = "\n!\n".join(map(str, ospf6ifs))
|
||||
ospfifs = "\n ".join(map(lambda x: "interface %s area %s" % \
|
||||
(x.name(), area), ospf6ifs))
|
||||
Conf.__init__(self, interfaces = interfaces, routerid = routerid,
|
||||
ospfifs = ospfifs, redistribute = redistribute)
|
||||
|
||||
|
||||
class QuaggaConf(Conf):
|
||||
template = Template("""\
|
||||
log file $logfile
|
||||
$debugs
|
||||
!
|
||||
$routers
|
||||
!
|
||||
$forwarding
|
||||
""")
|
||||
|
||||
def __init__(self, routers, logfile, debugs = ()):
|
||||
routers = "\n!\n".join(map(str, maketuple(routers)))
|
||||
if debugs:
|
||||
debugs = "\n".join(maketuple(debugs))
|
||||
else:
|
||||
debugs = "! no debugs"
|
||||
forwarding = "ip forwarding\nipv6 forwarding"
|
||||
Conf.__init__(self, logfile = logfile, debugs = debugs,
|
||||
routers = routers, forwarding = forwarding)
|
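A hedged composition example for the template classes above, assuming the daemon's core package is importable; the interface name, addresses, and router ID are invented. Each Conf subclass renders through its Template when converted to a string, so printing the outermost QuaggaConf emits the whole configuration.

from core.misc.quagga import NetIf, QuaggaOSPF6Interface, QuaggaOSPF6, QuaggaConf

eth0 = NetIf("eth0", addrlist = ["10.0.0.1/32", "a::1/128"])
ospf6if = QuaggaOSPF6Interface(eth0)    # defaults to instance-id 65 (AF_IPV4_ID)
ospf6 = QuaggaOSPF6(ospf6if, area = "0.0.0.0", routerid = "10.0.0.1")
print QuaggaConf(ospf6, logfile = "/tmp/quagga.log")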
228
daemon/core/misc/utils.py
Normal file
|
@@ -0,0 +1,228 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
utils.py: miscellaneous utility functions, wrappers around some subprocess
|
||||
procedures.
|
||||
'''
|
||||
|
||||
import subprocess, os, ast, resource  # resource is used by daemonize() below
|
||||
|
||||
def checkexec(execlist):
|
||||
for bin in execlist:
|
||||
# note that os.access() uses real uid/gid; that should be okay
|
||||
# here
|
||||
if not os.access(bin, os.X_OK):
|
||||
raise EnvironmentError, "executable not found: %s" % bin
|
||||
|
||||
def ensurepath(pathlist):
|
||||
searchpath = os.environ["PATH"].split(":")
|
||||
for p in set(pathlist):
|
||||
if p not in searchpath:
|
||||
os.environ["PATH"] += ":" + p
|
||||
|
||||
def maketuple(obj):
|
||||
if hasattr(obj, "__iter__"):
|
||||
return tuple(obj)
|
||||
else:
|
||||
return (obj,)
|
||||
|
||||
def maketuplefromstr(s, type):
|
||||
s = s.replace('\\', '\\\\')
|
||||
return ast.literal_eval(s)
|
||||
#return tuple(type(i) for i in s[1:-1].split(','))
|
||||
#r = ()
|
||||
#for i in s.strip("()").split(','):
|
||||
# r += (i.strip("' "), )
|
||||
# chop empty last element from "('a',)" strings
|
||||
#if r[-1] == '':
|
||||
# r = r[:-1]
|
||||
#return r
|
||||
|
||||
def call(*args, **kwds):
|
||||
return subprocess.call(*args, **kwds)
|
||||
|
||||
def mutecall(*args, **kwds):
|
||||
kwds["stdout"] = open(os.devnull, "w")
|
||||
kwds["stderr"] = subprocess.STDOUT
|
||||
return call(*args, **kwds)
|
||||
|
||||
def check_call(*args, **kwds):
|
||||
return subprocess.check_call(*args, **kwds)
|
||||
|
||||
def mutecheck_call(*args, **kwds):
|
||||
kwds["stdout"] = open(os.devnull, "w")
|
||||
kwds["stderr"] = subprocess.STDOUT
|
||||
return subprocess.check_call(*args, **kwds)
|
||||
|
||||
def spawn(*args, **kwds):
|
||||
return subprocess.Popen(*args, **kwds).pid
|
||||
|
||||
def mutespawn(*args, **kwds):
|
||||
kwds["stdout"] = open(os.devnull, "w")
|
||||
kwds["stderr"] = subprocess.STDOUT
|
||||
return subprocess.Popen(*args, **kwds).pid
|
||||
|
||||
def detachinit():
|
||||
if os.fork():
|
||||
os._exit(0) # parent exits
|
||||
os.setsid()
|
||||
|
||||
def detach(*args, **kwds):
|
||||
kwds["preexec_fn"] = detachinit
|
||||
return subprocess.Popen(*args, **kwds).pid
|
||||
|
||||
def mutedetach(*args, **kwds):
|
||||
kwds["preexec_fn"] = detachinit
|
||||
kwds["stdout"] = open(os.devnull, "w")
|
||||
kwds["stderr"] = subprocess.STDOUT
|
||||
return subprocess.Popen(*args, **kwds).pid
|
||||
|
||||
def hexdump(s, bytes_per_word = 2, words_per_line = 8):
|
||||
dump = ""
|
||||
count = 0
|
||||
bytes = bytes_per_word * words_per_line
|
||||
while s:
|
||||
line = s[:bytes]
|
||||
s = s[bytes:]
|
||||
tmp = map(lambda x: ("%02x" * bytes_per_word) % x,
|
||||
zip(*[iter(map(ord, line))] * bytes_per_word))
|
||||
if len(line) % 2:
|
||||
tmp.append("%x" % ord(line[-1]))
|
||||
dump += "0x%08x: %s\n" % (count, " ".join(tmp))
|
||||
count += len(line)
|
||||
return dump[:-1]
|
||||
|
||||
def filemunge(pathname, header, text):
|
||||
''' Insert text at the end of a file, surrounded by header comments.
|
||||
'''
|
||||
filedemunge(pathname, header) # prevent duplicates
|
||||
f = open(pathname, 'a')
|
||||
f.write("# BEGIN %s\n" % header)
|
||||
f.write(text)
|
||||
f.write("# END %s\n" % header)
|
||||
f.close()
|
||||
|
||||
def filedemunge(pathname, header):
|
||||
''' Remove text that was inserted in a file surrounded by header comments.
|
||||
'''
|
||||
f = open(pathname, 'r')
|
||||
lines = f.readlines()
|
||||
f.close()
|
||||
start = None
|
||||
end = None
|
||||
for i in range(len(lines)):
|
||||
if lines[i] == "# BEGIN %s\n" % header:
|
||||
start = i
|
||||
elif lines[i] == "# END %s\n" % header:
|
||||
end = i + 1
|
||||
if start is None or end is None:
|
||||
return
|
||||
f = open(pathname, 'w')
|
||||
lines = lines[:start] + lines[end:]
|
||||
f.write("".join(lines))
|
||||
f.close()
|
||||
|
||||
def expandcorepath(pathname, session=None, node=None):
|
||||
''' Expand a file path given session information.
|
||||
'''
|
||||
if session is not None:
|
||||
pathname = pathname.replace('~', "/home/%s" % session.user)
|
||||
pathname = pathname.replace('%SESSION%', str(session.sessionid))
|
||||
pathname = pathname.replace('%SESSION_DIR%', session.sessiondir)
|
||||
pathname = pathname.replace('%SESSION_USER%', session.user)
|
||||
if node is not None:
|
||||
pathname = pathname.replace('%NODE%', str(node.objid))
|
||||
pathname = pathname.replace('%NODENAME%', node.name)
|
||||
return pathname
|
||||
|
||||
def sysctldevname(devname):
|
||||
''' Translate a device name to the name used with sysctl.
|
||||
'''
|
||||
if devname is None:
|
||||
return None
|
||||
return devname.replace(".", "/")
|
||||
|
||||
def daemonize(rootdir = "/", umask = 0, close_fds = False, dontclose = (),
|
||||
stdin = os.devnull, stdout = os.devnull, stderr = os.devnull,
|
||||
stdoutmode = 0644, stderrmode = 0644, pidfilename = None,
|
||||
defaultmaxfd = 1024):
|
||||
''' Run the background process as a daemon.
|
||||
'''
|
||||
if not hasattr(dontclose, "__contains__"):
|
||||
if not isinstance(dontclose, int):
|
||||
raise TypeError, "dontclose must be an integer"
|
||||
dontclose = (int(dontclose),)
|
||||
else:
|
||||
for fd in dontclose:
|
||||
if not isinstance(fd, int):
|
||||
raise TypeError, "dontclose must contain only integers"
|
||||
# redirect stdin
|
||||
if stdin:
|
||||
fd = os.open(stdin, os.O_RDONLY)
|
||||
os.dup2(fd, 0)
|
||||
os.close(fd)
|
||||
# redirect stdout
|
||||
if stdout:
|
||||
fd = os.open(stdout, os.O_WRONLY | os.O_CREAT | os.O_APPEND,
|
||||
stdoutmode)
|
||||
os.dup2(fd, 1)
|
||||
if (stdout == stderr):
|
||||
os.dup2(1, 2)
|
||||
os.close(fd)
|
||||
# redirect stderr
|
||||
if stderr and (stderr != stdout):
|
||||
fd = os.open(stderr, os.O_WRONLY | os.O_CREAT | os.O_APPEND,
|
||||
stderrmode)
|
||||
os.dup2(fd, 2)
|
||||
os.close(fd)
|
||||
if os.fork():
|
||||
os._exit(0) # parent exits
|
||||
os.setsid()
|
||||
pid = os.fork()
|
||||
if pid:
|
||||
if pidfilename:
|
||||
try:
|
||||
f = open(pidfilename, "w")
|
||||
f.write("%s\n" % pid)
|
||||
f.close()
|
||||
except:
|
||||
pass
|
||||
os._exit(0) # parent exits
|
||||
if rootdir:
|
||||
os.chdir(rootdir)
|
||||
os.umask(umask)
|
||||
if close_fds:
|
||||
try:
|
||||
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
|
||||
if maxfd == resource.RLIM_INFINITY:
|
||||
raise ValueError
|
||||
except:
|
||||
maxfd = defaultmaxfd
|
||||
for fd in xrange(3, maxfd):
|
||||
if fd in dontclose:
|
||||
continue
|
||||
try:
|
||||
os.close(fd)
|
||||
except:
|
||||
pass
|
||||
|
||||
def readfileintodict(filename, d):
|
||||
''' Read key=value pairs from a file, into a dict.
|
||||
Skip comments; strip newline characters and spacing.
|
||||
'''
|
||||
with open(filename, 'r') as f:
|
||||
lines = f.readlines()
|
||||
for l in lines:
|
||||
if l[:1] == '#':
|
||||
continue
|
||||
try:
|
||||
key, value = l.split('=', 1)
|
||||
d[key] = value.strip()
|
||||
except ValueError:
|
||||
pass
|
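Two quick, hedged checks of the pure helpers above, assuming the daemon's core package is importable; neither needs a CORE session.

from core.misc.utils import hexdump, maketuple

print hexdump("ABCDEFGH")            # 0x00000000: 4142 4344 4546 4748
print maketuple("eth0")              # ('eth0',)  -- a bare string is wrapped, not iterated
print maketuple(["eth0", "eth1"])    # ('eth0', 'eth1')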
259
daemon/core/misc/utm.py
Normal file
|
@@ -0,0 +1,259 @@
"""
utm
===

.. image:: https://travis-ci.org/Turbo87/utm.png

Bidirectional UTM-WGS84 converter for python

Usage
-----

::

    import utm

Convert a (latitude, longitude) tuple into an UTM coordinate::

    utm.from_latlon(51.2, 7.5)
    >>> (395201.3103811303, 5673135.241182375, 32, 'U')

Convert an UTM coordinate into a (latitude, longitude) tuple::

    utm.to_latlon(340000, 5710000, 32, 'U')
    >>> (51.51852098408468, 6.693872395145327)

Speed
-----

The library has been compared to the more generic pyproj library by running the
unit test suite through pyproj instead of utm. These are the results:

* with pyproj (without projection cache): 4.0 - 4.5 sec
* with pyproj (with projection cache): 0.9 - 1.0 sec
* with utm: 0.4 - 0.5 sec

Authors
-------

* Tobias Bieniek <Tobias.Bieniek@gmx.de>

License
-------

Copyright (C) 2012 Tobias Bieniek <Tobias.Bieniek@gmx.de>

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""

import math

__all__ = ['to_latlon', 'from_latlon']


class OutOfRangeError(ValueError):
    pass


K0 = 0.9996

E = 0.00669438
E2 = E * E
E3 = E2 * E
E_P2 = E / (1.0 - E)

SQRT_E = math.sqrt(1 - E)
_E = (1 - SQRT_E) / (1 + SQRT_E)
_E3 = _E * _E * _E
_E4 = _E3 * _E

M1 = (1 - E / 4 - 3 * E2 / 64 - 5 * E3 / 256)
M2 = (3 * E / 8 + 3 * E2 / 32 + 45 * E3 / 1024)
M3 = (15 * E2 / 256 + 45 * E3 / 1024)
M4 = (35 * E3 / 3072)

P2 = (3 * _E / 2 - 27 * _E3 / 32)
P3 = (21 * _E3 / 16 - 55 * _E4 / 32)
P4 = (151 * _E3 / 96)

R = 6378137

ZONE_LETTERS = [
    (84, None), (72, 'X'), (64, 'W'), (56, 'V'), (48, 'U'), (40, 'T'),
    (32, 'S'), (24, 'R'), (16, 'Q'), (8, 'P'), (0, 'N'), (-8, 'M'), (-16, 'L'),
    (-24, 'K'), (-32, 'J'), (-40, 'H'), (-48, 'G'), (-56, 'F'), (-64, 'E'),
    (-72, 'D'), (-80, 'C')
]


def to_latlon(easting, northing, zone_number, zone_letter):
    zone_letter = zone_letter.upper()

    if not 100000 <= easting < 1000000:
        raise OutOfRangeError('easting out of range (must be between 100.000 m and 999.999 m)')
    if not 0 <= northing <= 10000000:
        raise OutOfRangeError('northing out of range (must be between 0 m and 10.000.000 m)')
    if not 1 <= zone_number <= 60:
        raise OutOfRangeError('zone number out of range (must be between 1 and 60)')
    if not 'C' <= zone_letter <= 'X' or zone_letter in ['I', 'O']:
        raise OutOfRangeError('zone letter out of range (must be between C and X)')

    x = easting - 500000
    y = northing

    if zone_letter < 'N':
        y -= 10000000

    m = y / K0
    mu = m / (R * M1)

    p_rad = (mu + P2 * math.sin(2 * mu) + P3 * math.sin(4 * mu) + P4 * math.sin(6 * mu))

    p_sin = math.sin(p_rad)
    p_sin2 = p_sin * p_sin

    p_cos = math.cos(p_rad)

    p_tan = p_sin / p_cos
    p_tan2 = p_tan * p_tan
    p_tan4 = p_tan2 * p_tan2

    ep_sin = 1 - E * p_sin2
    ep_sin_sqrt = math.sqrt(1 - E * p_sin2)

    n = R / ep_sin_sqrt
    r = (1 - E) / ep_sin

    c = _E * p_cos**2
    c2 = c * c

    d = x / (n * K0)
    d2 = d * d
    d3 = d2 * d
    d4 = d3 * d
    d5 = d4 * d
    d6 = d5 * d

    latitude = (p_rad - (p_tan / r) *
                (d2 / 2 -
                 d4 / 24 * (5 + 3 * p_tan2 + 10 * c - 4 * c2 - 9 * E_P2)) +
                d6 / 720 * (61 + 90 * p_tan2 + 298 * c + 45 * p_tan4 - 252 * E_P2 - 3 * c2))

    longitude = (d -
                 d3 / 6 * (1 + 2 * p_tan2 + c) +
                 d5 / 120 * (5 - 2 * c + 28 * p_tan2 - 3 * c2 + 8 * E_P2 + 24 * p_tan4)) / p_cos

    return (math.degrees(latitude),
            math.degrees(longitude) + zone_number_to_central_longitude(zone_number))


def from_latlon(latitude, longitude):
    if not -80.0 <= latitude <= 84.0:
        raise OutOfRangeError('latitude out of range (must be between 80 deg S and 84 deg N)')
    if not -180.0 <= longitude <= 180.0:
        raise OutOfRangeError('longitude out of range (must be between 180 deg W and 180 deg E)')

    lat_rad = math.radians(latitude)
    lat_sin = math.sin(lat_rad)
    lat_cos = math.cos(lat_rad)

    lat_tan = lat_sin / lat_cos
    lat_tan2 = lat_tan * lat_tan
    lat_tan4 = lat_tan2 * lat_tan2

    lon_rad = math.radians(longitude)

    zone_number = latlon_to_zone_number(latitude, longitude)
    central_lon = zone_number_to_central_longitude(zone_number)
    central_lon_rad = math.radians(central_lon)

    zone_letter = latitude_to_zone_letter(latitude)

    n = R / math.sqrt(1 - E * lat_sin**2)
    c = E_P2 * lat_cos**2

    a = lat_cos * (lon_rad - central_lon_rad)
    a2 = a * a
    a3 = a2 * a
    a4 = a3 * a
    a5 = a4 * a
    a6 = a5 * a

    m = R * (M1 * lat_rad -
             M2 * math.sin(2 * lat_rad) +
             M3 * math.sin(4 * lat_rad) -
             M4 * math.sin(6 * lat_rad))

    easting = K0 * n * (a +
                        a3 / 6 * (1 - lat_tan2 + c) +
                        a5 / 120 * (5 - 18 * lat_tan2 + lat_tan4 + 72 * c - 58 * E_P2)) + 500000

    northing = K0 * (m + n * lat_tan * (a2 / 2 +
                                        a4 / 24 * (5 - lat_tan2 + 9 * c + 4 * c**2) +
                                        a6 / 720 * (61 - 58 * lat_tan2 + lat_tan4 + 600 * c - 330 * E_P2)))

    if latitude < 0:
        northing += 10000000

    return easting, northing, zone_number, zone_letter


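A quick round-trip check of the two converters above; this is a separate sketch, assuming the module is importable as core.misc.utm (the import path follows this file's location in the tree):

from core.misc import utm

easting, northing, zone, letter = utm.from_latlon(51.2, 7.5)
lat, lon = utm.to_latlon(easting, northing, zone, letter)
print("zone %d%s" % (zone, letter))     # zone 32U
print("%.6f %.6f" % (lat, lon))         # approximately 51.200000 7.500000
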
def latitude_to_zone_letter(latitude):
    for lat_min, zone_letter in ZONE_LETTERS:
        if latitude >= lat_min:
            return zone_letter

    return None


def latlon_to_zone_number(latitude, longitude):
    if 56 <= latitude <= 64 and 3 <= longitude <= 12:
        return 32

    if 72 <= latitude <= 84 and longitude >= 0:
        if longitude <= 9:
            return 31
        elif longitude <= 21:
            return 33
        elif longitude <= 33:
            return 35
        elif longitude <= 42:
            return 37

    return int((longitude + 180) / 6) + 1


def zone_number_to_central_longitude(zone_number):
    return (zone_number - 1) * 6 - 180 + 3


def haversine(lon1, lat1, lon2, lat2):
    """
    Calculate the great circle distance between two points
    on the earth (specified in decimal degrees)
    """
    # convert decimal degrees to radians
    lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])
    # haversine formula
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2
    c = 2 * math.asin(math.sqrt(a))
    m = 6367000 * c
    return m
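Note that haversine() takes its arguments in (lon1, lat1, lon2, lat2) order and returns meters on a sphere of radius 6367 km. A small sketch with illustrative coordinates:

from core.misc.utm import haversine

# approximate city-center coordinates (lon, lat)
washington = (-77.0369, 38.9072)
new_york = (-74.0060, 40.7128)
m = haversine(washington[0], washington[1], new_york[0], new_york[1])
print("%.1f km" % (m / 1000.0))   # roughly 328 km
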
776
daemon/core/misc/xmlutils.py
Normal file
@@ -0,0 +1,776 @@
#
# CORE
# Copyright (c)2011-2013 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
Helpers for loading and saving XML files. savesessionxml(session, filename) is
the main public interface here.
'''
import os, pwd
from xml.dom.minidom import parse, Document, Node
from core import pycore
from core.api import coreapi

def addelementsfromlist(dom, parent, iterable, name, attr_name):
    ''' XML helper to iterate through a list and add items to parent using tags
        of the given name and the item value as an attribute named attr_name.
        Example: addelementsfromlist(dom, parent, ('a','b','c'), "letter", "value")
        <parent>
          <letter value="a"/>
          <letter value="b"/>
          <letter value="c"/>
        </parent>
    '''
    for item in iterable:
        element = dom.createElement(name)
        element.setAttribute(attr_name, item)
        parent.appendChild(element)

def addtextelementsfromlist(dom, parent, iterable, name, attrs):
    ''' XML helper to iterate through a list and add items to parent using tags
        of the given name, attributes specified in the attrs tuple, and having the
        text of the item within the tags.
    '''
    for item in iterable:
        element = dom.createElement(name)
        for k,v in attrs:
            element.setAttribute(k, v)
        parent.appendChild(element)
        txt = dom.createTextNode(item)
        element.appendChild(txt)

def gettextelementstolist(parent):
    ''' XML helper to parse child text nodes from the given parent and return
        a list of (key, value) tuples.
    '''
    r = []
    for n in parent.childNodes:
        if n.nodeType != Node.ELEMENT_NODE:
            continue
        k = str(n.nodeName)
        v = '' # sometimes want None here?
        for c in n.childNodes:
            if c.nodeType != Node.TEXT_NODE:
                continue
            v = str(c.nodeValue)
            break
        r.append((k,v))
    return r

def addparamtoparent(dom, parent, name, value):
    ''' XML helper to add a <param name="name" value="value"/> tag to the parent
        element, when value is not None.
    '''
    if value is None:
        return None
    p = dom.createElement("param")
    parent.appendChild(p)
    p.setAttribute("name", name)
    p.setAttribute("value", "%s" % value)
    return p

def addtextparamtoparent(dom, parent, name, value):
    ''' XML helper to add a <param name="name">value</param> tag to the parent
        element, when value is not None.
    '''
    if value is None:
        return None
    p = dom.createElement("param")
    parent.appendChild(p)
    p.setAttribute("name", name)
    txt = dom.createTextNode(value)
    p.appendChild(txt)
    return p

def getoneelement(dom, name):
    e = dom.getElementsByTagName(name)
    if len(e) == 0:
        return None
    return e[0]

def gettextchild(dom):
    # this could be improved to skip XML comments
    child = dom.firstChild
    if child is not None and child.nodeType == Node.TEXT_NODE:
        return str(child.nodeValue)
    return None

def getparamssetattrs(dom, param_names, target):
    ''' XML helper to get <param name="name" value="value"/> tags and set
        the attribute in the target object. String type is used. Target object
        attribute is unchanged if the XML attribute is not present.
    '''
    params = dom.getElementsByTagName("param")
    for param in params:
        param_name = param.getAttribute("name")
        value = param.getAttribute("value")
        if value is None:
            continue # never reached?
        if param_name in param_names:
            setattr(target, param_name, str(value))

def xmltypetonodeclass(session, type):
    ''' Helper to convert from a type string to a class name in pycore.nodes.*.
    '''
    if hasattr(pycore.nodes, type):
        return eval("pycore.nodes.%s" % type)
    else:
        return None

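The DOM helpers above need nothing from CORE itself, only xml.dom.minidom, so they can be exercised on their own. A minimal sketch, assuming the helper functions above are in scope (importing the whole module also pulls in core.pycore):

from xml.dom.minidom import Document

doc = Document()
root = doc.createElement("parent")
doc.appendChild(root)
addelementsfromlist(doc, root, ('a', 'b', 'c'), "letter", "value")
addtextelementsfromlist(doc, root, ('ls', 'pwd'), "Command", (("type", "start"),))
print(doc.toprettyxml(indent="  "))
# gettextelementstolist(root) returns
#   [('letter', ''), ('letter', ''), ('letter', ''), ('Command', 'ls'), ('Command', 'pwd')]
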
class CoreDocumentParser(object):
    def __init__(self, session, filename):
        self.session = session
        self.verbose = self.session.getcfgitembool('verbose', False)
        self.filename = filename
        self.dom = parse(filename)

        #self.scenario = getoneelement(self.dom, "Scenario")
        self.np = getoneelement(self.dom, "NetworkPlan")
        if self.np is None:
            raise ValueError, "missing NetworkPlan!"
        self.mp = getoneelement(self.dom, "MotionPlan")
        self.sp = getoneelement(self.dom, "ServicePlan")
        self.meta = getoneelement(self.dom, "CoreMetaData")

        self.coords = self.getmotiondict(self.mp)
        # link parameters parsed in parsenets(), applied in parsenodes()
        self.linkparams = {}

        self.parsenets()
        self.parsenodes()
        self.parseservices()
        self.parsemeta()


    def warn(self, msg):
        if self.session:
            warnstr = "XML parsing '%s':" % (self.filename)
            self.session.warn("%s %s" % (warnstr, msg))

    def getmotiondict(self, mp):
        ''' Parse a MotionPlan into a dict with node names for keys and coordinates
            for values.
        '''
        if mp is None:
            return {}
        coords = {}
        for node in mp.getElementsByTagName("Node"):
            nodename = str(node.getAttribute("name"))
            if nodename == '':
                continue
            for m in node.getElementsByTagName("motion"):
                if m.getAttribute("type") != "stationary":
                    continue
                point = m.getElementsByTagName("point")
                if len(point) == 0:
                    continue
                txt = point[0].firstChild
                if txt is None:
                    continue
                xyz = map(int, txt.nodeValue.split(','))
                z = None
                x, y = xyz[0:2]
                if (len(xyz) == 3):
                    z = xyz[2]
                coords[nodename] = (x, y, z)
        return coords

    @staticmethod
    def getcommonattributes(obj):
        ''' Helper to return tuple of attributes common to nodes and nets.
        '''
        id = int(obj.getAttribute("id"))
        name = str(obj.getAttribute("name"))
        type = str(obj.getAttribute("type"))
        return(id, name, type)

    def parsenets(self):
        linkednets = []
        for net in self.np.getElementsByTagName("NetworkDefinition"):
            id, name, type = self.getcommonattributes(net)
            nodecls = xmltypetonodeclass(self.session, type)
            if not nodecls:
                self.warn("skipping unknown network node '%s' type '%s'" % \
                          (name, type))
                continue
            n = self.session.addobj(cls = nodecls, objid = id, name = name,
                                    start = False)
            if name in self.coords:
                x, y, z = self.coords[name]
                n.setposition(x, y, z)
            getparamssetattrs(net, ("icon", "canvas", "opaque"), n)
            if hasattr(n, "canvas") and n.canvas is not None:
                n.canvas = int(n.canvas)
            # links between two nets (e.g. switch-switch)
            for ifc in net.getElementsByTagName("interface"):
                netid = str(ifc.getAttribute("net"))
                linkednets.append((n, netid))
            self.parsemodels(net, n)
        # link networks together now that they all have been parsed
        for (n, netid) in linkednets:
            try:
                n2 = n.session.objbyname(netid)
            except KeyError:
                n.warn("skipping net %s interface: unknown net %s" % \
                       (n.name, netid))
                continue
            n.linknet(n2)

    def parsenodes(self):
        for node in self.np.getElementsByTagName("Node"):
            id, name, type = self.getcommonattributes(node)
            if type == "rj45":
                nodecls = pycore.nodes.RJ45Node
            else:
                nodecls = pycore.nodes.CoreNode
            n = self.session.addobj(cls = nodecls, objid = id, name = name,
                                    start = False)
            if name in self.coords:
                x, y, z = self.coords[name]
                n.setposition(x, y, z)
            n.type = type
            getparamssetattrs(node, ("icon", "canvas", "opaque"), n)
            if hasattr(n, "canvas") and n.canvas is not None:
                n.canvas = int(n.canvas)
            for ifc in node.getElementsByTagName("interface"):
                self.parseinterface(n, ifc)

    def parseinterface(self, n, ifc):
        ''' Parse a interface block such as:
            <interface name="eth0" net="37278">
                <address type="mac">00:00:00:aa:00:01</address>
                <address>10.0.0.2/24</address>
                <address>2001::2/64</address>
            </interface>
        '''
        name = str(ifc.getAttribute("name"))
        netid = str(ifc.getAttribute("net"))
        hwaddr = None
        addrlist = []
        try:
            net = n.session.objbyname(netid)
        except KeyError:
            n.warn("skipping node %s interface %s: unknown net %s" % \
                   (n.name, name, netid))
            return
        for addr in ifc.getElementsByTagName("address"):
            addrstr = gettextchild(addr)
            if addrstr is None:
                continue
            if addr.getAttribute("type") == "mac":
                hwaddr = addrstr
            else:
                addrlist.append(addrstr)
        i = n.newnetif(net, addrlist = addrlist, hwaddr = hwaddr,
                       ifindex = None, ifname = name)
        for model in ifc.getElementsByTagName("model"):
            self.parsemodel(model, n, n.objid)
        key = (n.name, name)
        if key in self.linkparams:
            netif = n.netif(i)
            for (k, v) in self.linkparams[key]:
                netif.setparam(k, v)

    def parsemodels(self, dom, obj):
        ''' Mobility/wireless model config is stored in a ConfigurableManager's
            config dict.
        '''
        nodenum = int(dom.getAttribute("id"))
        for model in dom.getElementsByTagName("model"):
            self.parsemodel(model, obj, nodenum)

    def parsemodel(self, model, obj, nodenum):
        ''' Mobility/wireless model config is stored in a ConfigurableManager's
            config dict.
        '''
        name = model.getAttribute("name")
        if name == '':
            return
        type = model.getAttribute("type")
        # convert child text nodes into key=value pairs
        kvs = gettextelementstolist(model)

        mgr = self.session.mobility
        # TODO: the session.confobj() mechanism could be more generic;
        #       it only allows registering Conf Message callbacks, but here
        #       we want access to the ConfigurableManager, not the callback
        if name[:5] == "emane":
            mgr = self.session.emane
        elif name[:5] == "netem":
            mgr = None
            self.parsenetem(model, obj, kvs)

        elif name[:3] == "xen":
            mgr = self.session.xen
        # TODO: assign other config managers here
        if mgr:
            mgr.setconfig_keyvalues(nodenum, name, kvs)

    def parsenetem(self, model, obj, kvs):
        ''' Determine interface and invoke setparam() using the parsed
            (key, value) pairs.
        '''
        ifname = model.getAttribute("netif")
        peer = model.getAttribute("peer")
        key = (peer, ifname)
        # nodes and interfaces do not exist yet, at this point of the parsing,
        # save (key, value) pairs for later
        try:
            #kvs = map(lambda(k, v): (int(v)), kvs)
            kvs = map(self.numericvalue, kvs)
        except ValueError:
            self.warn("error parsing link parameters for '%s' on '%s'" % \
                      (ifname, peer))
        self.linkparams[key] = kvs

    @staticmethod
    def numericvalue(keyvalue):
        (key, value) = keyvalue
        if '.' in str(value):
            value = float(value)
        else:
            value = int(value)
        return (key, value)

    def parseservices(self):
        ''' After node objects exist, parse service customizations and add them
            to the nodes.
        '''
        svclists = {}
        # parse services and store configs into session.services.configs
        for node in self.sp.getElementsByTagName("Node"):
            name = node.getAttribute("name")
            n = self.session.objbyname(name)
            if n is None:
                self.warn("skipping service config for unknown node '%s'" % \
                          name)
                continue
            for service in node.getElementsByTagName("Service"):
                svcname = service.getAttribute("name")
                if self.parseservice(service, n):
                    if n.objid in svclists:
                        svclists[n.objid] += "|" + svcname
                    else:
                        svclists[n.objid] = svcname
        # associate nodes with services
        for objid in sorted(svclists.keys()):
            n = self.session.obj(objid)
            self.session.services.addservicestonode(node=n, nodetype=n.type,
                                                    services_str=svclists[objid],
                                                    verbose=self.verbose)

    def parseservice(self, service, n):
        ''' Use session.services manager to store service customizations before
            they are added to a node.
        '''
        name = service.getAttribute("name")
        svc = self.session.services.getservicebyname(name)
        if svc is None:
            return False
        values = []
        startup_idx = service.getAttribute("startup_idx")
        if startup_idx is not None:
            values.append("startidx=%s" % startup_idx)
        startup_time = service.getAttribute("start_time")
        if startup_time is not None:
            values.append("starttime=%s" % startup_time)
        dirs = []
        for dir in service.getElementsByTagName("Directory"):
            dirname = dir.getAttribute("name")
            dirs.append(dirname)
        if len(dirs):
            values.append("dirs=%s" % dirs)

        startup = []
        shutdown = []
        validate = []
        for cmd in service.getElementsByTagName("Command"):
            type = cmd.getAttribute("type")
            cmdstr = gettextchild(cmd)
            if cmdstr is None:
                continue
            if type == "start":
                startup.append(cmdstr)
            elif type == "stop":
                shutdown.append(cmdstr)
            elif type == "validate":
                validate.append(cmdstr)
        if len(startup):
            values.append("cmdup=%s" % startup)
        if len(shutdown):
            values.append("cmddown=%s" % shutdown)
        if len(validate):
            values.append("cmdval=%s" % validate)

        files = []
        for file in service.getElementsByTagName("File"):
            filename = file.getAttribute("name")
            files.append(filename)
            data = gettextchild(file)
            typestr = "service:%s:%s" % (name, filename)
            self.session.services.setservicefile(nodenum=n.objid, type=typestr,
                                                 filename=filename,
                                                 srcname=None, data=data)
        if len(files):
            values.append("files=%s" % files)
        if not bool(service.getAttribute("custom")):
            return True
        self.session.services.setcustomservice(n.objid, svc, values)
        return True

    def parsehooks(self, hooks):
        ''' Parse hook scripts from XML into session._hooks.
        '''
        for hook in hooks.getElementsByTagName("Hook"):
            filename = hook.getAttribute("name")
            state = hook.getAttribute("state")
            data = gettextchild(hook)
            if data is None:
                data = "" # allow for empty file
            type = "hook:%s" % state
            self.session.sethook(type, filename=filename,
                                 srcname=None, data=data)

    def parsemeta(self):
        opt = getoneelement(self.meta, "SessionOptions")
        if opt:
            for param in opt.getElementsByTagName("param"):
                k = str(param.getAttribute("name"))
                v = str(param.getAttribute("value"))
                if v == '':
                    v = gettextchild(param) # allow attribute/text for newlines
                setattr(self.session.options, k, v)
        hooks = getoneelement(self.meta, "Hooks")
        if hooks:
            self.parsehooks(hooks)
        meta = getoneelement(self.meta, "MetaData")
        if meta:
            for param in meta.getElementsByTagName("param"):
                k = str(param.getAttribute("name"))
                v = str(param.getAttribute("value"))
                if v == '':
                    v = gettextchild(param)
                self.session.metadata.additem(k, v)


class CoreDocumentWriter(Document):
    ''' Utility class for writing a CoreSession to XML. The init method builds
        an xml.dom.minidom.Document, and the writexml() method saves the XML file.
    '''
    def __init__(self, session):
        ''' Create an empty Scenario XML Document, then populate it with
            objects from the given session.
        '''
        Document.__init__(self)
        self.session = session
        self.scenario = self.createElement("Scenario")
        self.np = self.createElement("NetworkPlan")
        self.mp = self.createElement("MotionPlan")
        self.sp = self.createElement("ServicePlan")
        self.meta = self.createElement("CoreMetaData")

        self.appendChild(self.scenario)
        self.scenario.appendChild(self.np)
        self.scenario.appendChild(self.mp)
        self.scenario.appendChild(self.sp)
        self.scenario.appendChild(self.meta)

        self.populatefromsession()

    def populatefromsession(self):
        self.session.emane.setup() # not during runtime?
        self.addnets()
        self.addnodes()
        self.addmetadata()

    def writexml(self, filename):
        self.session.info("saving session XML file %s" % filename)
        f = open(filename, "w")
        Document.writexml(self, writer=f, indent="", addindent="  ", newl="\n", \
                          encoding="UTF-8")
        f.close()
        if self.session.user is not None:
            uid = pwd.getpwnam(self.session.user).pw_uid
            gid = os.stat(self.session.sessiondir).st_gid
            os.chown(filename, uid, gid)

    def addnets(self):
        ''' Add PyCoreNet objects as NetworkDefinition XML elements.
        '''
        with self.session._objslock:
            for net in self.session.objs():
                if not isinstance(net, pycore.nodes.PyCoreNet):
                    continue
                self.addnet(net)

    def addnet(self, net):
        ''' Add one PyCoreNet object as a NetworkDefinition XML element.
        '''
        n = self.createElement("NetworkDefinition")
        self.np.appendChild(n)
        n.setAttribute("name", net.name)
        # could use net.brname
        n.setAttribute("id", "%s" % net.objid)
        n.setAttribute("type", "%s" % net.__class__.__name__)
        self.addnetinterfaces(n, net)
        # key used with tunnel node
        if hasattr(net, 'grekey') and net.grekey is not None:
            n.setAttribute("key", "%s" % net.grekey)
        # link parameters
        for netif in net.netifs(sort=True):
            self.addnetem(n, netif)
        # wireless/mobility models
        modelconfigs = net.session.mobility.getmodels(net)
        modelconfigs += net.session.emane.getmodels(net)
        self.addmodels(n, modelconfigs)
        self.addposition(net)

    def addnetem(self, n, netif):
        ''' Similar to addmodels(); used for writing netem link effects
            parameters. TODO: Interface parameters should be moved to the model
            construct, then this separate method shouldn't be required.
        '''
        if not hasattr(netif, "node") or netif.node is None:
            return
        params = netif.getparams()
        if len(params) == 0:
            return
        model = self.createElement("model")
        model.setAttribute("name", "netem")
        model.setAttribute("netif", netif.name)
        model.setAttribute("peer", netif.node.name)
        has_params = False
        for k, v in params:
            # default netem parameters are 0 or None
            if v is None or v == 0:
                continue
            if k == "has_netem" or k == "has_tbf":
                continue
            key = self.createElement(k)
            key.appendChild(self.createTextNode("%s" % v))
            model.appendChild(key)
            has_params = True
        if has_params:
            n.appendChild(model)

    def addmodels(self, n, configs):
        ''' Add models from a list of model-class, config values tuples.
        '''
        for (m, conf) in configs:
            model = self.createElement("model")
            n.appendChild(model)
            model.setAttribute("name", m._name)
            type = "wireless"
            if m._type == coreapi.CORE_TLV_REG_MOBILITY:
                type = "mobility"
            model.setAttribute("type", type)
            for i, k in enumerate(m.getnames()):
                key = self.createElement(k)
                value = conf[i]
                if value is None:
                    value = ""
                key.appendChild(self.createTextNode("%s" % value))
                model.appendChild(key)

    def addnodes(self):
        ''' Add PyCoreNode objects as node XML elements.
        '''
        with self.session._objslock:
            for node in self.session.objs():
                if not isinstance(node, pycore.nodes.PyCoreNode):
                    continue
                self.addnode(node)

    def addnode(self, node):
        ''' Add a PyCoreNode object as node XML elements.
        '''
        n = self.createElement("Node")
        self.np.appendChild(n)
        n.setAttribute("name", node.name)
        n.setAttribute("id", "%s" % node.nodeid())
        if node.type:
            n.setAttribute("type", node.type)
        self.addinterfaces(n, node)
        self.addposition(node)
        addparamtoparent(self, n, "icon", node.icon)
        addparamtoparent(self, n, "canvas", node.canvas)
        self.addservices(node)

    def addinterfaces(self, n, node):
        ''' Add PyCoreNetIfs to node XML elements.
        '''
        for ifc in node.netifs(sort=True):
            i = self.createElement("interface")
            n.appendChild(i)
            i.setAttribute("name", ifc.name)
            netmodel = None
            if ifc.net:
                i.setAttribute("net", ifc.net.name)
                if hasattr(ifc.net, "model"):
                    netmodel = ifc.net.model
            if ifc.mtu and ifc.mtu != 1500:
                i.setAttribute("mtu", "%s" % ifc.mtu)
            # could use ifc.params, transport_type
            self.addaddresses(i, ifc)
            # per-interface models
            if netmodel and netmodel._name[:6] == "emane_":
                cfg = self.session.emane.getifcconfig(node.objid, netmodel._name,
                                                      None, ifc)
                if cfg:
                    self.addmodels(i, ((netmodel, cfg),) )


    def addnetinterfaces(self, n, net):
        ''' Similar to addinterfaces(), but only adds interface elements to the
            supplied XML node that would not otherwise appear in the Node elements.
            These are any interfaces that link two switches/hubs together.
        '''
        for ifc in net.netifs(sort=True):
            if not hasattr(ifc, "othernet") or not ifc.othernet:
                continue
            if net.objid == ifc.net.objid:
                continue
            i = self.createElement("interface")
            n.appendChild(i)
            i.setAttribute("name", ifc.name)
            if ifc.net:
                i.setAttribute("net", ifc.net.name)

    def addposition(self, node):
        ''' Add object coordinates as location XML element.
        '''
        (x,y,z) = node.position.get()
        if x is None or y is None:
            return
        # <Node name="n1">
        mpn = self.createElement("Node")
        mpn.setAttribute("name", node.name)
        self.mp.appendChild(mpn)

        # <motion type="stationary">
        motion = self.createElement("motion")
        motion.setAttribute("type", "stationary")
        mpn.appendChild(motion)

        # <point>$X$,$Y$,$Z$</point>
        pt = self.createElement("point")
        motion.appendChild(pt)
        coordstxt = "%s,%s" % (x,y)
        if z:
            coordstxt += ",%s" % z
        coords = self.createTextNode(coordstxt)
        pt.appendChild(coords)

    def addservices(self, node):
        ''' Add services and their customizations to the ServicePlan.
        '''
        if len(node.services) == 0:
            return
        defaults = self.session.services.getdefaultservices(node.type)
        if node.services == defaults:
            return
        spn = self.createElement("Node")
        spn.setAttribute("name", node.name)
        self.sp.appendChild(spn)

        for svc in node.services:
            s = self.createElement("Service")
            spn.appendChild(s)
            s.setAttribute("name", str(svc._name))
            s.setAttribute("startup_idx", str(svc._startindex))
            if svc._starttime != "":
                s.setAttribute("start_time", str(svc._starttime))
            # only record service names if not a customized service
            if not svc._custom:
                continue
            s.setAttribute("custom", str(svc._custom))
            addelementsfromlist(self, s, svc._dirs, "Directory", "name")

            for fn in svc._configs:
                if len(fn) == 0:
                    continue
                f = self.createElement("File")
                f.setAttribute("name", fn)
                # all file names are added to determine when a file has been deleted
                s.appendChild(f)
                data = self.session.services.getservicefiledata(svc, fn)
                if data is None:
                    # this includes only customized file contents and skips
                    # the auto-generated files
                    continue
                txt = self.createTextNode(data)
                f.appendChild(txt)

            addtextelementsfromlist(self, s, svc._startup, "Command",
                                    (("type","start"),))
            addtextelementsfromlist(self, s, svc._shutdown, "Command",
                                    (("type","stop"),))
            addtextelementsfromlist(self, s, svc._validate, "Command",
                                    (("type","validate"),))

    def addaddresses(self, i, netif):
        ''' Add MAC and IP addresses to interface XML elements.
        '''
        if netif.hwaddr:
            h = self.createElement("address")
            i.appendChild(h)
            h.setAttribute("type", "mac")
            htxt = self.createTextNode("%s" % netif.hwaddr)
            h.appendChild(htxt)
        for addr in netif.addrlist:
            a = self.createElement("address")
            i.appendChild(a)
            # a.setAttribute("type", )
            atxt = self.createTextNode("%s" % addr)
            a.appendChild(atxt)

    def addhooks(self):
        ''' Add hook script XML elements to the metadata tag.
        '''
        hooks = self.createElement("Hooks")
        for state in sorted(self.session._hooks.keys()):
            for (filename, data) in self.session._hooks[state]:
                hook = self.createElement("Hook")
                hook.setAttribute("name", filename)
                hook.setAttribute("state", str(state))
                txt = self.createTextNode(data)
                hook.appendChild(txt)
                hooks.appendChild(hook)
        if hooks.hasChildNodes():
            self.meta.appendChild(hooks)

    def addmetadata(self):
        ''' Add CORE-specific session meta-data XML elements.
        '''
        # options
        options = self.createElement("SessionOptions")
        defaults = self.session.options.getdefaultvalues()
        for i, (k, v) in enumerate(self.session.options.getkeyvaluelist()):
            if str(v) != str(defaults[i]):
                addtextparamtoparent(self, options, k, v)
                #addparamtoparent(self, options, k, v)
        if options.hasChildNodes():
            self.meta.appendChild(options)
        # hook scripts
        self.addhooks()
        # meta
        meta = self.createElement("MetaData")
        self.meta.appendChild(meta)
        for (k, v) in self.session.metadata.items():
            addtextparamtoparent(self, meta, k, v)
            #addparamtoparent(self, meta, k, v)

def opensessionxml(session, filename):
    ''' Import a session from the EmulationScript XML format.
    '''
    doc = CoreDocumentParser(session, filename)

def savesessionxml(session, filename):
    ''' Export a session to the EmulationScript XML format.
    '''
    doc = CoreDocumentWriter(session)
    doc.writexml(filename)
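A hedged usage sketch for the two entry points above. It assumes a working CORE installation, root privileges, and the pycore session API imported elsewhere in this same commit; object names and the output path are illustrative:

from core import pycore
from core.misc.xmlutils import savesessionxml

session = pycore.Session(persistent=True)
switch = session.addobj(cls=pycore.nodes.SwitchNode, name="lanswitch")
n1 = session.addobj(cls=pycore.nodes.CoreNode, name="n1")
n1.newnetif(switch, addrlist=["10.0.0.1/24"])
savesessionxml(session, "/tmp/scenario.xml")
# opensessionxml(newsession, "/tmp/scenario.xml") would repopulate a fresh session
session.shutdown()
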
929
daemon/core/mobility.py
Normal file
@@ -0,0 +1,929 @@
#
# CORE
# Copyright (c)2011-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
mobility.py: mobility helpers for moving nodes and calculating wireless range.
'''
import sys, os, time, string, math, threading
import heapq
from core.api import coreapi
from core.conf import ConfigurableManager, Configurable
from core.coreobj import PyCoreNode
from core.misc.utils import check_call
from core.misc.ipaddr import IPAddr

class MobilityManager(ConfigurableManager):
    ''' Member of session class for handling configuration data for mobility and
        range models.
    '''
    _name = "MobilityManager"
    _type = coreapi.CORE_TLV_REG_WIRELESS

    def __init__(self, session):
        ConfigurableManager.__init__(self, session)
        self.verbose = self.session.getcfgitembool('verbose', False)
        # configurations for basic range, indexed by WLAN node number, are
        # stored in self.configs
        # mapping from model names to their classes
        self._modelclsmap = {}
        # dummy node objects for tracking position of nodes on other servers
        self.phys = {}
        self.physnets = {}
        self.session.broker.handlers += (self.physnodehandlelink, )
        self.register()

    def startup(self):
        ''' Session is transitioning from instantiation to runtime state.
            Instantiate any mobility models that have been configured for a WLAN.
        '''
        for nodenum in self.configs:
            v = self.configs[nodenum]
            try:
                n = self.session.obj(nodenum)
            except KeyError:
                self.session.warn("Skipping mobility configuration for unknown"
                                  "node %d." % nodenum)
                continue
            for model in v:
                try:
                    cls = self._modelclsmap[model[0]]
                except KeyError:
                    self.session.warn("Skipping mobility configuration for "
                                      "unknown model '%s'" % model[0])
                    continue
                n.setmodel(cls, model[1])
            if self.session.master:
                self.installphysnodes(n)
            if n.mobility:
                self.session.evq.add_event(0.0, n.mobility.startup)


    def reset(self):
        ''' Reset all configs.
        '''
        self.clearconfig(nodenum=None)

    def register(self):
        ''' Register models as configurable object(s) with the Session object.
        '''
        models = [BasicRangeModel, Ns2ScriptedMobility]
        for m in models:
            self.session.addconfobj(m._name, m._type, m.configure_mob)
            self._modelclsmap[m._name] = m

    def handleevent(self, msg):
        ''' Handle an Event Message used to start, stop, or pause
            mobility scripts for a given WlanNode.
        '''
        eventtype = msg.gettlv(coreapi.CORE_TLV_EVENT_TYPE)
        nodenum = msg.gettlv(coreapi.CORE_TLV_EVENT_NODE)
        name = msg.gettlv(coreapi.CORE_TLV_EVENT_NAME)
        try:
            node = self.session.obj(nodenum)
        except KeyError:
            self.session.warn("Ignoring event for model '%s', unknown node " \
                              "'%s'" % (name, nodenum))
            return

        # name is e.g. "mobility:ns2script"
        models = name[9:].split(',')
        for m in models:
            try:
                cls = self._modelclsmap[m]
            except KeyError:
                self.session.warn("Ignoring event for unknown model '%s'" % m)
                continue
            _name = "waypoint"
            if cls._type == coreapi.CORE_TLV_REG_WIRELESS:
                model = node.mobility
            elif cls._type == coreapi.CORE_TLV_REG_MOBILITY:
                model = node.mobility
            else:
                continue
            if model is None:
                self.session.warn("Ignoring event, %s has no model" % node.name)
                continue
            if cls._name != model._name:
                self.session.warn("Ignoring event for %s wrong model %s,%s" % \
                                  (node.name, cls._name, model._name))
                continue

            if eventtype == coreapi.CORE_EVENT_STOP or \
               eventtype == coreapi.CORE_EVENT_RESTART:
                model.stop(move_initial=True)
            if eventtype == coreapi.CORE_EVENT_START or \
               eventtype == coreapi.CORE_EVENT_RESTART:
                model.start()
            if eventtype == coreapi.CORE_EVENT_PAUSE:
                model.pause()

    def sendevent(self, model):
        ''' Send an event message on behalf of a mobility model.
            This communicates the current and end (max) times to the GUI.
        '''
        if model.state == model.STATE_STOPPED:
            eventtype = coreapi.CORE_EVENT_STOP
        elif model.state == model.STATE_RUNNING:
            eventtype = coreapi.CORE_EVENT_START
        elif model.state == model.STATE_PAUSED:
            eventtype = coreapi.CORE_EVENT_PAUSE
        data = "start=%d" % int(model.lasttime - model.timezero)
        data += " end=%d" % int(model.endtime)
        tlvdata = ""
        tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_NODE,
                                             model.objid)
        tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_TYPE,
                                             eventtype)
        tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_NAME,
                                             "mobility:%s" % model._name)
        tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_DATA,
                                             data)
        tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_TIME,
                                             "%s" % time.time())
        msg = coreapi.CoreEventMessage.pack(0, tlvdata)
        try:
            self.session.broadcastraw(None, msg)
        except Exception, e:
            self.warn("Error sending Event Message: %s" % e)

    def updatewlans(self, moved, moved_netifs):
        ''' A mobility script has caused nodes in the 'moved' list to move.
            Update every WlanNode. This saves range calculations if the model
            were to recalculate for each individual node movement.
        '''
        for nodenum in self.configs:
            try:
                n = self.session.obj(nodenum)
            except KeyError:
                continue
            if n.model:
                n.model.update(moved, moved_netifs)

    def addphys(self, netnum, node):
        ''' Keep track of PhysicalNodes and which network they belong to.
        '''
        nodenum = node.objid
        self.phys[nodenum] = node
        if netnum not in self.physnets:
            self.physnets[netnum] = [nodenum,]
        else:
            self.physnets[netnum].append(nodenum)

    def physnodehandlelink(self, msg):
        ''' Broker handler. Snoop Link add messages to get
            node numbers of PhyiscalNodes and their nets.
            Physical nodes exist only on other servers, but a shadow object is
            created here for tracking node position.
        '''
        if msg.msgtype == coreapi.CORE_API_LINK_MSG and \
           msg.flags & coreapi.CORE_API_ADD_FLAG:
            nn = msg.nodenumbers()
            # first node is always link layer node in Link add message
            if nn[0] not in self.session.broker.nets:
                return
            if nn[1] in self.session.broker.phys:
                # record the fact that this PhysicalNode is linked to a net
                dummy = PyCoreNode(session=self.session, objid=nn[1],
                                   name="n%d" % nn[1], start=False)
                self.addphys(nn[0], dummy)

    def physnodeupdateposition(self, msg):
        ''' Snoop node messages belonging to physical nodes. The dummy object
            in self.phys[] records the node position.
        '''
        nodenum = msg.nodenumbers()[0]
        try:
            dummy = self.phys[nodenum]
            nodexpos = msg.gettlv(coreapi.CORE_TLV_NODE_XPOS)
            nodeypos = msg.gettlv(coreapi.CORE_TLV_NODE_YPOS)
            dummy.setposition(nodexpos, nodeypos, None)
        except KeyError:
            pass

    def installphysnodes(self, net):
        ''' After installing a mobility model on a net, include any physical
            nodes that we have recorded. Use the GreTap tunnel to the physical node
            as the node's interface.
        '''
        try:
            nodenums = self.physnets[net.objid]
        except KeyError:
            return
        for nodenum in nodenums:
            node = self.phys[nodenum]
            servers = self.session.broker.getserversbynode(nodenum)
            (host, port, sock) = self.session.broker.getserver(servers[0])
            netif = self.session.broker.gettunnel(net.objid, IPAddr.toint(host))
            node.addnetif(netif, 0)
            netif.node = node
            (x,y,z) = netif.node.position.get()
            netif.poshook(netif, x, y, z)


class WirelessModel(Configurable):
    ''' Base class used by EMANE models and the basic range model.
        Used for managing arbitrary configuration parameters.
    '''
    _type = coreapi.CORE_TLV_REG_WIRELESS
    _bitmap = None
    _positioncallback = None

    def __init__(self, session, objid, verbose = False, values = None):
        Configurable.__init__(self, session, objid)
        self.verbose = verbose
        # 'values' can be retrieved from a ConfigurableManager, or used here
        # during initialization, depending on the model.

    def tolinkmsgs(self, flags):
        ''' May be used if the model can populate the GUI with wireless (green)
            link lines.
        '''
        return []

    def update(self, moved, moved_netifs):
        raise NotImplementedError


class BasicRangeModel(WirelessModel):
    ''' Basic Range wireless model, calculates range between nodes and links
        and unlinks nodes based on this distance. This was formerly done from
        the GUI.
    '''
    _name = "basic_range"

    # configuration parameters are
    #  ( 'name', 'type', 'default', 'possible-value-list', 'caption')
    _confmatrix = [
        ("range", coreapi.CONF_DATA_TYPE_UINT32, '275',
         '', 'wireless range (pixels)'),
        ("bandwidth", coreapi.CONF_DATA_TYPE_UINT32, '54000',
         '', 'bandwidth (bps)'),
        ("jitter", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
         '', 'transmission jitter (usec)'),
        ("delay", coreapi.CONF_DATA_TYPE_FLOAT, '5000.0',
         '', 'transmission delay (usec)'),
        ("error", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
         '', 'error rate (%)'),
    ]

    # value groupings
    _confgroups = "Basic Range Parameters:1-%d" % len(_confmatrix)

    def __init__(self, session, objid, verbose = False, values=None):
        ''' Range model is only instantiated during runtime.
        '''
        super(BasicRangeModel, self).__init__(session = session, objid = objid,
                                              verbose = verbose)
        self.wlan = session.obj(objid)
        self._netifs = {}
        self._netifslock = threading.Lock()
        if values is None:
            values = session.mobility.getconfig(objid, self._name,
                                                self.getdefaultvalues())[1]
        self.range = float(self.valueof("range", values))
        if self.verbose:
            self.session.info("Basic range model configured for WLAN %d using" \
                              " range %d" % (objid, self.range))
        self.bw = int(self.valueof("bandwidth", values))
        if self.bw == 0.0:
            self.bw = None
        self.delay = float(self.valueof("delay", values))
        if self.delay == 0.0:
            self.delay = None
        self.loss = float(self.valueof("error", values))
        if self.loss == 0.0:
            self.loss = None
        self.jitter = float(self.valueof("jitter", values))
        if self.jitter == 0.0:
            self.jitter = None

    @classmethod
    def configure_mob(cls, session, msg):
        ''' Handle configuration messages for setting up a model.
            Pass the MobilityManager object as the manager object.
        '''
        return cls.configure(session.mobility, msg)

    def setlinkparams(self):
        ''' Apply link parameters to all interfaces. This is invoked from
            WlanNode.setmodel() after the position callback has been set.
        '''
        with self._netifslock:
            for netif in self._netifs:
                self.wlan.linkconfig(netif, bw=self.bw, delay=self.delay,
                                     loss=self.loss, duplicate=None,
                                     jitter=self.jitter)

    def get_position(self, netif):
        with self._netifslock:
            return self._netifs[netif]

    def set_position(self, netif, x = None, y = None, z = None):
        ''' A node has moved; given an interface, a new (x,y,z) position has
            been set; calculate the new distance between other nodes and link or
            unlink node pairs based on the configured range.
        '''
        #print "set_position(%s, x=%s, y=%s, z=%s)" % (netif.localname, x, y, z)
        self._netifslock.acquire()
        self._netifs[netif] = (x, y, z)
        if x is None or y is None:
            self._netifslock.release()
            return
        for netif2 in self._netifs:
            self.calclink(netif, netif2)
        self._netifslock.release()

    _positioncallback = set_position

    def update(self, moved, moved_netifs):
        ''' Node positions have changed without recalc. Update positions from
            node.position, then re-calculate links for those that have moved.
            Assumes bidirectional links, with one calculation per node pair, where
            one of the nodes has moved.
        '''
        with self._netifslock:
            while len(moved_netifs):
                netif = moved_netifs.pop()
                (nx, ny, nz) = netif.node.getposition()
                if netif in self._netifs:
                    self._netifs[netif] = (nx, ny, nz)
                for netif2 in self._netifs:
                    if netif2 in moved_netifs:
                        continue
                    self.calclink(netif, netif2)

    def calclink(self, netif, netif2):
        ''' Helper used by set_position() and update() to
            calculate distance between two interfaces and perform
            linking/unlinking. Sends link/unlink messages and updates the
            WlanNode's linked dict.
        '''
        if netif == netif2:
            return
        (x, y, z) = self._netifs[netif]
        (x2, y2, z2) = self._netifs[netif2]
        if x2 is None or y2 is None:
            return

        d = self.calcdistance( (x,y,z), (x2,y2,z2) )
        # ordering is important, to keep the wlan._linked dict organized
        a = min(netif, netif2)
        b = max(netif, netif2)
        try:
            self.wlan._linked_lock.acquire()
            linked = self.wlan.linked(a, b)
        except KeyError:
            return
        finally:
            self.wlan._linked_lock.release()
        if d > self.range:
            if linked:
                self.wlan.unlink(a, b)
                self.sendlinkmsg(a, b, unlink=True)
        else:
            if not linked:
                self.wlan.link(a, b)
                self.sendlinkmsg(a, b)


    def calcdistance(self, p1, p2):
        ''' Calculate the distance between two three-dimensional points.
        '''
        a = p1[0] - p2[0]
        b = p1[1] - p2[1]
        c = 0
        if p1[2] is not None and p2[2] is not None:
            c = p1[2] - p2[2]
        return math.hypot(math.hypot(a, b), c)

    def linkmsg(self, netif, netif2, flags):
        ''' Create a wireless link/unlink API message.
        '''
        n1 = netif.localname.split('.')[0]
        n2 = netif2.localname.split('.')[0]
        tlvdata = coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER,
                                           netif.node.objid)
        tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER,
                                            netif2.node.objid)
        tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_NETID,
                                            self.wlan.objid)
        #tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF1NUM,
        #                                    netif.index)
        #tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2NUM,
        #                                    netif2.index)
        tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_TYPE,
                                            coreapi.CORE_LINK_WIRELESS)
        return coreapi.CoreLinkMessage.pack(flags, tlvdata)

    def sendlinkmsg(self, netif, netif2, unlink=False):
        ''' Send a wireless link/unlink API message to the GUI.
        '''
        if unlink:
            flags = coreapi.CORE_API_DEL_FLAG
        else:
            flags = coreapi.CORE_API_ADD_FLAG
        msg = self.linkmsg(netif, netif2, flags)
        self.session.broadcastraw(src=None, data=msg)
        self.session.sdt.updatelink(netif.node.objid, netif2.node.objid, flags,
                                    wireless=True)

    def tolinkmsgs(self, flags):
        ''' Return a list of wireless link messages for when the GUI reconnects.
        '''
        r = []
        with self.wlan._linked_lock:
            for a in self.wlan._linked:
                for b in self.wlan._linked[a]:
                    if self.wlan._linked[a][b]:
                        r.append(self.linkmsg(a, b, flags))
        return r

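The link decision in calclink() above reduces to one comparison: the Euclidean distance between two interface positions versus the configured range. A standalone sketch of the same test with plain coordinate tuples (within_range is a hypothetical helper, not part of this file):

import math

def within_range(p1, p2, maxrange):
    # mirrors calcdistance(): ignore the z component when either is None
    a = p1[0] - p2[0]
    b = p1[1] - p2[1]
    c = 0
    if p1[2] is not None and p2[2] is not None:
        c = p1[2] - p2[2]
    return math.hypot(math.hypot(a, b), c) <= maxrange

print(within_range((100, 100, None), (300, 250, None), 275))   # True, d = 250.0
print(within_range((100, 100, None), (500, 100, None), 275))   # False, d = 400.0
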
class WayPointMobility(WirelessModel):
|
||||
''' Abstract class for mobility models that set node waypoints.
|
||||
'''
|
||||
_name = "waypoint"
|
||||
_type = coreapi.CORE_TLV_REG_MOBILITY
|
||||
|
||||
STATE_STOPPED = 0
|
||||
STATE_RUNNING = 1
|
||||
STATE_PAUSED = 2
|
||||
|
||||
class WayPoint(object):
|
||||
def __init__(self, time, nodenum, coords, speed):
|
||||
self.time = time
|
||||
self.nodenum = nodenum
|
||||
self.coords = coords
|
||||
self.speed = speed
|
||||
|
||||
def __cmp__(self, other):
|
||||
tmp = cmp(self.time, other.time)
|
||||
if tmp == 0:
|
||||
tmp = cmp(self.nodenum, other.nodenum)
|
||||
return tmp
|
||||
|
||||
def __init__(self, session, objid, verbose = False, values = None):
|
||||
super(WayPointMobility, self).__init__(session = session, objid = objid,
|
||||
verbose = verbose, values = values)
|
||||
self.state = self.STATE_STOPPED
|
||||
self.queue = []
|
||||
self.queue_copy = []
|
||||
self.points = {}
|
||||
self.initial = {}
|
||||
self.lasttime = None
|
||||
self.endtime = None
|
||||
self.wlan = session.obj(objid)
|
||||
# these are really set in child class via confmatrix
|
||||
self.loop = False
|
||||
self.refresh_ms = 50
|
||||
# flag whether to stop scheduling when queue is empty
|
||||
# (ns-3 sets this to False as new waypoints may be added from trace)
|
||||
self.empty_queue_stop = True
|
||||
|
||||
def runround(self):
|
||||
''' Advance script time and move nodes.
|
||||
'''
|
||||
if self.state != self.STATE_RUNNING:
|
||||
return
|
||||
t = self.lasttime
|
||||
self.lasttime = time.time()
|
||||
now = self.lasttime - self.timezero
|
||||
dt = self.lasttime - t
|
||||
#print "runround(now=%.2f, dt=%.2f)" % (now, dt)
|
||||
|
||||
# keep current waypoints up-to-date
|
||||
self.updatepoints(now)
|
||||
|
||||
if not len(self.points):
|
||||
if len(self.queue):
|
||||
# more future waypoints, allow time for self.lasttime update
|
||||
nexttime = self.queue[0].time - now
|
||||
if nexttime > (0.001 * self.refresh_ms):
|
||||
nexttime -= (0.001 * self.refresh_ms)
|
||||
self.session.evq.add_event(nexttime, self.runround)
|
||||
return
|
||||
else:
|
||||
# no more waypoints or queued items, loop?
|
||||
if not self.empty_queue_stop:
|
||||
# keep running every refresh_ms, even with empty queue
|
||||
self.session.evq.add_event(0.001 * self.refresh_ms, self.runround)
|
||||
return
|
||||
if not self.loopwaypoints():
|
||||
return self.stop(move_initial=False)
|
||||
if not len(self.queue):
|
||||
# prevent busy loop
|
||||
return
|
||||
return self.run()
|
||||
|
||||
# only move netifs attached to self.wlan, or all nodenum in script?
|
||||
moved = []
|
||||
moved_netifs = []
|
||||
for netif in self.wlan.netifs():
|
||||
node = netif.node
|
||||
if self.movenode(node, dt):
|
||||
moved.append(node)
|
||||
moved_netifs.append(netif)
|
||||
|
||||
# calculate all ranges after moving nodes; this saves calculations
|
||||
#self.wlan.model.update(moved)
|
||||
self.session.mobility.updatewlans(moved, moved_netifs)
|
||||
|
||||
# TODO: check session state
|
||||
self.session.evq.add_event(0.001 * self.refresh_ms, self.runround)
|
||||
|
||||
def run(self):
|
||||
self.timezero = time.time()
|
||||
self.lasttime = self.timezero - (0.001 * self.refresh_ms)
|
||||
self.movenodesinitial()
|
||||
self.runround()
|
||||
self.session.mobility.sendevent(self)
|
||||
|
||||
def movenode(self, node, dt):
|
||||
''' Calculate next node location and update its coordinates.
|
||||
Returns True if the node's position has changed.
|
||||
'''
|
||||
if node.objid not in self.points:
|
||||
return False
|
||||
x1, y1, z1 = node.getposition()
|
||||
x2, y2, z2 = self.points[node.objid].coords
|
||||
speed = self.points[node.objid].speed
|
||||
# instantaneous move (prevents dx/dy == 0.0 below)
|
||||
if speed == 0:
|
||||
self.setnodeposition(node, x2, y2, z2)
|
||||
del self.points[node.objid]
|
||||
return True
|
||||
# speed can be a velocity vector (ns3 mobility) or speed value
|
||||
if isinstance(speed, (float, int)):
|
||||
# linear speed value
|
||||
alpha = math.atan2(y2 - y1, x2 - x1)
|
||||
sx = speed * math.cos(alpha)
|
||||
sy = speed * math.sin(alpha)
|
||||
else:
|
||||
# velocity vector
|
||||
sx = speed[0]
|
||||
sy = speed[1]
|
||||
|
||||
# calculate dt * speed = distance moved
|
||||
dx = sx * dt
|
||||
dy = sy * dt
|
||||
# prevent overshoot
|
||||
if abs(dx) > abs(x2 - x1):
|
||||
dx = x2 - x1
|
||||
if abs(dy) > abs(y2 - y1):
|
||||
dy = y2 - y1
|
||||
if dx == 0.0 and dy == 0.0:
|
||||
if self.endtime < (self.lasttime - self.timezero):
|
||||
# the last node to reach the last waypoint determines this
|
||||
# script's endtime
|
||||
self.endtime = self.lasttime - self.timezero
|
||||
del self.points[node.objid]
|
||||
return False
|
||||
#print "node %s dx,dy= <%s, %d>" % (node.name, dx, dy)
|
||||
if (x1 + dx) < 0.0:
|
||||
dx = 0.0 - x1
|
||||
if (y1 + dy) < 0.0:
|
||||
dy = 0.0 - y1
|
||||
self.setnodeposition(node, x1 + dx, y1 + dy, z1)
|
||||
return True
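# The movement step above is plain vector math: head toward the waypoint at
# the given speed, then clamp so one refresh interval never overshoots the
# target or leaves the canvas. A standalone sketch of the same calculation,
# with hypothetical argument names:
import math

def step_toward(x1, y1, x2, y2, speed, dt):
    # heading from the current position toward the waypoint
    alpha = math.atan2(y2 - y1, x2 - x1)
    dx = speed * math.cos(alpha) * dt
    dy = speed * math.sin(alpha) * dt
    # clamp so the node never overshoots the waypoint in one step
    if abs(dx) > abs(x2 - x1):
        dx = x2 - x1
    if abs(dy) > abs(y2 - y1):
        dy = y2 - y1
    # never move to negative canvas coordinates
    return max(x1 + dx, 0.0), max(y1 + dy, 0.0)

# example: 10 units/s toward (100, 0), refreshed every 50 ms
print(step_toward(0.0, 0.0, 100.0, 0.0, 10.0, 0.05))    # -> (0.5, 0.0)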
|
||||
|
||||
def movenodesinitial(self):
|
||||
''' Move nodes to their initial positions. Then calculate the ranges.
|
||||
'''
|
||||
moved = []
|
||||
moved_netifs = []
|
||||
for netif in self.wlan.netifs():
|
||||
node = netif.node
|
||||
if node.objid not in self.initial:
|
||||
continue
|
||||
(x, y, z) = self.initial[node.objid].coords
|
||||
self.setnodeposition(node, x, y, z)
|
||||
moved.append(node)
|
||||
moved_netifs.append(netif)
|
||||
#self.wlan.model.update(moved)
|
||||
self.session.mobility.updatewlans(moved, moved_netifs)
|
||||
|
||||
def addwaypoint(self, time, nodenum, x, y, z, speed):
|
||||
''' Waypoints are pushed to a heapq, sorted by time.
|
||||
'''
|
||||
#print "addwaypoint: %s %s %s,%s,%s %s" % (time, nodenum, x, y, z, speed)
|
||||
wp = self.WayPoint(time, nodenum, coords=(x,y,z), speed=speed)
|
||||
heapq.heappush(self.queue, wp)
|
||||
|
||||
def addinitial(self, nodenum, x, y, z):
|
||||
''' Record initial position in a dict.
|
||||
'''
|
||||
wp = self.WayPoint(0, nodenum, coords=(x,y,z), speed=0)
|
||||
self.initial[nodenum] = wp
|
||||
|
||||
def updatepoints(self, now):
|
||||
''' Move items from self.queue to self.points when their time has come.
|
||||
'''
|
||||
while len(self.queue):
|
||||
if self.queue[0].time > now:
|
||||
break
|
||||
wp = heapq.heappop(self.queue)
|
||||
self.points[wp.nodenum] = wp
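# A small sketch of the same heapq pattern: the earliest entry always sits at
# queue[0], so due waypoints can be popped until the head lies in the future.
# Plain (time, nodenum) tuples stand in here for the WayPoint objects.
import heapq

queue = []
for t, nodenum in [(5.0, 2), (1.0, 1), (3.0, 1)]:
    heapq.heappush(queue, (t, nodenum))

now = 3.5
points = {}
while queue and queue[0][0] <= now:
    t, nodenum = heapq.heappop(queue)
    points[nodenum] = t           # a later waypoint replaces an earlier one

print(points)                     # -> {1: 3.0}; node 2 stays queued until t=5.0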
|
||||
|
||||
def copywaypoints(self):
|
||||
''' Store backup copy of waypoints for looping and stopping.
|
||||
'''
|
||||
self.queue_copy = list(self.queue)
|
||||
|
||||
def loopwaypoints(self):
|
||||
''' Restore backup copy of waypoints when looping.
|
||||
'''
|
||||
self.queue = list(self.queue_copy)
|
||||
return self.loop
|
||||
|
||||
def setnodeposition(self, node, x, y, z):
|
||||
''' Helper to move a node, notify any GUI (connected session handlers),
|
||||
without invoking the interface poshook callback that may perform
|
||||
range calculation.
|
||||
'''
|
||||
# this would cause PyCoreNetIf.poshook() callback (range calculation)
|
||||
#node.setposition(x, y, z)
|
||||
node.position.set(x, y, z)
|
||||
msg = node.tonodemsg(flags=0)
|
||||
self.session.broadcastraw(None, msg)
|
||||
self.session.sdt.updatenode(node, flags=0, x=x, y=y, z=z)
|
||||
|
||||
def setendtime(self):
|
||||
''' Set self.endtime to the time of the last waypoint in the queue of
|
||||
waypoints. This is just an estimate. The endtime will later be
|
||||
adjusted, after one round of the script has run, to be the time
|
||||
that the last moving node has reached its final waypoint.
|
||||
'''
|
||||
try:
|
||||
self.endtime = self.queue[-1].time
|
||||
except IndexError:
|
||||
self.endtime = 0
|
||||
|
||||
def start(self):
|
||||
''' Run the script from the beginning or unpause from where it
|
||||
was before.
|
||||
'''
|
||||
laststate = self.state
|
||||
self.state = self.STATE_RUNNING
|
||||
if laststate == self.STATE_STOPPED or laststate == self.STATE_RUNNING:
|
||||
self.loopwaypoints()
|
||||
self.timezero = 0
|
||||
self.lasttime = 0
|
||||
self.run()
|
||||
elif laststate == self.STATE_PAUSED:
|
||||
now = time.time()
|
||||
self.timezero += now - self.lasttime
|
||||
self.lasttime = now - (0.001 * self.refresh_ms)
|
||||
self.runround()
|
||||
|
||||
def stop(self, move_initial=True):
|
||||
''' Stop the script and move nodes to initial positions.
|
||||
'''
|
||||
self.state = self.STATE_STOPPED
|
||||
self.loopwaypoints()
|
||||
self.timezero = 0
|
||||
self.lasttime = 0
|
||||
if move_initial:
|
||||
self.movenodesinitial()
|
||||
self.session.mobility.sendevent(self)
|
||||
|
||||
def pause(self):
|
||||
''' Pause the script; pause time is stored to self.lasttime.
|
||||
'''
|
||||
self.state = self.STATE_PAUSED
|
||||
self.lasttime = time.time()
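# The pause/unpause bookkeeping amounts to shifting timezero forward by the
# length of the pause, so that (time.time() - timezero) keeps yielding script
# time. A rough standalone illustration of that arithmetic (sleep intervals
# are arbitrary):
import time

timezero = time.time()            # script starts now
time.sleep(0.2)                   # ...runs for 0.2 s...
pause_at = time.time()            # pause(): remember the wall-clock pause time

time.sleep(0.5)                   # paused for 0.5 s
now = time.time()
timezero += now - pause_at        # start() after a pause: shift the origin

print(round(now - timezero, 1))   # ~0.2; the paused interval is not counted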
|
||||
|
||||
|
||||
class Ns2ScriptedMobility(WayPointMobility):
|
||||
''' Handles the ns-2 script format, generated by scengen/setdest or
|
||||
BonnMotion.
|
||||
'''
|
||||
_name = "ns2script"
|
||||
|
||||
_confmatrix = [
|
||||
("file", coreapi.CONF_DATA_TYPE_STRING, '',
|
||||
'', 'mobility script file'),
|
||||
("refresh_ms", coreapi.CONF_DATA_TYPE_UINT32, '50',
|
||||
'', 'refresh time (ms)'),
|
||||
("loop", coreapi.CONF_DATA_TYPE_BOOL, '1',
|
||||
'On,Off', 'loop'),
|
||||
("autostart", coreapi.CONF_DATA_TYPE_STRING, '',
|
||||
'', 'auto-start seconds (0.0 for runtime)'),
|
||||
("map", coreapi.CONF_DATA_TYPE_STRING, '',
|
||||
'', 'node mapping (optional, e.g. 0:1,1:2,2:3)'),
|
||||
("script_start", coreapi.CONF_DATA_TYPE_STRING, '',
|
||||
'', 'script file to run upon start'),
|
||||
("script_pause", coreapi.CONF_DATA_TYPE_STRING, '',
|
||||
'', 'script file to run upon pause'),
|
||||
("script_stop", coreapi.CONF_DATA_TYPE_STRING, '',
|
||||
'', 'script file to run upon stop'),
|
||||
]
|
||||
_confgroups = "ns-2 Mobility Script Parameters:1-%d" % len(_confmatrix)
|
||||
|
||||
def __init__(self, session, objid, verbose = False, values = None):
|
||||
'''
|
||||
'''
|
||||
super(Ns2ScriptedMobility, self).__init__(session = session, objid = objid,
|
||||
verbose = verbose, values = values)
|
||||
self._netifs = {}
|
||||
self._netifslock = threading.Lock()
|
||||
if values is None:
|
||||
values = session.mobility.getconfig(objid, self._name,
|
||||
self.getdefaultvalues())[1]
|
||||
self.file = self.valueof("file", values)
|
||||
self.refresh_ms = int(self.valueof("refresh_ms", values))
|
||||
self.loop = (self.valueof("loop", values).lower() == "on")
|
||||
self.autostart = self.valueof("autostart", values)
|
||||
self.parsemap(self.valueof("map", values))
|
||||
self.script_start = self.valueof("script_start", values)
|
||||
self.script_pause = self.valueof("script_pause", values)
|
||||
self.script_stop = self.valueof("script_stop", values)
|
||||
if self.verbose:
|
||||
self.session.info("ns-2 scripted mobility configured for WLAN %d" \
|
||||
" using file: %s" % (objid, self.file))
|
||||
self.readscriptfile()
|
||||
self.copywaypoints()
|
||||
self.setendtime()
|
||||
|
||||
@classmethod
|
||||
def configure_mob(cls, session, msg):
|
||||
''' Handle configuration messages for setting up a model.
|
||||
Pass the MobilityManager object as the manager object.
|
||||
'''
|
||||
return cls.configure(session.mobility, msg)
|
||||
|
||||
def readscriptfile(self):
|
||||
''' Read in mobility script from a file. This adds waypoints to a
|
||||
priority queue, sorted by waypoint time. Initial waypoints are
|
||||
stored in a separate dict.
|
||||
'''
|
||||
filename = self.findfile(self.file)
|
||||
try:
|
||||
f = open(filename, 'r')
|
||||
except IOError, e:
|
||||
self.session.warn("ns-2 scripted mobility failed to load file " \
|
||||
" '%s' (%s)" % (self.file, e))
|
||||
return
|
||||
if self.verbose:
|
||||
self.session.info("reading ns-2 script file: %s" % filename)
|
||||
ln = 0
|
||||
ix = iy = iz = None
|
||||
inodenum = None
|
||||
for line in f:
|
||||
ln += 1
|
||||
if line[:2] != '$n':
|
||||
continue
|
||||
try:
|
||||
if line[:8] == "$ns_ at ":
|
||||
if ix is not None and iy is not None:
|
||||
self.addinitial(self.map(inodenum), ix, iy, iz)
|
||||
ix = iy = iz = None
|
||||
# waypoints:
|
||||
# $ns_ at 1.00 "$node_(6) setdest 500.0 178.0 25.0"
|
||||
parts = line.split()
|
||||
time = float(parts[2])
|
||||
nodenum = parts[3][1+parts[3].index('('):parts[3].index(')')]
|
||||
x = float(parts[5])
|
||||
y = float(parts[6])
|
||||
z = None
|
||||
speed = float(parts[7].strip('"'))
|
||||
self.addwaypoint(time, self.map(nodenum), x, y, z, speed)
|
||||
elif line[:7] == "$node_(":
|
||||
# initial position (time=0, speed=0):
|
||||
# $node_(6) set X_ 780.0
|
||||
parts = line.split()
|
||||
time = 0.0
|
||||
nodenum = parts[0][1+parts[0].index('('):parts[0].index(')')]
|
||||
if parts[2] == 'X_':
|
||||
if ix is not None and iy is not None:
|
||||
self.addinitial(self.map(inodenum), ix, iy, iz)
|
||||
ix = iy = iz = None
|
||||
ix = float(parts[3])
|
||||
elif parts[2] == 'Y_':
|
||||
iy = float(parts[3])
|
||||
elif parts[2] == 'Z_':
|
||||
iz = float(parts[3])
|
||||
self.addinitial(self.map(nodenum), ix, iy, iz)
|
||||
ix = iy = iz = None
|
||||
inodenum = nodenum
|
||||
else:
|
||||
raise ValueError
|
||||
except ValueError, e:
|
||||
self.session.warn("skipping line %d of file %s '%s' (%s)" % \
|
||||
(ln, self.file, line, e))
|
||||
continue
|
||||
if ix is not None and iy is not None:
|
||||
self.addinitial(self.map(inodenum), ix, iy, iz)
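# The two ns-2 line formats handled above can be exercised in isolation; this
# sketch applies the same string slicing to a sample waypoint line and a
# sample initial-position line (sample text only, not read from a file):
wp_line = '$ns_ at 1.00 "$node_(6) setdest 500.0 178.0 25.0"'
parts = wp_line.split()
wptime = float(parts[2])                                          # 1.0
nodenum = parts[3][1 + parts[3].index('('):parts[3].index(')')]   # '6'
x, y = float(parts[5]), float(parts[6])                           # 500.0, 178.0
speed = float(parts[7].strip('"'))                                # 25.0

init_line = '$node_(6) set X_ 780.0'
parts = init_line.split()
inodenum = parts[0][1 + parts[0].index('('):parts[0].index(')')]  # '6'
ix = float(parts[3])                                              # 780.0

print(wptime, nodenum, x, y, speed, inodenum, ix)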
|
||||
|
||||
def findfile(self, fn):
|
||||
''' Locate a script file. If the specified file doesn't exist, look in the
|
||||
same directory as the scenario file (session.filename), or in the default
|
||||
configs directory (~/.core/configs). This allows for sample files without
|
||||
absolute pathnames.
|
||||
'''
|
||||
if os.path.exists(fn):
|
||||
return fn
|
||||
if self.session.filename is not None:
|
||||
d = os.path.dirname(self.session.filename)
|
||||
sessfn = os.path.join(d, fn)
|
||||
if (os.path.exists(sessfn)):
|
||||
return sessfn
|
||||
if self.session.user is not None:
|
||||
userfn = os.path.join('/home', self.session.user, '.core', 'configs', fn)
|
||||
if (os.path.exists(userfn)):
|
||||
return userfn
|
||||
return fn
|
||||
|
||||
def parsemap(self, mapstr):
|
||||
''' Parse a node mapping string, given as a configuration parameter.
|
||||
'''
|
||||
self.nodemap = {}
|
||||
if mapstr.strip() == '':
|
||||
return
|
||||
for pair in mapstr.split(','):
|
||||
parts = pair.split(':')
|
||||
try:
|
||||
if len(parts) != 2:
|
||||
raise ValueError
|
||||
self.nodemap[int(parts[0])] = int(parts[1])
|
||||
except ValueError:
|
||||
self.session.warn("ns-2 mobility node map error")
|
||||
return
|
||||
|
||||
def map(self, nodenum):
|
||||
''' Map one node number (from a script file) to another.
|
||||
'''
|
||||
nodenum = int(nodenum)
|
||||
try:
|
||||
return self.nodemap[nodenum]
|
||||
except KeyError:
|
||||
return nodenum
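# Taken together, parsemap() and map() implement a simple renumbering table
# with an identity fallback; a compact sketch of the same behavior:
nodemap = {}
for pair in "0:1,1:2,2:3".split(','):
    src, dst = pair.split(':')
    nodemap[int(src)] = int(dst)

def remap(nodenum):
    # unmapped script node numbers pass through unchanged
    return nodemap.get(int(nodenum), int(nodenum))

print(remap('1'), remap(7))       # -> 2 7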
|
||||
|
||||
def startup(self):
|
||||
''' Start running the script if autostart is enabled.
|
||||
Move nodes to their initial positions when any autostart time is specified.
|
||||
Ignore the script if autostart is an empty string (can still be
|
||||
started via GUI controls).
|
||||
'''
|
||||
if self.autostart == '':
|
||||
if self.verbose:
|
||||
self.session.info("not auto-starting ns-2 script for %s" % \
|
||||
self.wlan.name)
|
||||
return
|
||||
try:
|
||||
t = float(self.autostart)
|
||||
except ValueError:
|
||||
self.session.warn("Invalid auto-start seconds specified '%s' for " \
|
||||
"%s" % (self.autostart, self.wlan.name))
|
||||
return
|
||||
self.movenodesinitial()
|
||||
if self.verbose:
|
||||
self.session.info("scheduling ns-2 script for %s autostart at %s" \
|
||||
% (self.wlan.name, t))
|
||||
self.state = self.STATE_RUNNING
|
||||
self.session.evq.add_event(t, self.run)
|
||||
|
||||
def start(self):
|
||||
''' Handle the case when un-paused.
|
||||
'''
|
||||
laststate = self.state
|
||||
super(Ns2ScriptedMobility, self).start()
|
||||
if laststate == self.STATE_PAUSED:
|
||||
self.statescript("unpause")
|
||||
|
||||
def run(self):
|
||||
''' Start is pressed or autostart is triggered.
|
||||
'''
|
||||
super(Ns2ScriptedMobility, self).run()
|
||||
self.statescript("run")
|
||||
|
||||
def pause(self):
|
||||
super(Ns2ScriptedMobility, self).pause()
|
||||
self.statescript("pause")
|
||||
|
||||
def stop(self, move_initial=True):
|
||||
super(Ns2ScriptedMobility, self).stop(move_initial=move_initial)
|
||||
self.statescript("stop")
|
||||
|
||||
def statescript(self, typestr):
|
||||
filename = None
|
||||
if typestr == "run" or typestr == "unpause":
|
||||
filename = self.script_start
|
||||
elif typestr == "pause":
|
||||
filename = self.script_pause
|
||||
elif typestr == "stop":
|
||||
filename = self.script_stop
|
||||
if filename is None or filename == '':
|
||||
return
|
||||
filename = self.findfile(filename)
|
||||
try:
|
||||
check_call(["/bin/sh", filename, typestr],
|
||||
cwd=self.session.sessiondir,
|
||||
env=self.session.getenviron())
|
||||
except Exception, e:
|
||||
self.session.warn("Error running script '%s' for WLAN state %s: " \
|
||||
"%s" % (filename, typestr, e))
|
||||
|
||||
|
0
daemon/core/netns/__init__.py
Normal file
401
daemon/core/netns/nodes.py
Normal file
|
@@ -0,0 +1,401 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
nodes.py: definition of the LxcNode and CoreNode classes, and other node
|
||||
classes that inherit from CoreNode, implementing specific node types.
|
||||
'''
|
||||
|
||||
from vnode import *
|
||||
from vnet import *
|
||||
from core.misc.ipaddr import *
|
||||
from core.api import coreapi
|
||||
from core.coreobj import PyCoreNode
|
||||
|
||||
class CtrlNet(LxBrNet):
|
||||
policy = "ACCEPT"
|
||||
CTRLIF_IDX_BASE = 99 # base control interface index
|
||||
|
||||
def __init__(self, session, objid = "ctrlnet", name = None,
|
||||
verbose = False, netid = 1, prefix = None,
|
||||
hostid = None, start = True, assign_address = True,
|
||||
updown_script = None):
|
||||
if not prefix:
|
||||
prefix = "172.16.%d.0/24" % netid
|
||||
self.prefix = IPv4Prefix(prefix)
|
||||
self.hostid = hostid
|
||||
self.assign_address = assign_address
|
||||
self.updown_script = updown_script
|
||||
LxBrNet.__init__(self, session, objid = objid, name = name,
|
||||
verbose = verbose, start = start)
|
||||
|
||||
def startup(self):
|
||||
LxBrNet.startup(self)
|
||||
if self.hostid:
|
||||
addr = self.prefix.addr(self.hostid)
|
||||
else:
|
||||
addr = self.prefix.maxaddr()
|
||||
addrlist = ["%s/%s" % (addr, self.prefix.prefixlen)]
|
||||
if self.assign_address:
|
||||
self.addrconfig(addrlist = addrlist)
|
||||
if self.updown_script is not None:
|
||||
self.info("interface %s updown script '%s startup' called" % \
|
||||
(self.brname, self.updown_script))
|
||||
check_call([self.updown_script, self.brname, "startup"])
|
||||
|
||||
def shutdown(self):
|
||||
if self.updown_script is not None:
|
||||
self.info("interface %s updown script '%s shutdown' called" % \
|
||||
(self.brname, self.updown_script))
|
||||
check_call([self.updown_script, self.brname, "shutdown"])
|
||||
LxBrNet.shutdown(self)
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
''' Do not include CtrlNet in link messages describing this session.
|
||||
'''
|
||||
return []
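# The control network hands out addresses from its /24 prefix: the host side
# takes prefix.addr(hostid) when hostid is set, otherwise the highest usable
# address. A rough sketch of that arithmetic using the standard ipaddress
# module (Python 3, or the py2 backport) in place of CORE's IPv4Prefix:
import ipaddress

netid = 1
prefix = ipaddress.ip_network(u"172.16.%d.0/24" % netid)
host_addr = prefix.broadcast_address - 1         # analogous to prefix.maxaddr()
print("%s/%s" % (host_addr, prefix.prefixlen))   # -> 172.16.1.254/24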
|
||||
|
||||
class CoreNode(LxcNode):
|
||||
apitype = coreapi.CORE_NODE_DEF
|
||||
|
||||
class PtpNet(LxBrNet):
|
||||
policy = "ACCEPT"
|
||||
|
||||
def attach(self, netif):
|
||||
if len(self._netif) > 1:
|
||||
raise ValueError, \
|
||||
"Point-to-point links support at most 2 network interfaces"
|
||||
LxBrNet.attach(self, netif)
|
||||
|
||||
def tonodemsg(self, flags):
|
||||
''' Do not generate a Node Message for point-to-point links. They are
|
||||
built using a link message instead.
|
||||
'''
|
||||
pass
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
''' Build CORE API TLVs for a point-to-point link. One Link message
|
||||
describes this network.
|
||||
'''
|
||||
tlvdata = ""
|
||||
if len(self._netif) != 2:
|
||||
return tlvdata
|
||||
(if1, if2) = self._netif.items()
|
||||
if1 = if1[1]
|
||||
if2 = if2[1]
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER,
|
||||
if1.node.objid)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER,
|
||||
if2.node.objid)
|
||||
delay = if1.getparam('delay')
|
||||
bw = if1.getparam('bw')
|
||||
loss = if1.getparam('loss')
|
||||
duplicate = if1.getparam('duplicate')
|
||||
jitter = if1.getparam('jitter')
|
||||
if delay is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_DELAY,
|
||||
delay)
|
||||
if bw is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_BW, bw)
|
||||
if loss is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_PER,
|
||||
str(loss))
|
||||
if duplicate is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_DUP,
|
||||
str(duplicate))
|
||||
if jitter is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_JITTER,
|
||||
jitter)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_TYPE,
|
||||
self.linktype)
|
||||
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF1NUM, \
|
||||
if1.node.getifindex(if1))
|
||||
for addr in if1.addrlist:
|
||||
(ip, sep, mask) = addr.partition('/')
|
||||
mask = int(mask)
|
||||
if isIPv4Address(ip):
|
||||
family = AF_INET
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF1IP4
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF1IP4MASK
|
||||
else:
|
||||
family = AF_INET6
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF1IP6
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF1IP6MASK
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip,
|
||||
IPAddr(af=family, addr=ipl))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask)
|
||||
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2NUM, \
|
||||
if2.node.getifindex(if2))
|
||||
for addr in if2.addrlist:
|
||||
(ip, sep, mask) = addr.partition('/')
|
||||
mask = int(mask)
|
||||
if isIPv4Address(ip):
|
||||
family = AF_INET
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP4
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP4MASK
|
||||
else:
|
||||
family = AF_INET6
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP6
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP6MASK
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip,
|
||||
IPAddr(af=family, addr=ipl))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask)
|
||||
msg = coreapi.CoreLinkMessage.pack(flags, tlvdata)
|
||||
return [msg,]
|
||||
|
||||
class SwitchNode(LxBrNet):
|
||||
apitype = coreapi.CORE_NODE_SWITCH
|
||||
policy = "ACCEPT"
|
||||
type = "lanswitch"
|
||||
|
||||
class HubNode(LxBrNet):
|
||||
apitype = coreapi.CORE_NODE_HUB
|
||||
policy = "ACCEPT"
|
||||
type = "hub"
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True):
|
||||
''' The Hub node forwards packets to all bridge ports by turning off
|
||||
MAC address learning.
|
||||
'''
|
||||
LxBrNet.__init__(self, session, objid, name, verbose, start)
|
||||
if start:
|
||||
check_call([BRCTL_BIN, "setageing", self.brname, "0"])
|
||||
|
||||
|
||||
class WlanNode(LxBrNet):
|
||||
apitype = coreapi.CORE_NODE_WLAN
|
||||
linktype = coreapi.CORE_LINK_WIRELESS
|
||||
policy = "DROP"
|
||||
type = "wlan"
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True, policy = None):
|
||||
LxBrNet.__init__(self, session, objid, name, verbose, start, policy)
|
||||
# wireless model such as basic range
|
||||
self.model = None
|
||||
# mobility model such as scripted
|
||||
self.mobility = None
|
||||
|
||||
def attach(self, netif):
|
||||
LxBrNet.attach(self, netif)
|
||||
if self.model:
|
||||
netif.poshook = self.model._positioncallback
|
||||
if netif.node is None:
|
||||
return
|
||||
(x,y,z) = netif.node.position.get()
|
||||
# invokes any netif.poshook
|
||||
netif.setposition(x, y, z)
|
||||
#self.model.setlinkparams()
|
||||
|
||||
def setmodel(self, model, config):
|
||||
''' Mobility and wireless model.
|
||||
'''
|
||||
if (self.verbose):
|
||||
self.info("adding model %s" % model._name)
|
||||
if model._type == coreapi.CORE_TLV_REG_WIRELESS:
|
||||
self.model = model(session=self.session, objid=self.objid,
|
||||
verbose=self.verbose, values=config)
|
||||
if self.model._positioncallback:
|
||||
for netif in self.netifs():
|
||||
netif.poshook = self.model._positioncallback
|
||||
if netif.node is not None:
|
||||
(x,y,z) = netif.node.position.get()
|
||||
netif.poshook(netif, x, y, z)
|
||||
self.model.setlinkparams()
|
||||
elif model._type == coreapi.CORE_TLV_REG_MOBILITY:
|
||||
self.mobility = model(session=self.session, objid=self.objid,
|
||||
verbose=self.verbose, values=config)
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
msgs = LxBrNet.tolinkmsgs(self, flags)
|
||||
if self.model:
|
||||
msgs += self.model.tolinkmsgs(flags)
|
||||
return msgs
|
||||
|
||||
|
||||
class RJ45Node(PyCoreNode, PyCoreNetIf):
|
||||
''' RJ45Node is a physical interface on the host linked to the emulated
|
||||
network.
|
||||
'''
|
||||
apitype = coreapi.CORE_NODE_RJ45
|
||||
|
||||
def __init__(self, session, objid = None, name = None, mtu = 1500,
|
||||
verbose = False, start = True):
|
||||
PyCoreNode.__init__(self, session, objid, name, verbose=verbose,
|
||||
start=start)
|
||||
# this initializes net, params, poshook
|
||||
PyCoreNetIf.__init__(self, node=self, name=name, mtu = mtu)
|
||||
self.up = False
|
||||
self.lock = threading.RLock()
|
||||
self.ifindex = None
|
||||
# the following are PyCoreNetIf attributes
|
||||
self.transport_type = "raw"
|
||||
self.localname = name
|
||||
self.type = "rj45"
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def startup(self):
|
||||
''' Set the interface in the up state.
|
||||
'''
|
||||
# interface will also be marked up during net.attach()
|
||||
self.savestate()
|
||||
try:
|
||||
check_call([IP_BIN, "link", "set", self.localname, "up"])
|
||||
except:
|
||||
self.warn("Failed to run command: %s link set %s up" % \
|
||||
(IP_BIN, self.localname))
|
||||
return
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
''' Bring the interface down. Remove any addresses and queuing
|
||||
disciplines.
|
||||
'''
|
||||
if not self.up:
|
||||
return
|
||||
check_call([IP_BIN, "link", "set", self.localname, "down"])
|
||||
check_call([IP_BIN, "addr", "flush", "dev", self.localname])
|
||||
mutecall([TC_BIN, "qdisc", "del", "dev", self.localname, "root"])
|
||||
self.up = False
|
||||
self.restorestate()
|
||||
|
||||
def attachnet(self, net):
|
||||
PyCoreNetIf.attachnet(self, net)
|
||||
|
||||
def detachnet(self):
|
||||
PyCoreNetIf.detachnet(self)
|
||||
|
||||
def newnetif(self, net = None, addrlist = [], hwaddr = None,
|
||||
ifindex = None, ifname = None):
|
||||
''' This is called when linking with another node. Since this node
|
||||
represents an interface, we do not create another object here,
|
||||
but attach ourselves to the given network.
|
||||
'''
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if ifindex is None:
|
||||
ifindex = 0
|
||||
if self.net is not None:
|
||||
raise ValueError, \
|
||||
"RJ45 nodes support at most 1 network interface"
|
||||
self._netif[ifindex] = self
|
||||
self.node = self # PyCoreNetIf.node is self
|
||||
self.ifindex = ifindex
|
||||
if net is not None:
|
||||
self.attachnet(net)
|
||||
for addr in maketuple(addrlist):
|
||||
self.addaddr(addr)
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def delnetif(self, ifindex):
|
||||
if ifindex is None:
|
||||
ifindex = 0
|
||||
if ifindex not in self._netif:
|
||||
raise ValueError, "ifindex %s does not exist" % ifindex
|
||||
self._netif.pop(ifindex)
|
||||
if ifindex == self.ifindex:
|
||||
self.shutdown()
|
||||
else:
|
||||
raise ValueError, "ifindex %s does not exist" % ifindex
|
||||
|
||||
def netif(self, ifindex, net=None):
|
||||
''' This object is considered the network interface, so we only
|
||||
return self here. This keeps the RJ45Node compatible with
|
||||
real nodes.
|
||||
'''
|
||||
if net is not None and net == self.net:
|
||||
return self
|
||||
if ifindex is None:
|
||||
ifindex = 0
|
||||
if ifindex == self.ifindex:
|
||||
return self
|
||||
return None
|
||||
|
||||
def getifindex(self, netif):
|
||||
if netif != self:
|
||||
return None
|
||||
return self.ifindex
|
||||
|
||||
def addaddr(self, addr):
|
||||
if self.up:
|
||||
check_call([IP_BIN, "addr", "add", str(addr), "dev", self.name])
|
||||
PyCoreNetIf.addaddr(self, addr)
|
||||
|
||||
def deladdr(self, addr):
|
||||
if self.up:
|
||||
check_call([IP_BIN, "addr", "del", str(addr), "dev", self.name])
|
||||
PyCoreNetIf.deladdr(self, addr)
|
||||
|
||||
def savestate(self):
|
||||
''' Save the addresses and other interface state before using the
|
||||
interface for emulation purposes. TODO: save/restore the PROMISC flag
|
||||
'''
|
||||
self.old_up = False
|
||||
self.old_addrs = []
|
||||
cmd = [IP_BIN, "addr", "show", "dev", self.localname]
|
||||
try:
|
||||
tmp = subprocess.Popen(cmd, stdout = subprocess.PIPE)
|
||||
except OSError:
|
||||
self.warn("Failed to run %s command: %s" % (IP_BIN, cmd))
|
||||
if tmp.wait():
|
||||
self.warn("Command failed: %s" % cmd)
|
||||
return
|
||||
lines = tmp.stdout.read()
|
||||
tmp.stdout.close()
|
||||
for l in lines.split('\n'):
|
||||
items = l.split()
|
||||
if len(items) < 2:
|
||||
continue
|
||||
if items[1] == "%s:" % self.localname:
|
||||
flags = items[2][1:-1].split(',')
|
||||
if "UP" in flags:
|
||||
self.old_up = True
|
||||
elif items[0] == "inet":
|
||||
self.old_addrs.append((items[1], items[3]))
|
||||
elif items[0] == "inet6":
|
||||
if items[1][:4] == "fe80":
|
||||
continue
|
||||
self.old_addrs.append((items[1], None))
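# savestate() scrapes "ip addr show" output token by token; the same parsing
# run over a captured sample (interface name and addresses are made up):
sample = """\
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast
    inet 10.0.0.5/24 brd 10.0.0.255 scope global eth0
    inet6 fe80::5054:ff:fe12:3456/64 scope link
"""
localname = "eth0"
old_up = False
old_addrs = []
for l in sample.split('\n'):
    items = l.split()
    if len(items) < 2:
        continue
    if items[1] == "%s:" % localname:
        old_up = "UP" in items[2][1:-1].split(',')
    elif items[0] == "inet":
        old_addrs.append((items[1], items[3]))       # address, broadcast
    elif items[0] == "inet6" and not items[1].startswith("fe80"):
        old_addrs.append((items[1], None))           # non-link-local inet6

print(old_up, old_addrs)    # -> True [('10.0.0.5/24', '10.0.0.255')]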
|
||||
|
||||
def restorestate(self):
|
||||
''' Restore the addresses and other interface state after using it.
|
||||
'''
|
||||
for addr in self.old_addrs:
|
||||
if addr[1] is None:
|
||||
check_call([IP_BIN, "addr", "add", addr[0], "dev",
|
||||
self.localname])
|
||||
else:
|
||||
check_call([IP_BIN, "addr", "add", addr[0], "brd", addr[1],
|
||||
"dev", self.localname])
|
||||
if self.old_up:
|
||||
check_call([IP_BIN, "link", "set", self.localname, "up"])
|
||||
|
||||
def setposition(self, x=None, y=None, z=None):
|
||||
''' Use setposition() from both parent classes.
|
||||
'''
|
||||
PyCoreObj.setposition(self, x, y, z)
|
||||
# invoke any poshook
|
||||
PyCoreNetIf.setposition(self, x, y, z)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
class TunnelNode(GreTapBridge):
|
||||
apitype = coreapi.CORE_NODE_TUNNEL
|
||||
policy = "ACCEPT"
|
||||
type = "tunnel"
|
||||
|
168
daemon/core/netns/vif.py
Normal file
|
@@ -0,0 +1,168 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
vif.py: PyCoreNetIf classes that implement the interfaces available
|
||||
under Linux.
|
||||
'''
|
||||
|
||||
import os, signal, shutil, sys, subprocess, vnodeclient, threading, string
|
||||
import random, time
|
||||
from core.api import coreapi
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
from core.coreobj import PyCoreObj, PyCoreNode, PyCoreNetIf, Position
|
||||
from core.emane.nodes import EmaneNode
|
||||
|
||||
checkexec([IP_BIN])
|
||||
|
||||
class VEth(PyCoreNetIf):
|
||||
def __init__(self, node, name, localname, mtu = 1500, net = None,
|
||||
start = True):
|
||||
# note that net arg is ignored
|
||||
PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu)
|
||||
self.localname = localname
|
||||
self.up = False
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def startup(self):
|
||||
check_call([IP_BIN, "link", "add", "name", self.localname,
|
||||
"type", "veth", "peer", "name", self.name])
|
||||
check_call([IP_BIN, "link", "set", self.localname, "up"])
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
if self.node:
|
||||
self.node.cmd([IP_BIN, "-6", "addr", "flush", "dev", self.name])
|
||||
if self.localname:
|
||||
mutedetach([IP_BIN, "link", "delete", self.localname])
|
||||
self.up = False
|
||||
|
||||
|
||||
class TunTap(PyCoreNetIf):
|
||||
''' TUN/TAP virtual device in TAP mode
|
||||
'''
|
||||
def __init__(self, node, name, localname, mtu = 1500, net = None,
|
||||
start = True):
|
||||
PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu)
|
||||
self.localname = localname
|
||||
self.up = False
|
||||
self.transport_type = "virtual"
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def startup(self):
|
||||
# TODO: more sophisticated TAP creation here
|
||||
# Debian does not support -p (tap) option, RedHat does.
|
||||
# For now, this is disabled to allow the TAP to be created by another
|
||||
# system (e.g. EMANE's emanetransportd)
|
||||
#check_call(["tunctl", "-t", self.name])
|
||||
# self.install()
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
self.node.cmd([IP_BIN, "-6", "addr", "flush", "dev", self.name])
|
||||
#if self.name:
|
||||
# mutedetach(["tunctl", "-d", self.localname])
|
||||
self.up = False
|
||||
|
||||
def install(self):
|
||||
''' Install this TAP into its namespace. This is not done from the
|
||||
startup() method but called at a later time when a userspace
|
||||
program (running on the host) has had a chance to open the socket
|
||||
end of the TAP.
|
||||
'''
|
||||
netns = str(self.node.pid)
|
||||
# check for presence of device - tap device may not appear right away
|
||||
# waits ~= stime * ( 2 ** attempts) seconds
|
||||
attempts = 9
|
||||
stime = 0.01
|
||||
while attempts > 0:
|
||||
try:
|
||||
mutecheck_call([IP_BIN, "link", "show", self.localname])
|
||||
break
|
||||
except Exception, e:
|
||||
msg = "ip link show %s error (%d): %s" % \
|
||||
(self.localname, attempts, e)
|
||||
if attempts > 1:
|
||||
msg += ", retrying..."
|
||||
self.node.info(msg)
|
||||
time.sleep(stime)
|
||||
stime *= 2
|
||||
attempts -= 1
|
||||
# install tap device into namespace
|
||||
try:
|
||||
check_call([IP_BIN, "link", "set", self.localname, "netns", netns])
|
||||
except Exception, e:
|
||||
msg = "error installing TAP interface %s, command:" % self.localname
|
||||
msg += "ip link set %s netns %s" % (self.localname, netns)
|
||||
self.node.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.localname, msg)
|
||||
self.node.warn(msg)
|
||||
return
|
||||
self.node.cmd([IP_BIN, "link", "set", self.localname,
|
||||
"name", self.name])
|
||||
for addr in self.addrlist:
|
||||
self.node.cmd([IP_BIN, "addr", "add", str(addr),
|
||||
"dev", self.name])
|
||||
self.node.cmd([IP_BIN, "link", "set", self.name, "up"])
|
||||
|
||||
class GreTap(PyCoreNetIf):
|
||||
''' GRE TAP device for tunneling between emulation servers.
|
||||
Uses the "gretap" tunnel device type from Linux which is a GRE device
|
||||
having a MAC address. The MAC address is required for bridging.
|
||||
'''
|
||||
def __init__(self, node = None, name = None, session = None, mtu = 1458,
|
||||
remoteip = None, objid = None, localip = None, ttl = 255,
|
||||
key = None, start = True):
|
||||
PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu)
|
||||
self.session = session
|
||||
if objid is None:
|
||||
# from PyCoreObj
|
||||
objid = (((id(self) >> 16) ^ (id(self) & 0xffff)) & 0xffff)
|
||||
self.objid = objid
|
||||
sessionid = self.session.shortsessionid()
|
||||
# interface name on the local host machine
|
||||
self.localname = "gt.%s.%s" % (self.objid, sessionid)
|
||||
self.transport_type = "raw"
|
||||
if not start:
|
||||
self.up = False
|
||||
return
|
||||
|
||||
if remoteip is None:
|
||||
raise ValueError, "missing remote IP required for GRE TAP device"
|
||||
cmd = ("ip", "link", "add", self.localname, "type", "gretap",
|
||||
"remote", str(remoteip))
|
||||
if localip:
|
||||
cmd += ("local", str(localip))
|
||||
if ttl:
|
||||
cmd += ("ttl", str(ttl))
|
||||
if key:
|
||||
cmd += ("key", str(key))
|
||||
check_call(cmd)
|
||||
cmd = ("ip", "link", "set", self.localname, "up")
|
||||
check_call(cmd)
|
||||
self.up = True
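# For reference, the commands assembled above for a fully specified GRE TAP
# (remote, local, ttl and key all given) come out as shown below; the device
# name and the 192.0.2.x addresses are placeholders:
localname = "gt.1000.abcd"          # gt.<objid>.<shortsessionid>
cmd = ("ip", "link", "add", localname, "type", "gretap",
       "remote", "192.0.2.2", "local", "192.0.2.1",
       "ttl", "255", "key", "42")
print(" ".join(cmd))
# ip link add gt.1000.abcd type gretap remote 192.0.2.2 local 192.0.2.1 ttl 255 key 42
print(" ".join(("ip", "link", "set", localname, "up")))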
|
||||
|
||||
def shutdown(self):
|
||||
if self.localname:
|
||||
cmd = ("ip", "link", "set", self.localname, "down")
|
||||
check_call(cmd)
|
||||
cmd = ("ip", "link", "del", self.localname)
|
||||
check_call(cmd)
|
||||
self.localname = None
|
||||
|
||||
def tonodemsg(self, flags):
|
||||
return None
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
return []
|
496
daemon/core/netns/vnet.py
Normal file
|
@@ -0,0 +1,496 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
vnet.py: PyCoreNet and LxBrNet classes that implement virtual networks using
|
||||
Linux Ethernet bridging and ebtables rules.
|
||||
'''
|
||||
|
||||
import os, sys, threading, time, subprocess
|
||||
|
||||
from core.api import coreapi
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
from core.coreobj import PyCoreNet, PyCoreObj
|
||||
from core.netns.vif import VEth, GreTap
|
||||
|
||||
checkexec([BRCTL_BIN, IP_BIN, EBTABLES_BIN, TC_BIN])
|
||||
|
||||
ebtables_lock = threading.Lock()
|
||||
|
||||
class EbtablesQueue(object):
|
||||
''' Helper class for queuing up ebtables commands into rate-limited
|
||||
atomic commits. This improves performance and reliability when there are
|
||||
many WLAN link updates.
|
||||
'''
|
||||
# update rate is every 300ms
|
||||
rate = 0.3
|
||||
# ebtables
|
||||
atomic_file = "/tmp/pycore.ebtables.atomic"
|
||||
|
||||
def __init__(self):
|
||||
''' Initialize the helper class, but don't start the update thread
|
||||
until a WLAN is instantiated.
|
||||
'''
|
||||
self.doupdateloop = False
|
||||
self.updatethread = None
|
||||
# this lock protects cmds and updates lists
|
||||
self.updatelock = threading.Lock()
|
||||
# list of pending ebtables commands
|
||||
self.cmds = []
|
||||
# list of WLANs requiring update
|
||||
self.updates = []
|
||||
# timestamps of last WLAN update; this keeps track of WLANs that are
|
||||
# using this queue
|
||||
self.last_update_time = {}
|
||||
|
||||
def startupdateloop(self, wlan):
|
||||
''' Kick off the update loop; only needs to be invoked once.
|
||||
'''
|
||||
self.updatelock.acquire()
|
||||
self.last_update_time[wlan] = time.time()
|
||||
self.updatelock.release()
|
||||
if self.doupdateloop:
|
||||
return
|
||||
self.doupdateloop = True
|
||||
self.updatethread = threading.Thread(target = self.updateloop)
|
||||
self.updatethread.daemon = True
|
||||
self.updatethread.start()
|
||||
|
||||
def stopupdateloop(self, wlan):
|
||||
''' Kill the update loop thread if there are no more WLANs using it.
|
||||
'''
|
||||
self.updatelock.acquire()
|
||||
try:
|
||||
del self.last_update_time[wlan]
|
||||
except KeyError:
|
||||
pass
|
||||
self.updatelock.release()
|
||||
if len(self.last_update_time) > 0:
|
||||
return
|
||||
self.doupdateloop = False
|
||||
if self.updatethread:
|
||||
self.updatethread.join()
|
||||
self.updatethread = None
|
||||
|
||||
def ebatomiccmd(self, cmd):
|
||||
''' Helper for building ebtables atomic file command list.
|
||||
'''
|
||||
r = [EBTABLES_BIN, "--atomic-file", self.atomic_file]
|
||||
if cmd:
|
||||
r.extend(cmd)
|
||||
return r
|
||||
|
||||
def lastupdate(self, wlan):
|
||||
''' Return the time elapsed since this WLAN was last updated.
|
||||
'''
|
||||
try:
|
||||
elapsed = time.time() - self.last_update_time[wlan]
|
||||
except KeyError:
|
||||
self.last_update_time[wlan] = time.time()
|
||||
elapsed = 0.0
|
||||
return elapsed
|
||||
|
||||
def updated(self, wlan):
|
||||
''' Keep track of when this WLAN was last updated.
|
||||
'''
|
||||
self.last_update_time[wlan] = time.time()
|
||||
self.updates.remove(wlan)
|
||||
|
||||
def updateloop(self):
|
||||
''' Thread target that looks for WLANs needing update, and
|
||||
rate limits the amount of ebtables activity. Only one userspace program
|
||||
should use ebtables at any given time, or results can be unpredictable.
|
||||
'''
|
||||
while self.doupdateloop:
|
||||
self.updatelock.acquire()
|
||||
for wlan in self.updates:
|
||||
if self.lastupdate(wlan) > self.rate:
|
||||
self.buildcmds(wlan)
|
||||
#print "ebtables commit %d rules" % len(self.cmds)
|
||||
self.ebcommit(wlan)
|
||||
self.updated(wlan)
|
||||
self.updatelock.release()
|
||||
time.sleep(self.rate)
|
||||
|
||||
def ebcommit(self, wlan):
|
||||
''' Perform ebtables atomic commit using commands built in the
|
||||
self.cmds list.
|
||||
'''
|
||||
# save kernel ebtables snapshot to a file
|
||||
cmd = self.ebatomiccmd(["--atomic-save",])
|
||||
try:
|
||||
check_call(cmd)
|
||||
except Exception, e:
|
||||
self.eberror(wlan, "atomic-save (%s)" % cmd, e)
|
||||
# no atomic file, exit
|
||||
return
|
||||
# modify the table file using queued ebtables commands
|
||||
for c in self.cmds:
|
||||
cmd = self.ebatomiccmd(c)
|
||||
try:
|
||||
check_call(cmd)
|
||||
except Exception, e:
|
||||
self.eberror(wlan, "cmd=%s" % cmd, e)
|
||||
pass
|
||||
self.cmds = []
|
||||
# commit the table file to the kernel
|
||||
cmd = self.ebatomiccmd(["--atomic-commit",])
|
||||
try:
|
||||
check_call(cmd)
|
||||
os.unlink(self.atomic_file)
|
||||
except Exception, e:
|
||||
self.eberror(wlan, "atomic-commit (%s)" % cmd, e)
|
||||
|
||||
def ebchange(self, wlan):
|
||||
''' Flag a change to the given WLAN's _linked dict, so the ebtables
|
||||
chain will be rebuilt at the next interval.
|
||||
'''
|
||||
self.updatelock.acquire()
|
||||
if wlan not in self.updates:
|
||||
self.updates.append(wlan)
|
||||
self.updatelock.release()
|
||||
|
||||
def buildcmds(self, wlan):
|
||||
''' Inspect a _linked dict from a wlan, and rebuild the ebtables chain
|
||||
for that WLAN.
|
||||
'''
|
||||
wlan._linked_lock.acquire()
|
||||
# flush the chain
|
||||
self.cmds.extend([["-F", wlan.brname],])
|
||||
# rebuild the chain
|
||||
for (netif1, v) in wlan._linked.items():
|
||||
for (netif2, linked) in v.items():
|
||||
if wlan.policy == "DROP" and linked:
|
||||
self.cmds.extend([["-A", wlan.brname, "-i", netif1.localname,
|
||||
"-o", netif2.localname, "-j", "ACCEPT"],
|
||||
["-A", wlan.brname, "-o", netif1.localname,
|
||||
"-i", netif2.localname, "-j", "ACCEPT"]])
|
||||
elif wlan.policy == "ACCEPT" and not linked:
|
||||
self.cmds.extend([["-A", wlan.brname, "-i", netif1.localname,
|
||||
"-o", netif2.localname, "-j", "DROP"],
|
||||
["-A", wlan.brname, "-o", netif1.localname,
|
||||
"-i", netif2.localname, "-j", "DROP"]])
|
||||
wlan._linked_lock.release()
|
||||
|
||||
def eberror(self, wlan, source, error):
|
||||
''' Log an ebtables command error and send an exception.
|
||||
'''
|
||||
if not wlan:
|
||||
return
|
||||
wlan.exception(coreapi.CORE_EXCP_LEVEL_ERROR, wlan.brname,
|
||||
"ebtables command error: %s\n%s\n" % (source, error))
|
||||
|
||||
|
||||
# a global object because all WLANs share the same queue
|
||||
# cannot have multiple threads invoking the ebtables command
|
||||
ebq = EbtablesQueue()
|
||||
|
||||
def ebtablescmds(call, cmds):
|
||||
ebtables_lock.acquire()
|
||||
try:
|
||||
for cmd in cmds:
|
||||
call(cmd)
|
||||
finally:
|
||||
ebtables_lock.release()
|
||||
|
||||
class LxBrNet(PyCoreNet):
|
||||
|
||||
policy = "DROP"
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True, policy = None):
|
||||
PyCoreNet.__init__(self, session, objid, name, verbose, start)
|
||||
if name is None:
|
||||
name = str(self.objid)
|
||||
if policy is not None:
|
||||
self.policy = policy
|
||||
self.name = name
|
||||
self.brname = "b.%s.%s" % (str(self.objid), self.session.sessionid)
|
||||
self.up = False
|
||||
if start:
|
||||
self.startup()
|
||||
ebq.startupdateloop(self)
|
||||
|
||||
def startup(self):
|
||||
try:
|
||||
check_call([BRCTL_BIN, "addbr", self.brname])
|
||||
except Exception, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_FATAL, self.brname,
|
||||
"Error adding bridge: %s" % e)
|
||||
try:
|
||||
# turn off spanning tree protocol and forwarding delay
|
||||
check_call([BRCTL_BIN, "stp", self.brname, "off"])
|
||||
check_call([BRCTL_BIN, "setfd", self.brname, "0"])
|
||||
check_call([IP_BIN, "link", "set", self.brname, "up"])
|
||||
# create a new ebtables chain for this bridge
|
||||
ebtablescmds(check_call, [
|
||||
[EBTABLES_BIN, "-N", self.brname, "-P", self.policy],
|
||||
[EBTABLES_BIN, "-A", "FORWARD",
|
||||
"--logical-in", self.brname, "-j", self.brname]])
|
||||
# turn off multicast snooping so mcast forwarding occurs w/o IGMP joins
|
||||
snoop = "/sys/devices/virtual/net/%s/bridge/multicast_snooping" % \
|
||||
self.brname
|
||||
if os.path.exists(snoop):
|
||||
open(snoop, "w").write('0')
|
||||
except Exception, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_WARNING, self.brname,
|
||||
"Error setting bridge parameters: %s" % e)
|
||||
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
ebq.stopupdateloop(self)
|
||||
mutecall([IP_BIN, "link", "set", self.brname, "down"])
|
||||
mutecall([BRCTL_BIN, "delbr", self.brname])
|
||||
ebtablescmds(mutecall, [
|
||||
[EBTABLES_BIN, "-D", "FORWARD",
|
||||
"--logical-in", self.brname, "-j", self.brname],
|
||||
[EBTABLES_BIN, "-X", self.brname]])
|
||||
for netif in self.netifs():
|
||||
# removes veth pairs used for bridge-to-bridge connections
|
||||
netif.shutdown()
|
||||
self._netif.clear()
|
||||
self._linked.clear()
|
||||
del self.session
|
||||
self.up = False
|
||||
|
||||
def attach(self, netif):
|
||||
if self.up:
|
||||
try:
|
||||
check_call([BRCTL_BIN, "addif", self.brname, netif.localname])
|
||||
check_call([IP_BIN, "link", "set", netif.localname, "up"])
|
||||
except Exception, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.brname,
|
||||
"Error joining interface %s to bridge %s: %s" % \
|
||||
(netif.localname, self.brname, e))
|
||||
return
|
||||
PyCoreNet.attach(self, netif)
|
||||
|
||||
def detach(self, netif):
|
||||
if self.up:
|
||||
try:
|
||||
check_call([BRCTL_BIN, "delif", self.brname, netif.localname])
|
||||
except Exception, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.brname,
|
||||
"Error removing interface %s from bridge %s: %s" % \
|
||||
(netif.localname, self.brname, e))
|
||||
return
|
||||
PyCoreNet.detach(self, netif)
|
||||
|
||||
def linked(self, netif1, netif2):
|
||||
# check if the network interfaces are attached to this network
|
||||
if self._netif[netif1.netifi] != netif1:
|
||||
raise ValueError, "inconsistency for netif %s" % netif1.name
|
||||
if self._netif[netif2.netifi] != netif2:
|
||||
raise ValueError, "inconsistency for netif %s" % netif2.name
|
||||
try:
|
||||
linked = self._linked[netif1][netif2]
|
||||
except KeyError:
|
||||
if self.policy == "ACCEPT":
|
||||
linked = True
|
||||
elif self.policy == "DROP":
|
||||
linked = False
|
||||
else:
|
||||
raise Exception, "unknown policy: %s" % self.policy
|
||||
self._linked[netif1][netif2] = linked
|
||||
return linked
|
||||
|
||||
def unlink(self, netif1, netif2):
|
||||
''' Unlink two PyCoreNetIfs, resulting in adding or removing ebtables
|
||||
filtering rules.
|
||||
'''
|
||||
self._linked_lock.acquire()
|
||||
if not self.linked(netif1, netif2):
|
||||
self._linked_lock.release()
|
||||
return
|
||||
self._linked[netif1][netif2] = False
|
||||
self._linked_lock.release()
|
||||
ebq.ebchange(self)
|
||||
|
||||
def link(self, netif1, netif2):
|
||||
''' Link two PyCoreNetIfs together, resulting in adding or removing
|
||||
ebtables filtering rules.
|
||||
'''
|
||||
self._linked_lock.acquire()
|
||||
if self.linked(netif1, netif2):
|
||||
self._linked_lock.release()
|
||||
return
|
||||
self._linked[netif1][netif2] = True
|
||||
self._linked_lock.release()
|
||||
ebq.ebchange(self)
|
||||
|
||||
def linkconfig(self, netif, bw = None, delay = None,
|
||||
loss = None, duplicate = None, jitter = None, netif2 = None):
|
||||
''' Configure link parameters by applying tc queuing disciplines on the
|
||||
interface.
|
||||
'''
|
||||
tc = [TC_BIN, "qdisc", "replace", "dev", netif.localname]
|
||||
parent = ["root"]
|
||||
changed = False
|
||||
if netif.setparam('bw', bw):
|
||||
# from tc-tbf(8): minimum value for burst is rate / kernel_hz
|
||||
if bw is not None:
|
||||
burst = max(2 * netif.mtu, bw / 1000)
|
||||
limit = 0xffff # max IP payload
|
||||
tbf = ["tbf", "rate", str(bw),
|
||||
"burst", str(burst), "limit", str(limit)]
|
||||
if bw > 0:
|
||||
if self.up:
|
||||
check_call(tc + parent + ["handle", "1:"] + tbf)
|
||||
netif.setparam('has_tbf', True)
|
||||
changed = True
|
||||
elif netif.getparam('has_tbf') and bw <= 0:
|
||||
tcd = [] + tc
|
||||
tcd[2] = "delete"
|
||||
if self.up:
|
||||
check_call(tcd + parent)
|
||||
netif.setparam('has_tbf', False)
|
||||
# removing the parent removes the child
|
||||
netif.setparam('has_netem', False)
|
||||
changed = True
|
||||
if netif.getparam('has_tbf'):
|
||||
parent = ["parent", "1:1"]
|
||||
netem = ["netem"]
|
||||
changed = max(changed, netif.setparam('delay', delay))
|
||||
if loss is not None:
|
||||
loss = float(loss)
|
||||
changed = max(changed, netif.setparam('loss', loss))
|
||||
if duplicate is not None:
|
||||
duplicate = float(duplicate)
|
||||
changed = max(changed, netif.setparam('duplicate', duplicate))
|
||||
changed = max(changed, netif.setparam('jitter', jitter))
|
||||
if not changed:
|
||||
return
|
||||
# jitter and delay use the same delay statement
|
||||
if delay is not None:
|
||||
netem += ["delay", "%sus" % delay]
|
||||
if jitter is not None:
|
||||
if delay is None:
|
||||
netem += ["delay", "0us", "%sus" % jitter, "25%"]
|
||||
else:
|
||||
netem += ["%sus" % jitter, "25%"]
|
||||
|
||||
if loss is not None:
|
||||
netem += ["loss", "%s%%" % min(loss, 100)]
|
||||
if duplicate is not None:
|
||||
netem += ["duplicate", "%s%%" % min(duplicate, 100)]
|
||||
if delay <= 0 and loss <= 0 and duplicate <= 0:
|
||||
# possibly remove netem if it exists and parent queue wasn't removed
|
||||
if not netif.getparam('has_netem'):
|
||||
return
|
||||
tc[2] = "delete"
|
||||
if self.up:
|
||||
check_call(tc + parent + ["handle", "10:"])
|
||||
netif.setparam('has_netem', False)
|
||||
elif len(netem) > 1:
|
||||
if self.up:
|
||||
check_call(tc + parent + ["handle", "10:"] + netem)
|
||||
netif.setparam('has_netem', True)
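# The net effect of linkconfig() is a root tbf qdisc for bandwidth with a
# child netem qdisc for delay/jitter/loss/duplication. For roughly 1 Mbps,
# 20 ms delay and 1% loss, the two generated tc commands would look like the
# following (the interface name is a placeholder):
dev = "n5.eth0.abcd"
bw, mtu = 1000000, 1500
burst = max(2 * mtu, bw // 1000)     # tc-tbf(8): burst >= rate / kernel HZ
tbf = ["tc", "qdisc", "replace", "dev", dev, "root", "handle", "1:",
       "tbf", "rate", str(bw), "burst", str(burst), "limit", str(0xffff)]
netem = ["tc", "qdisc", "replace", "dev", dev, "parent", "1:1",
         "handle", "10:", "netem", "delay", "20000us", "loss", "1.0%"]
print(" ".join(tbf))
print(" ".join(netem))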
|
||||
|
||||
def linknet(self, net):
|
||||
''' Link this bridge with another by creating a veth pair and installing
|
||||
each device into each bridge.
|
||||
'''
|
||||
sessionid = self.session.sessionid
|
||||
localname = "n%s.%s.%s" % (self.objid, net.objid, sessionid)
|
||||
name = "n%s.%s.%s" % (net.objid, self.objid, sessionid)
|
||||
netif = VEth(node = None, name = name, localname = localname,
|
||||
mtu = 1500, net = self, start = self.up)
|
||||
self.attach(netif)
|
||||
if net.up:
|
||||
# this is similar to net.attach() but uses netif.name instead
|
||||
# of localname
|
||||
check_call([BRCTL_BIN, "addif", net.brname, netif.name])
|
||||
check_call([IP_BIN, "link", "set", netif.name, "up"])
|
||||
i = net.newifindex()
|
||||
net._netif[i] = netif
|
||||
with net._linked_lock:
|
||||
net._linked[netif] = {}
|
||||
netif.net = self
|
||||
netif.othernet = net
|
||||
|
||||
def addrconfig(self, addrlist):
|
||||
''' Set addresses on the bridge.
|
||||
'''
|
||||
if not self.up:
|
||||
return
|
||||
for addr in addrlist:
|
||||
try:
|
||||
check_call([IP_BIN, "addr", "add", str(addr), "dev", self.brname])
|
||||
except Exception, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.brname,
|
||||
"Error adding IP address: %s" % e)
|
||||
|
||||
class GreTapBridge(LxBrNet):
|
||||
''' A network consisting of a bridge with a gretap device for tunneling to
|
||||
another system.
|
||||
'''
|
||||
def __init__(self, session, remoteip = None, objid = None, name = None,
|
||||
policy = "ACCEPT", localip = None, ttl = 255, key = None,
|
||||
verbose = False, start = True):
|
||||
LxBrNet.__init__(self, session = session, objid = objid,
|
||||
name = name, verbose = verbose, policy = policy,
|
||||
start = False)
|
||||
self.grekey = key
|
||||
if self.grekey is None:
|
||||
self.grekey = self.session.sessionid ^ self.objid
|
||||
self.localnum = None
|
||||
self.remotenum = None
|
||||
self.remoteip = remoteip
|
||||
self.localip = localip
|
||||
self.ttl = ttl
|
||||
if remoteip is None:
|
||||
self.gretap = None
|
||||
else:
|
||||
self.gretap = GreTap(node = self, name = None, session = session,
|
||||
remoteip = remoteip, objid = None, localip = localip, ttl = ttl,
|
||||
key = self.grekey)
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def startup(self):
|
||||
''' Creates a bridge and adds the gretap device to it.
|
||||
'''
|
||||
LxBrNet.startup(self)
|
||||
if self.gretap:
|
||||
self.attach(self.gretap)
|
||||
|
||||
def shutdown(self):
|
||||
''' Detach the gretap device and remove the bridge.
|
||||
'''
|
||||
if self.gretap:
|
||||
self.detach(self.gretap)
|
||||
self.gretap.shutdown()
|
||||
self.gretap = None
|
||||
LxBrNet.shutdown(self)
|
||||
|
||||
def addrconfig(self, addrlist):
|
||||
''' Set the remote tunnel endpoint. This is a one-time method for
|
||||
creating the GreTap device, which requires the remoteip at startup.
|
||||
The 1st address in the provided list is remoteip, 2nd optionally
|
||||
specifies localip.
|
||||
'''
|
||||
if self.gretap:
|
||||
raise ValueError, "gretap already exists for %s" % self.name
|
||||
remoteip = addrlist[0].split('/')[0]
|
||||
localip = None
|
||||
if len(addrlist) > 1:
|
||||
localip = addrlist[1].split('/')[0]
|
||||
self.gretap = GreTap(session = self.session, remoteip = remoteip,
|
||||
objid = None, name = None,
|
||||
localip = localip, ttl = self.ttl, key = self.grekey)
|
||||
self.attach(self.gretap)
|
||||
|
||||
def setkey(self, key):
|
||||
''' Set the GRE key used for the GreTap device. This needs to be set
|
||||
prior to instantiating the GreTap device (before addrconfig).
|
||||
'''
|
||||
self.grekey = key
|
402
daemon/core/netns/vnode.py
Normal file
|
@@ -0,0 +1,402 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
vnode.py: PyCoreNode and LxcNode classes that implement the network namespace
|
||||
virtual node.
|
||||
'''
|
||||
|
||||
import os, signal, sys, subprocess, vnodeclient, threading, string, shutil
|
||||
import random, time
|
||||
from core.api import coreapi
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
from core.coreobj import PyCoreObj, PyCoreNode, PyCoreNetIf, Position
|
||||
from core.netns.vif import VEth, TunTap
|
||||
from core.emane.nodes import EmaneNode
|
||||
|
||||
checkexec([IP_BIN])
|
||||
|
||||
class SimpleLxcNode(PyCoreNode):
|
||||
def __init__(self, session, objid = None, name = None, nodedir = None,
|
||||
verbose = False, start = True):
|
||||
PyCoreNode.__init__(self, session, objid, name, verbose=verbose,
|
||||
start=start)
|
||||
self.nodedir = nodedir
|
||||
self.ctrlchnlname = \
|
||||
os.path.abspath(os.path.join(self.session.sessiondir, self.name))
|
||||
self.vnodeclient = None
|
||||
self.pid = None
|
||||
self.up = False
|
||||
self.lock = threading.RLock()
|
||||
self._mounts = []
|
||||
|
||||
def alive(self):
|
||||
try:
|
||||
os.kill(self.pid, 0)
|
||||
except OSError:
|
||||
return False
|
||||
return True
|
||||
|
||||
def startup(self):
|
||||
''' Start a new namespace node by invoking the vnoded process that
|
||||
allocates a new namespace. Bring up the loopback device and set
|
||||
the hostname.
|
||||
'''
|
||||
if self.up:
|
||||
raise Exception, "already up"
|
||||
vnoded = ["%s/vnoded" % CORE_SBIN_DIR, "-v", "-c", self.ctrlchnlname,
|
||||
"-l", self.ctrlchnlname + ".log",
|
||||
"-p", self.ctrlchnlname + ".pid"]
|
||||
if self.nodedir:
|
||||
vnoded += ["-C", self.nodedir]
|
||||
try:
|
||||
tmp = subprocess.Popen(vnoded, stdout = subprocess.PIPE,
|
||||
env = self.session.getenviron(state=False))
|
||||
except OSError, e:
|
||||
msg = "error running vnoded command: %s (%s)" % (vnoded, e)
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_FATAL,
|
||||
"SimpleLxcNode.startup()", msg)
|
||||
raise Exception, msg
|
||||
try:
|
||||
self.pid = int(tmp.stdout.read())
|
||||
tmp.stdout.close()
|
||||
except Exception:
|
||||
msg = "vnoded failed to create a namespace; "
|
||||
msg += "check kernel support and user priveleges"
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_FATAL,
|
||||
"SimpleLxcNode.startup()", msg)
|
||||
if tmp.wait():
|
||||
raise Exception, ("command failed: %s" % vnoded)
|
||||
self.vnodeclient = vnodeclient.VnodeClient(self.name,
|
||||
self.ctrlchnlname)
|
||||
self.info("bringing up loopback interface")
|
||||
self.cmd([IP_BIN, "link", "set", "lo", "up"])
|
||||
self.info("setting hostname: %s" % self.name)
|
||||
self.cmd(["hostname", self.name])
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
while self._mounts:
|
||||
source, target = self._mounts.pop(-1)
|
||||
self.umount(target)
|
||||
#print "XXX del vnodeclient:", self.vnodeclient
|
||||
# XXX XXX XXX this causes a serious crash
|
||||
#del self.vnodeclient
|
||||
for netif in self.netifs():
|
||||
netif.shutdown()
|
||||
try:
|
||||
os.kill(self.pid, signal.SIGTERM)
|
||||
os.waitpid(self.pid, 0)
|
||||
except OSError:
|
||||
pass
|
||||
try:
|
||||
os.unlink(self.ctrlchnlname)
|
||||
except OSError:
|
||||
pass
|
||||
self._netif.clear()
|
||||
#del self.session
|
||||
# print "XXX del vnodeclient:", self.vnodeclient
|
||||
del self.vnodeclient
|
||||
self.up = False
|
||||
|
||||
def cmd(self, args, wait = True):
|
||||
return self.vnodeclient.cmd(args, wait)
|
||||
|
||||
def cmdresult(self, args):
|
||||
return self.vnodeclient.cmdresult(args)
|
||||
|
||||
def popen(self, args):
|
||||
return self.vnodeclient.popen(args)
|
||||
|
||||
def icmd(self, args):
|
||||
return self.vnodeclient.icmd(args)
|
||||
|
||||
def redircmd(self, infd, outfd, errfd, args, wait = True):
|
||||
return self.vnodeclient.redircmd(infd, outfd, errfd, args, wait)
|
||||
|
||||
def term(self, sh = "/bin/sh"):
|
||||
return self.vnodeclient.term(sh = sh)
|
||||
|
||||
def termcmdstring(self, sh = "/bin/sh"):
|
||||
return self.vnodeclient.termcmdstring(sh = sh)
|
||||
|
||||
def shcmd(self, cmdstr, sh = "/bin/sh"):
|
||||
return self.vnodeclient.shcmd(cmdstr, sh = sh)
|
||||
|
||||
def boot(self):
|
||||
pass
|
||||
|
||||
def mount(self, source, target):
|
||||
source = os.path.abspath(source)
|
||||
self.info("mounting %s at %s" % (source, target))
|
||||
try:
|
||||
shcmd = "mkdir -p '%s' && %s -n --bind '%s' '%s'" % \
|
||||
(target, MOUNT_BIN, source, target)
|
||||
self.shcmd(shcmd)
|
||||
self._mounts.append((source, target))
|
||||
except:
|
||||
self.warn("mounting failed for %s at %s" % (source, target))
|
||||
|
||||
def umount(self, target):
|
||||
self.info("unmounting '%s'" % target)
|
||||
try:
|
||||
self.cmd([UMOUNT_BIN, "-n", "-l", target])
|
||||
except:
|
||||
self.warn("unmounting failed for %s" % target)
|
||||
|
||||
def newifindex(self):
|
||||
with self.lock:
|
||||
return PyCoreNode.newifindex(self)
|
||||
|
||||
def newveth(self, ifindex = None, ifname = None, net = None):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if ifindex is None:
|
||||
ifindex = self.newifindex()
|
||||
if ifname is None:
|
||||
ifname = "eth%d" % ifindex
|
||||
sessionid = self.session.shortsessionid()
|
||||
name = "n%s.%s.%s" % (self.objid, ifindex, sessionid)
|
||||
localname = "n%s.%s.%s" % (self.objid, ifname, sessionid)
|
||||
ifclass = VEth
|
||||
veth = ifclass(node = self, name = name, localname = localname,
|
||||
mtu = 1500, net = net, start = self.up)
|
||||
if self.up:
|
||||
check_call([IP_BIN, "link", "set", veth.name,
|
||||
"netns", str(self.pid)])
|
||||
self.cmd([IP_BIN, "link", "set", veth.name, "name", ifname])
|
||||
veth.name = ifname
|
||||
try:
|
||||
self.addnetif(veth, ifindex)
|
||||
except:
|
||||
veth.shutdown()
|
||||
del veth
|
||||
raise
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
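# The veth naming and namespace hand-off above follow a fixed recipe; the
# equivalent ip commands for one interface are listed below (object id,
# session id and namespace pid are placeholders):
objid, ifindex, sessionid, pid = 5, 0, "abcd", 12345
ifname = "eth%d" % ifindex
name = "n%s.%s.%s" % (objid, ifindex, sessionid)      # peer end, moved into the node
localname = "n%s.%s.%s" % (objid, ifname, sessionid)  # host end, attached to a bridge
for cmd in ("ip link add name %s type veth peer name %s" % (localname, name),
            "ip link set %s up" % localname,
            "ip link set %s netns %d" % (name, pid),     # run on the host
            "ip link set %s name %s" % (name, ifname)):  # run inside the namespace
    print(cmd)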
|
||||
|
||||
def newtuntap(self, ifindex = None, ifname = None, net = None):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if ifindex is None:
|
||||
ifindex = self.newifindex()
|
||||
if ifname is None:
|
||||
ifname = "eth%d" % ifindex
|
||||
sessionid = self.session.shortsessionid()
|
||||
localname = "n%s.%s.%s" % (self.objid, ifindex, sessionid)
|
||||
name = ifname
|
||||
ifclass = TunTap
|
||||
tuntap = ifclass(node = self, name = name, localname = localname,
|
||||
mtu = 1500, net = net, start = self.up)
|
||||
try:
|
||||
self.addnetif(tuntap, ifindex)
|
||||
except:
|
||||
tuntap.shutdown()
|
||||
del tuntap
|
||||
raise
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def sethwaddr(self, ifindex, addr):
|
||||
self._netif[ifindex].sethwaddr(addr)
|
||||
if self.up:
|
||||
(status, result) = self.cmdresult([IP_BIN, "link", "set", "dev",
|
||||
self.ifname(ifindex), "address", str(addr)])
|
||||
if status:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
|
||||
"SimpleLxcNode.sethwaddr()",
|
||||
"error setting MAC address %s" % str(addr))
|
||||
def addaddr(self, ifindex, addr):
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "addr", "add", str(addr),
|
||||
"dev", self.ifname(ifindex)])
|
||||
self._netif[ifindex].addaddr(addr)
|
||||
|
||||
def deladdr(self, ifindex, addr):
|
||||
try:
|
||||
self._netif[ifindex].deladdr(addr)
|
||||
except ValueError:
|
||||
self.warn("trying to delete unknown address: %s" % addr)
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "addr", "del", str(addr),
|
||||
"dev", self.ifname(ifindex)])
|
||||
|
||||
valid_deladdrtype = ("inet", "inet6", "inet6link")
|
||||
def delalladdr(self, ifindex, addrtypes = valid_deladdrtype):
|
||||
addr = self.getaddr(self.ifname(ifindex), rescan = True)
|
||||
for t in addrtypes:
|
||||
if t not in self.valid_deladdrtype:
|
||||
raise ValueError, "addr type must be in: " + \
|
||||
" ".join(self.valid_deladdrtype)
|
||||
for a in addr[t]:
|
||||
self.deladdr(ifindex, a)
|
||||
# update cached information
|
||||
self.getaddr(self.ifname(ifindex), rescan = True)
|
||||
|
||||
def ifup(self, ifindex):
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "link", "set", self.ifname(ifindex), "up"])
|
||||
|
||||
def newnetif(self, net = None, addrlist = [], hwaddr = None,
|
||||
ifindex = None, ifname = None):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if isinstance(net, EmaneNode):
|
||||
ifindex = self.newtuntap(ifindex = ifindex, ifname = ifname,
|
||||
net = net)
|
||||
# TUN/TAP is not ready for addressing yet; the device may
|
||||
# take some time to appear, and installing it into a
|
||||
# namespace after it has been bound removes addressing;
|
||||
# save addresses with the interface now
|
||||
self.attachnet(ifindex, net)
|
||||
netif = self.netif(ifindex)
|
||||
netif.sethwaddr(hwaddr)
|
||||
for addr in maketuple(addrlist):
|
||||
netif.addaddr(addr)
|
||||
return ifindex
|
||||
else:
|
||||
ifindex = self.newveth(ifindex = ifindex, ifname = ifname,
|
||||
net = net)
|
||||
if net is not None:
|
||||
self.attachnet(ifindex, net)
|
||||
if hwaddr:
|
||||
self.sethwaddr(ifindex, hwaddr)
|
||||
for addr in maketuple(addrlist):
|
||||
self.addaddr(ifindex, addr)
|
||||
self.ifup(ifindex)
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def connectnode(self, ifname, othernode, otherifname):
|
||||
tmplen = 8
|
||||
tmp1 = "tmp." + "".join([random.choice(string.ascii_lowercase)
|
||||
for x in xrange(tmplen)])
|
||||
tmp2 = "tmp." + "".join([random.choice(string.ascii_lowercase)
|
||||
for x in xrange(tmplen)])
|
||||
check_call([IP_BIN, "link", "add", "name", tmp1,
|
||||
"type", "veth", "peer", "name", tmp2])
|
||||
|
||||
check_call([IP_BIN, "link", "set", tmp1, "netns", str(self.pid)])
|
||||
self.cmd([IP_BIN, "link", "set", tmp1, "name", ifname])
|
||||
self.addnetif(PyCoreNetIf(self, ifname), self.newifindex())
|
||||
|
||||
check_call([IP_BIN, "link", "set", tmp2, "netns", str(othernode.pid)])
|
||||
othernode.cmd([IP_BIN, "link", "set", tmp2, "name", otherifname])
|
||||
othernode.addnetif(PyCoreNetIf(othernode, otherifname),
|
||||
othernode.newifindex())
|
||||
|
||||
def addfile(self, srcname, filename):
|
||||
shcmd = "mkdir -p $(dirname '%s') && mv '%s' '%s' && sync" % \
|
||||
(filename, srcname, filename)
|
||||
self.shcmd(shcmd)
|
||||
|
||||
def getaddr(self, ifname, rescan = False):
|
||||
return self.vnodeclient.getaddr(ifname = ifname, rescan = rescan)
|
||||
|
||||
def netifstats(self, ifname = None):
|
||||
return self.vnodeclient.netifstats(ifname = ifname)
|
||||
|
||||
|
||||
class LxcNode(SimpleLxcNode):
|
||||
def __init__(self, session, objid = None, name = None,
|
||||
nodedir = None, bootsh = "boot.sh", verbose = False,
|
||||
start = True):
|
||||
super(LxcNode, self).__init__(session = session, objid = objid,
|
||||
name = name, nodedir = nodedir,
|
||||
verbose = verbose, start = start)
|
||||
self.bootsh = bootsh
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def boot(self):
|
||||
self.session.services.bootnodeservices(self)
|
||||
|
||||
def validate(self):
|
||||
self.session.services.validatenodeservices(self)
|
||||
|
||||
def startup(self):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
self.makenodedir()
|
||||
super(LxcNode, self).startup()
|
||||
self.privatedir("/var/run")
|
||||
self.privatedir("/var/log")
|
||||
except OSError, e:
|
||||
self.warn("Error with LxcNode.startup(): %s" % e)
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
|
||||
"LxcNode.startup()", "%s" % e)
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
self.lock.acquire()
|
||||
# services are instead stopped when session enters datacollect state
|
||||
#self.session.services.stopnodeservices(self)
|
||||
try:
|
||||
super(LxcNode, self).shutdown()
|
||||
finally:
|
||||
self.rmnodedir()
|
||||
self.lock.release()
|
||||
|
||||
def privatedir(self, path):
|
||||
if path[0] != "/":
|
||||
raise ValueError, "path not fully qualified: " + path
|
||||
hostpath = os.path.join(self.nodedir, path[1:].replace("/", "."))
|
||||
try:
|
||||
os.mkdir(hostpath)
|
||||
except OSError:
|
||||
pass
|
||||
except Exception, e:
|
||||
raise Exception, e
|
||||
self.mount(hostpath, path)
|
||||
|
||||
def hostfilename(self, filename):
|
||||
''' Return the name of a node's file on the host filesystem.
|
||||
'''
|
||||
dirname, basename = os.path.split(filename)
|
||||
if not basename:
|
||||
raise ValueError, "no basename for filename: " + filename
|
||||
if dirname and dirname[0] == "/":
|
||||
dirname = dirname[1:]
|
||||
dirname = dirname.replace("/", ".")
|
||||
dirname = os.path.join(self.nodedir, dirname)
|
||||
return os.path.join(dirname, basename)
|
||||
|
||||
def opennodefile(self, filename, mode = "w"):
|
||||
hostfilename = self.hostfilename(filename)
|
||||
dirname, basename = os.path.split(hostfilename)
|
||||
if not os.path.isdir(dirname):
|
||||
os.makedirs(dirname, mode = 0755)
|
||||
return open(hostfilename, mode)
|
||||
|
||||
def nodefile(self, filename, contents, mode = 0644):
|
||||
f = self.opennodefile(filename, "w")
|
||||
f.write(contents)
|
||||
os.chmod(f.name, mode)
|
||||
f.close()
|
||||
self.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode))
|
||||
|
||||
def nodefilecopy(self, filename, srcfilename, mode = None):
|
||||
''' Copy a file to a node, following symlinks and preserving metadata.
|
||||
Change file mode if specified.
|
||||
'''
|
||||
hostfilename = self.hostfilename(filename)
|
||||
shutil.copy2(srcfilename, hostfilename)
|
||||
if mode is not None:
|
||||
os.chmod(hostfilename, mode)
|
||||
self.info("copied nodefile: '%s'; mode: %s" % (hostfilename, mode))
221
daemon/core/netns/vnodeclient.py
Normal file
@@ -0,0 +1,221 @@
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Tom Goff <thomas.goff@boeing.com>
|
||||
#
|
||||
'''
|
||||
vnodeclient.py: implementation of the VnodeClient class for issuing commands
|
||||
over a control channel to the vnoded process running in a network namespace.
|
||||
The control channel can be accessed via calls to the vcmd Python module or
|
||||
by invoking the vcmd shell command.
|
||||
'''
|
||||
|
||||
import os, stat, sys
|
||||
from core.constants import *
|
||||
|
||||
USE_VCMD_MODULE = True
|
||||
|
||||
if USE_VCMD_MODULE:
|
||||
import vcmd
|
||||
else:
|
||||
import subprocess
|
||||
|
||||
VCMD = os.path.join(CORE_SBIN_DIR, "vcmd")
|
||||
|
||||
class VnodeClient(object):
|
||||
def __init__(self, name, ctrlchnlname):
|
||||
self.name = name
|
||||
self.ctrlchnlname = ctrlchnlname
|
||||
if USE_VCMD_MODULE:
|
||||
self.cmdchnl = vcmd.VCmd(self.ctrlchnlname)
|
||||
else:
|
||||
self.cmdchnl = None
|
||||
self._addr = {}
|
||||
|
||||
def warn(self, msg):
|
||||
print >> sys.stderr, "%s: %s" % (self.name, msg)
|
||||
|
||||
def connected(self):
|
||||
if USE_VCMD_MODULE:
|
||||
return self.cmdchnl.connected()
|
||||
else:
|
||||
return True
|
||||
|
||||
def cmd(self, args, wait = True):
|
||||
''' Execute a command on a node and return the status (return code).
|
||||
'''
|
||||
if USE_VCMD_MODULE:
|
||||
if not self.cmdchnl.connected():
|
||||
raise ValueError, "self.cmdchnl not connected"
|
||||
tmp = self.cmdchnl.qcmd(args)
|
||||
if not wait:
|
||||
return tmp
|
||||
tmp = tmp.wait()
|
||||
else:
|
||||
if wait:
|
||||
mode = os.P_WAIT
|
||||
else:
|
||||
mode = os.P_NOWAIT
|
||||
tmp = os.spawnlp(mode, VCMD, VCMD, "-c",
|
||||
self.ctrlchnlname, "-q", "--", *args)
|
||||
if not wait:
|
||||
return tmp
|
||||
if tmp:
|
||||
self.warn("cmd exited with status %s: %s" % (tmp, str(args)))
|
||||
return tmp
|
||||
|
||||
def cmdresult(self, args):
|
||||
''' Execute a command on a node and return a tuple containing the
|
||||
exit status and result string. stderr output
|
||||
is folded into the stdout result string.
|
||||
'''
|
||||
cmdid, cmdin, cmdout, cmderr = self.popen(args)
|
||||
result = cmdout.read()
|
||||
result += cmderr.read()
|
||||
cmdin.close()
|
||||
cmdout.close()
|
||||
cmderr.close()
|
||||
status = cmdid.wait()
|
||||
return (status, result)
|
||||
|
||||
def popen(self, args):
|
||||
if USE_VCMD_MODULE:
|
||||
if not self.cmdchnl.connected():
|
||||
raise ValueError, "self.cmdchnl not connected"
|
||||
return self.cmdchnl.popen(args)
|
||||
else:
|
||||
cmd = [VCMD, "-c", self.ctrlchnlname, "--"]
|
||||
cmd.extend(args)
|
||||
tmp = subprocess.Popen(cmd, stdin = subprocess.PIPE,
|
||||
stdout = subprocess.PIPE,
|
||||
stderr = subprocess.PIPE)
|
||||
return tmp, tmp.stdin, tmp.stdout, tmp.stderr
|
||||
|
||||
def icmd(self, args):
|
||||
return os.spawnlp(os.P_WAIT, VCMD, VCMD, "-c", self.ctrlchnlname,
|
||||
"--", *args)
|
||||
|
||||
def redircmd(self, infd, outfd, errfd, args, wait = True):
|
||||
'''
|
||||
Execute a command on a node with standard input, output, and
|
||||
error redirected according to the given file descriptors.
|
||||
'''
|
||||
if not USE_VCMD_MODULE:
|
||||
raise NotImplementedError
|
||||
if not self.cmdchnl.connected():
|
||||
raise ValueError, "self.cmdchnl not connected"
|
||||
tmp = self.cmdchnl.redircmd(infd, outfd, errfd, args)
|
||||
if not wait:
|
||||
return tmp
|
||||
tmp = tmp.wait()
|
||||
if tmp:
|
||||
self.warn("cmd exited with status %s: %s" % (tmp, str(args)))
|
||||
return tmp
|
||||
|
||||
def term(self, sh = "/bin/sh"):
|
||||
return os.spawnlp(os.P_NOWAIT, "xterm", "xterm", "-ut",
|
||||
"-title", self.name, "-e",
|
||||
VCMD, "-c", self.ctrlchnlname, "--", sh)
|
||||
|
||||
def termcmdstring(self, sh = "/bin/sh"):
|
||||
return "%s -c %s -- %s" % (VCMD, self.ctrlchnlname, sh)
|
||||
|
||||
def shcmd(self, cmdstr, sh = "/bin/sh"):
|
||||
return self.cmd([sh, "-c", cmdstr])
|
||||
|
||||
def getaddr(self, ifname, rescan = False):
|
||||
if ifname in self._addr and not rescan:
|
||||
return self._addr[ifname]
|
||||
tmp = {"ether": [], "inet": [], "inet6": [], "inet6link": []}
|
||||
cmd = [IP_BIN, "addr", "show", "dev", ifname]
|
||||
cmdid, cmdin, cmdout, cmderr = self.popen(cmd)
|
||||
cmdin.close()
|
||||
for line in cmdout:
|
||||
line = line.strip().split()
|
||||
if line[0] == "link/ether":
|
||||
tmp["ether"].append(line[1])
|
||||
elif line[0] == "inet":
|
||||
tmp["inet"].append(line[1])
|
||||
elif line[0] == "inet6":
|
||||
if line[3] == "global":
|
||||
tmp["inet6"].append(line[1])
|
||||
elif line[3] == "link":
|
||||
tmp["inet6link"].append(line[1])
|
||||
else:
|
||||
self.warn("unknown scope: %s" % line[3])
|
||||
else:
|
||||
pass
|
||||
err = cmderr.read()
|
||||
cmdout.close()
|
||||
cmderr.close()
|
||||
status = cmdid.wait()
|
||||
if status:
|
||||
self.warn("nonzero exit status (%s) for cmd: %s" % (status, cmd))
|
||||
if err:
|
||||
self.warn("error output: %s" % err)
|
||||
self._addr[ifname] = tmp
|
||||
return tmp
|
||||
|
||||
def netifstats(self, ifname = None):
|
||||
stats = {}
|
||||
cmd = ["cat", "/proc/net/dev"]
|
||||
cmdid, cmdin, cmdout, cmderr = self.popen(cmd)
|
||||
cmdin.close()
|
||||
# ignore first line
|
||||
cmdout.readline()
|
||||
# second line has count names
|
||||
tmp = cmdout.readline().strip().split("|")
|
||||
rxkeys = tmp[1].split()
|
||||
txkeys = tmp[2].split()
|
||||
for line in cmdout:
|
||||
line = line.strip().split()
|
||||
devname, tmp = line[0].split(":")
|
||||
if tmp:
|
||||
line.insert(1, tmp)
|
||||
stats[devname] = {"rx": {}, "tx": {}}
|
||||
field = 1
|
||||
for count in rxkeys:
|
||||
stats[devname]["rx"][count] = int(line[field])
|
||||
field += 1
|
||||
for count in txkeys:
|
||||
stats[devname]["tx"][count] = int(line[field])
|
||||
field += 1
|
||||
err = cmderr.read()
|
||||
cmdout.close()
|
||||
cmderr.close()
|
||||
status = cmdid.wait()
|
||||
if status:
|
||||
self.warn("nonzero exit status (%s) for cmd: %s" % (status, cmd))
|
||||
if err:
|
||||
self.warn("error output: %s" % err)
|
||||
if ifname is not None:
|
||||
return stats[ifname]
|
||||
else:
|
||||
return stats
|
||||
|
||||
def createclients(sessiondir, clientcls = VnodeClient,
|
||||
cmdchnlfilterfunc = None):
|
||||
direntries = map(lambda x: os.path.join(sessiondir, x),
|
||||
os.listdir(sessiondir))
|
||||
cmdchnls = filter(lambda x: stat.S_ISSOCK(os.stat(x).st_mode), direntries)
|
||||
if cmdchnlfilterfunc:
|
||||
cmdchnls = filter(cmdchnlfilterfunc, cmdchnls)
|
||||
cmdchnls.sort()
|
||||
return map(lambda x: clientcls(os.path.basename(x), x), cmdchnls)
|
||||
|
||||
def createremoteclients(sessiondir, clientcls = VnodeClient,
|
||||
filterfunc = None):
|
||||
''' Creates remote VnodeClients, for nodes emulated on other machines. The
|
||||
session.Broker writes an n1.conf/server file containing the server's info.
|
||||
'''
|
||||
direntries = map(lambda x: os.path.join(sessiondir, x),
|
||||
os.listdir(sessiondir))
|
||||
nodedirs = filter(lambda x: stat.S_ISDIR(os.stat(x).st_mode), direntries)
|
||||
nodedirs = filter(lambda x: os.path.exists(os.path.join(x, "server")),
|
||||
nodedirs)
|
||||
if filterfunc:
|
||||
nodedirs = filter(filterfunc, nodedirs)
|
||||
nodedirs.sort()
|
||||
return map(lambda x: clientcls(x), nodedirs)
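
The following is an editorial sketch, not part of the imported file: a minimal
example of driving VnodeClient directly, assuming a running vnoded whose
session directory and control channel name (shown here as /tmp/pycore.12345
and n1) are hypothetical.

    from core.netns.vnodeclient import VnodeClient, createclients

    client = VnodeClient("n1", "/tmp/pycore.12345/n1")
    if client.connected():
        status = client.cmd(["hostname"])                 # exit status only
        status, output = client.cmdresult(["ip", "addr", "show"])
        print output

    # build a client for every control channel socket found in a session dir
    for c in createclients("/tmp/pycore.12345"):
        print c.name, c.connected()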
0
daemon/core/phys/__init__.py
Normal file
268
daemon/core/phys/pnodes.py
Normal file
@@ -0,0 +1,268 @@
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
''' PhysicalNode class for including real systems in the emulated network.
|
||||
'''
|
||||
import os, threading, subprocess
|
||||
|
||||
from core.misc.ipaddr import *
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
from core.api import coreapi
|
||||
from core.coreobj import PyCoreNode, PyCoreNetIf
|
||||
from core.emane.nodes import EmaneNode
|
||||
if os.uname()[0] == "Linux":
|
||||
from core.netns.vnet import LxBrNet
|
||||
from core.netns.vif import GreTap
|
||||
elif os.uname()[0] == "FreeBSD":
|
||||
from core.bsd.vnet import NetgraphNet
|
||||
|
||||
|
||||
class PhysicalNode(PyCoreNode):
|
||||
def __init__(self, session, objid = None, name = None,
|
||||
nodedir = None, verbose = False, start = True):
|
||||
PyCoreNode.__init__(self, session, objid, name, verbose=verbose,
|
||||
start=start)
|
||||
self.nodedir = nodedir
|
||||
self.up = start
|
||||
self.lock = threading.RLock()
|
||||
self._mounts = []
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def boot(self):
|
||||
self.session.services.bootnodeservices(self)
|
||||
|
||||
def validate(self):
|
||||
self.session.services.validatenodeservices(self)
|
||||
|
||||
def startup(self):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
self.makenodedir()
|
||||
#self.privatedir("/var/run")
|
||||
#self.privatedir("/var/log")
|
||||
except OSError, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
|
||||
"PhysicalNode.startup()", e)
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
self.lock.acquire()
|
||||
while self._mounts:
|
||||
source, target = self._mounts.pop(-1)
|
||||
self.umount(target)
|
||||
for netif in self.netifs():
|
||||
netif.shutdown()
|
||||
self.rmnodedir()
|
||||
self.lock.release()
|
||||
|
||||
|
||||
def termcmdstring(self, sh = "/bin/sh"):
|
||||
''' The broker will add the appropriate SSH command to open a terminal
|
||||
on this physical node.
|
||||
'''
|
||||
return sh
|
||||
|
||||
def cmd(self, args, wait = True):
|
||||
''' run a command on the physical node
|
||||
'''
|
||||
os.chdir(self.nodedir)
|
||||
try:
|
||||
if wait:
|
||||
# os.spawnlp(os.P_WAIT, args)
|
||||
subprocess.check_call(args)
|
||||
else:
|
||||
# os.spawnlp(os.P_NOWAIT, args)
|
||||
subprocess.Popen(args)
|
||||
except subprocess.CalledProcessError, e:
|
||||
self.warn("cmd exited with status %s: %s" % (e, str(args)))
|
||||
|
||||
def cmdresult(self, args):
|
||||
''' run a command on the physical node and get the result
|
||||
'''
|
||||
os.chdir(self.nodedir)
|
||||
# in Python 2.7 we can use subprocess.check_output() here
|
||||
tmp = subprocess.Popen(args, stdin = subprocess.PIPE,
|
||||
stdout = subprocess.PIPE,
|
||||
stderr = subprocess.PIPE)
|
||||
result = tmp.stdout.read()
|
||||
result += tmp.stderr.read()
|
||||
tmp.stdin.close()
|
||||
tmp.stdout.close()
|
||||
tmp.stderr.close()
|
||||
status = tmp.wait()
|
||||
return (status, result)
|
||||
|
||||
def shcmd(self, cmdstr, sh = "/bin/sh"):
|
||||
return self.cmd([sh, "-c", cmdstr])
|
||||
|
||||
def sethwaddr(self, ifindex, addr):
|
||||
''' same as SimpleLxcNode.sethwaddr()
|
||||
'''
|
||||
self._netif[ifindex].sethwaddr(addr)
|
||||
ifname = self.ifname(ifindex)
|
||||
if self.up:
|
||||
(status, result) = self.cmdresult([IP_BIN, "link", "set", "dev",
|
||||
ifname, "address", str(addr)])
|
||||
if status:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
|
||||
"PhysicalNode.sethwaddr()",
|
||||
"error setting MAC address %s" % str(addr))
|
||||
|
||||
def addaddr(self, ifindex, addr):
|
||||
''' same as SimpleLxcNode.addaddr()
|
||||
'''
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "addr", "add", str(addr),
|
||||
"dev", self.ifname(ifindex)])
|
||||
self._netif[ifindex].addaddr(addr)
|
||||
|
||||
def deladdr(self, ifindex, addr):
|
||||
''' same as SimpleLxcNode.deladdr()
|
||||
'''
|
||||
try:
|
||||
self._netif[ifindex].deladdr(addr)
|
||||
except ValueError:
|
||||
self.warn("trying to delete unknown address: %s" % addr)
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "addr", "del", str(addr),
|
||||
"dev", self.ifname(ifindex)])
|
||||
|
||||
def adoptnetif(self, netif, ifindex, hwaddr, addrlist):
|
||||
''' The broker builds a GreTap tunnel device to this physical node.
|
||||
When a link message is received linking this node to another part of
|
||||
the emulation, no new interface is created; instead, adopt the
|
||||
GreTap netif as the node interface.
|
||||
'''
|
||||
netif.name = "gt%d" % ifindex
|
||||
netif.node = self
|
||||
self.addnetif(netif, ifindex)
|
||||
# use a more reasonable name, e.g. "gt0" instead of "gt.56286.150"
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "link", "set", "dev", netif.localname, "down"])
|
||||
self.cmd([IP_BIN, "link", "set", netif.localname, "name", netif.name])
|
||||
netif.localname = netif.name
|
||||
if hwaddr:
|
||||
self.sethwaddr(ifindex, hwaddr)
|
||||
for addr in maketuple(addrlist):
|
||||
self.addaddr(ifindex, addr)
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "link", "set", "dev", netif.localname, "up"])
|
||||
|
||||
def linkconfig(self, netif, bw = None, delay = None,
|
||||
loss = None, duplicate = None, jitter = None, netif2 = None):
|
||||
''' Apply tc queing disciplines using LxBrNet.linkconfig()
|
||||
'''
|
||||
if os.uname()[0] == "Linux":
|
||||
netcls = LxBrNet
|
||||
elif os.uname()[0] == "FreeBSD":
|
||||
netcls = NetgraphNet
|
||||
else:
|
||||
raise NotImplementedError, "unsupported platform"
|
||||
# borrow the tc qdisc commands from LxBrNet.linkconfig()
|
||||
tmp = netcls(session=self.session, start=False)
|
||||
tmp.up = True
|
||||
tmp.linkconfig(netif, bw=bw, delay=delay, loss=loss,
|
||||
duplicate=duplicate, jitter=jitter, netif2=netif2)
|
||||
del tmp
|
||||
|
||||
def newifindex(self):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
while self.ifindex in self._netif:
|
||||
self.ifindex += 1
|
||||
ifindex = self.ifindex
|
||||
self.ifindex += 1
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def newnetif(self, net = None, addrlist = [], hwaddr = None,
|
||||
ifindex = None, ifname = None):
|
||||
if self.up and net is None:
|
||||
raise NotImplementedError
|
||||
if ifindex is None:
|
||||
ifindex = self.newifindex()
|
||||
|
||||
if self.up:
|
||||
# this is reached when this node is linked to a network node
|
||||
# tunnel to net not built yet, so build it now and adopt it
|
||||
gt = self.session.broker.addnettunnel(net.objid)
|
||||
if gt is None or len(gt) != 1:
|
||||
self.session.warn("Error building tunnel from PhysicalNode."
|
||||
"newnetif()")
|
||||
gt = gt[0]
|
||||
net.detach(gt)
|
||||
self.adoptnetif(gt, ifindex, hwaddr, addrlist)
|
||||
return ifindex
|
||||
|
||||
# this is reached when configuring services (self.up=False)
|
||||
if ifname is None:
|
||||
ifname = "gt%d" % ifindex
|
||||
netif = GreTap(node = self, name = ifname, session = self.session,
|
||||
start = False)
|
||||
self.adoptnetif(netif, ifindex, hwaddr, addrlist)
|
||||
return ifindex
|
||||
|
||||
|
||||
def privatedir(self, path):
|
||||
if path[0] != "/":
|
||||
raise ValueError, "path not fully qualified: " + path
|
||||
hostpath = os.path.join(self.nodedir, path[1:].replace("/", "."))
|
||||
try:
|
||||
os.mkdir(hostpath)
|
||||
except OSError:
|
||||
pass
|
||||
except Exception, e:
|
||||
raise Exception, e
|
||||
self.mount(hostpath, path)
|
||||
|
||||
def mount(self, source, target):
|
||||
source = os.path.abspath(source)
|
||||
self.info("mounting %s at %s" % (source, target))
|
||||
try:
|
||||
os.makedirs(target)
|
||||
except OSError:
|
||||
pass
|
||||
try:
|
||||
self.cmd([MOUNT_BIN, "--bind", source, target])
|
||||
self._mounts.append((source, target))
|
||||
except:
|
||||
self.warn("mounting failed for %s at %s" % (source, target))
|
||||
|
||||
def umount(self, target):
|
||||
self.info("unmounting '%s'" % target)
|
||||
try:
|
||||
self.cmd([UMOUNT_BIN, "-l", target])
|
||||
except:
|
||||
self.warn("unmounting failed for %s" % target)
|
||||
|
||||
def opennodefile(self, filename, mode = "w"):
|
||||
dirname, basename = os.path.split(filename)
|
||||
if not basename:
|
||||
raise ValueError, "no basename for filename: " + filename
|
||||
if dirname and dirname[0] == "/":
|
||||
dirname = dirname[1:]
|
||||
dirname = dirname.replace("/", ".")
|
||||
dirname = os.path.join(self.nodedir, dirname)
|
||||
if not os.path.isdir(dirname):
|
||||
os.makedirs(dirname, mode = 0755)
|
||||
hostfilename = os.path.join(dirname, basename)
|
||||
return open(hostfilename, mode)
|
||||
|
||||
def nodefile(self, filename, contents, mode = 0644):
|
||||
f = self.opennodefile(filename, "w")
|
||||
f.write(contents)
|
||||
os.chmod(f.name, mode)
|
||||
f.close()
|
||||
self.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode))
27
daemon/core/pycore.py
Normal file
@@ -0,0 +1,27 @@
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
|
||||
"""
|
||||
This is a convenience module that imports a set of platform-dependent
|
||||
defaults.
|
||||
"""
|
||||
|
||||
from misc.utils import ensurepath
|
||||
ensurepath(["/sbin", "/bin", "/usr/sbin", "/usr/bin"])
|
||||
del ensurepath
|
||||
|
||||
from session import Session
|
||||
|
||||
import os
|
||||
|
||||
if os.uname()[0] == "Linux":
|
||||
from netns import nodes
|
||||
try:
|
||||
from xen import xen
|
||||
except ImportError:
|
||||
#print "Xen support disabled."
|
||||
pass
|
||||
elif os.uname()[0] == "FreeBSD":
|
||||
from bsd import nodes
|
||||
from phys import pnodes
|
||||
del os
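
As an editorial sketch (not part of the imported file): a typical script built
on this convenience module might look like the following. The Session keyword
persistent=True, the addobj() helper, and the nodes.CoreNode / nodes.SwitchNode
classes are assumptions based on the imports above, not definitions shown in
this commit.

    from core import pycore

    session = pycore.Session(persistent=True)
    switch = session.addobj(cls=pycore.nodes.SwitchNode, name="switch1")
    node = session.addobj(cls=pycore.nodes.CoreNode, name="n1")
    node.newnetif(net=switch, addrlist=["10.0.0.1/24"])
    node.icmd(["ip", "addr", "show"])   # run a command inside the node
    session.shutdown()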
202
daemon/core/sdt.py
Normal file
@@ -0,0 +1,202 @@
#
|
||||
# CORE
|
||||
# Copyright (c)2012-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
sdt.py: Scripted Display Tool (SDT3D) helper
|
||||
'''
|
||||
|
||||
from core.constants import *
|
||||
from core.api import coreapi
|
||||
from coreobj import PyCoreNet, PyCoreObj
|
||||
from core.netns import nodes
|
||||
import socket
|
||||
|
||||
class Sdt(object):
|
||||
''' Helper class for exporting session objects to NRL's SDT3D.
|
||||
The connect() method initializes the display, and can be invoked
|
||||
when a node position or link has changed.
|
||||
'''
|
||||
DEFAULT_SDT_PORT = 5000
|
||||
# default altitude (in meters) for flyto view
|
||||
DEFAULT_ALT = 2500
|
||||
# TODO: read in user's nodes.conf here; below are default node types
|
||||
# from the GUI
|
||||
DEFAULT_SPRITES = [('router', 'router.gif'), ('host', 'host.gif'),
|
||||
('PC', 'pc.gif'), ('mdr', 'mdr.gif'),
|
||||
('prouter', 'router_green.gif'), ('xen', 'xen.gif'),
|
||||
('hub', 'hub.gif'), ('lanswitch','lanswitch.gif'),
|
||||
('wlan', 'wlan.gif'), ('rj45','rj45.gif'),
|
||||
('tunnel','tunnel.gif'),
|
||||
]
|
||||
|
||||
def __init__(self, session):
|
||||
self.session = session
|
||||
self.sock = None
|
||||
self.connected = False
|
||||
self.showerror = True
|
||||
self.verbose = self.session.getcfgitembool('verbose', False)
|
||||
self.address = ("127.0.0.1", self.DEFAULT_SDT_PORT)
|
||||
|
||||
def is_enabled(self):
|
||||
if not hasattr(self.session.options, 'enablesdt'):
|
||||
return False
|
||||
if self.session.options.enablesdt == '1':
|
||||
return True
|
||||
return False
|
||||
|
||||
def connect(self, flags=0):
|
||||
if not self.is_enabled():
|
||||
return False
|
||||
if self.connected:
|
||||
return True
|
||||
if self.showerror:
|
||||
self.session.info("connecting to SDT at %s:%s" % self.address)
|
||||
if self.sock is None:
|
||||
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||
try:
|
||||
self.sock.connect(self.address)
|
||||
except Exception, e:
|
||||
self.session.warn("SDT socket connect error: %s" % e)
|
||||
return False
|
||||
if not self.initialize():
|
||||
return False
|
||||
self.connected = True
|
||||
# refresh all objects in SDT3D when connecting after session start
|
||||
if not flags & coreapi.CORE_API_ADD_FLAG:
|
||||
if not self.sendobjs():
|
||||
return False
|
||||
return True
|
||||
|
||||
def initialize(self):
|
||||
''' Load icon sprites, and fly to the reference point location on
|
||||
the virtual globe.
|
||||
'''
|
||||
if not self.cmd('path "%s/icons/normal"' % CORE_DATA_DIR):
|
||||
return False
|
||||
# send node type to icon mappings
|
||||
for (type, icon) in self.DEFAULT_SPRITES:
|
||||
if not self.cmd('sprite %s image %s' % (type, icon)):
|
||||
return False
|
||||
(lat, long) = self.session.location.refgeo[:2]
|
||||
return self.cmd('flyto %.6f,%.6f,%d' % (long, lat, self.DEFAULT_ALT))
|
||||
|
||||
def disconnect(self):
|
||||
try:
|
||||
self.sock.close()
|
||||
except:
|
||||
pass
|
||||
self.sock = None
|
||||
self.connected = False
|
||||
|
||||
def shutdown(self):
|
||||
''' Invoked from Session.shutdown() and Session.checkshutdown().
|
||||
'''
|
||||
# TODO: clear SDT display here?
|
||||
self.disconnect()
|
||||
self.showerror = True
|
||||
|
||||
def cmd(self, cmdstr):
|
||||
''' Send an SDT command over a UDP socket. socket.sendall() is used
|
||||
instead of socket.sendto() because it raises an exception when there
is no listener on the socket.
|
||||
'''
|
||||
if self.sock is None:
|
||||
return False
|
||||
try:
|
||||
if self.verbose:
|
||||
self.session.info("sdt: %s" % cmdstr)
|
||||
self.sock.sendall("%s\n" % cmdstr)
|
||||
return True
|
||||
except Exception, e:
|
||||
if self.showerror:
|
||||
self.session.warn("SDT connection error: %s" % e)
|
||||
self.showerror = False
|
||||
self.connected = False
|
||||
return False
|
||||
|
||||
def updatenode(self, node, flags, x, y, z):
|
||||
''' Node is updated from a Node Message or mobility script.
|
||||
'''
|
||||
if node is None:
|
||||
return
|
||||
if not self.connect():
|
||||
return
|
||||
if flags & coreapi.CORE_API_DEL_FLAG:
|
||||
self.cmd('delete node,%d' % node.objid)
|
||||
return
|
||||
(lat, long, alt) = self.session.location.getgeo(x, y, z)
|
||||
pos = "pos %.6f,%.6f,%.6f" % (long, lat, alt)
|
||||
if flags & coreapi.CORE_API_ADD_FLAG:
|
||||
type = node.type
|
||||
if node.icon is not None:
|
||||
type = node.name
|
||||
self.cmd('sprite %s image %s' % (type, node.icon))
|
||||
self.cmd('node %d type %s label on,"%s" %s' % \
|
||||
(node.objid, type, node.name, pos))
|
||||
else:
|
||||
self.cmd('node %d %s' % (node.objid, pos))
|
||||
|
||||
def updatenodegeo(self, node, lat, long, alt):
|
||||
''' Node is updated upon receiving an EMANE Location Event.
|
||||
'''
|
||||
if node is None:
|
||||
return
|
||||
if not self.connect():
|
||||
return
|
||||
pos = "pos %.6f,%.6f,%.6f" % (long, lat, alt)
|
||||
self.cmd('node %d %s' % (node.objid, pos))
|
||||
|
||||
def updatelink(self, node1num, node2num, flags, wireless=False):
|
||||
''' Link is updated from a Link Message or by a wireless model.
|
||||
'''
|
||||
if node1num is None or node2num is None:
|
||||
return
|
||||
if not self.connect():
|
||||
return
|
||||
if flags & coreapi.CORE_API_DEL_FLAG:
|
||||
self.cmd('delete link,%s,%s' % (node1num, node2num))
|
||||
elif flags & coreapi.CORE_API_ADD_FLAG:
|
||||
attr = ""
|
||||
if wireless:
|
||||
attr = " line green"
|
||||
self.cmd('link %s,%s%s' % (node1num, node2num, attr))
|
||||
|
||||
def sendobjs(self):
|
||||
''' Session has already started, and the SDT3D GUI later connects.
|
||||
Send all node and link objects for display. Otherwise, nodes and links
|
||||
will only be drawn when they have been updated.
|
||||
'''
|
||||
nets = []
|
||||
with self.session._objslock:
|
||||
for obj in self.session.objs():
|
||||
if isinstance(obj, PyCoreNet):
|
||||
nets.append(obj)
|
||||
if not isinstance(obj, PyCoreObj):
|
||||
continue
|
||||
(x, y, z) = obj.getposition()
|
||||
if x is None or y is None:
|
||||
continue
|
||||
self.updatenode(obj, coreapi.CORE_API_ADD_FLAG, x, y, z)
|
||||
for net in nets:
|
||||
# use tolinkmsgs() to handle various types of links
|
||||
msgs = net.tolinkmsgs(flags = coreapi.CORE_API_ADD_FLAG)
|
||||
for msg in msgs:
|
||||
msghdr = msg[:coreapi.CoreMessage.hdrsiz]
|
||||
flags = coreapi.CoreMessage.unpackhdr(msghdr)[1]
|
||||
m = coreapi.CoreLinkMessage(flags, msghdr,
|
||||
msg[coreapi.CoreMessage.hdrsiz:])
|
||||
n1num = m.gettlv(coreapi.CORE_TLV_LINK_N1NUMBER)
|
||||
n2num = m.gettlv(coreapi.CORE_TLV_LINK_N2NUMBER)
|
||||
link_msg_type = m.gettlv(coreapi.CORE_TLV_LINK_TYPE)
|
||||
if isinstance(net, nodes.WlanNode) or \
|
||||
isinstance(net, nodes.EmaneNode):
|
||||
if (n1num == net.objid):
|
||||
continue
|
||||
wl = (link_msg_type == coreapi.CORE_LINK_WIRELESS)
|
||||
self.updatelink(n1num, n2num, coreapi.CORE_API_ADD_FLAG, wl)
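
For reference (editorial note, not part of the imported file): the commands
sent above are plain newline-terminated text strings over UDP, e.g.
'sprite router image router.gif' or 'node 1 type router label on,"n1"
pos -77.000000,40.000000,2.0'. A minimal standalone sender equivalent to
Sdt.cmd(), using the same default port, might look like this sketch:

    import socket

    def send_sdt(cmdstr, addr=("127.0.0.1", 5000)):
        ''' Send one SDT3D text command over a connected UDP socket. '''
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.connect(addr)
        sock.sendall("%s\n" % cmdstr)
        sock.close()

    send_sdt('sprite router image router.gif')
    send_sdt('node 1 type router label on,"n1" pos -77.000000,40.000000,2.0')
    send_sdt('link 1,2')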
760
daemon/core/service.py
Normal file
@@ -0,0 +1,760 @@
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
service.py: definition of CoreService class that is subclassed to define
|
||||
startup services and routing for nodes. A service is typically a daemon
|
||||
program launched when a node starts that provides some sort of
|
||||
service. The CoreServices class handles configuration messages for sending
|
||||
a list of available services to the GUI and for configuring individual
|
||||
services.
|
||||
'''
|
||||
|
||||
import sys, os, shlex
|
||||
|
||||
from itertools import repeat
|
||||
from core.api import coreapi
|
||||
from core.conf import ConfigurableManager, Configurable
|
||||
from core.misc.utils import maketuplefromstr, expandcorepath
|
||||
|
||||
servicelist = []
|
||||
|
||||
def addservice(service):
|
||||
global servicelist
|
||||
i = 0
|
||||
found = -1
|
||||
for s in servicelist:
|
||||
if s._group == service._group:
|
||||
found = i
|
||||
elif (found >= 0):
|
||||
# insert service into list next to existing group
|
||||
i = found + 1
|
||||
break
|
||||
i += 1
|
||||
servicelist.insert(i, service)
|
||||
|
||||
class CoreServices(ConfigurableManager):
|
||||
''' Class for interacting with a list of available startup services for
|
||||
nodes. Mostly used to convert a CoreService into a Config API
|
||||
message. This class lives in the Session object and remembers
|
||||
the default services configured for each node type, and any
|
||||
custom service configuration. A CoreService is not a Configurable.
|
||||
'''
|
||||
_name = "services"
|
||||
_type = coreapi.CORE_TLV_REG_UTILITY
|
||||
|
||||
def __init__(self, session):
|
||||
ConfigurableManager.__init__(self, session)
|
||||
# dict of default services tuples, key is node type
|
||||
self.defaultservices = {}
|
||||
# dict of tuple of service objects, key is node number
|
||||
self.customservices = {}
|
||||
importcmd = "from core.services import *"
|
||||
exec(importcmd)
|
||||
paths = self.session.getcfgitem('custom_services_dir')
|
||||
if paths:
|
||||
for path in paths.split(','):
|
||||
path = path.strip()
|
||||
self.importcustom(path)
|
||||
|
||||
def importcustom(self, path):
|
||||
''' Import services from a myservices directory.
|
||||
'''
|
||||
if not path or len(path) == 0:
|
||||
return
|
||||
if not os.path.isdir(path):
|
||||
self.session.warn("invalid custom service directory specified" \
|
||||
": %s" % path)
|
||||
return
|
||||
try:
|
||||
parentdir, childdir = os.path.split(path)
|
||||
if childdir == "services":
|
||||
raise ValueError, "use a unique custom services dir name, " \
|
||||
"not 'services'"
|
||||
sys.path.append(parentdir)
|
||||
exec("from %s import *" % childdir)
|
||||
except Exception, e:
|
||||
self.session.warn("error importing custom services from " \
|
||||
"%s:\n%s" % (path, e))
|
||||
|
||||
def reset(self):
|
||||
''' Called when config message with reset flag is received
|
||||
'''
|
||||
self.defaultservices.clear()
|
||||
self.customservices.clear()
|
||||
|
||||
def get(self):
|
||||
''' Get the list of available services.
|
||||
'''
|
||||
global servicelist
|
||||
return servicelist
|
||||
|
||||
def getservicebyname(self, name):
|
||||
''' Get a service class from the global servicelist given its name.
|
||||
Returns None when the name is not found.
|
||||
'''
|
||||
global servicelist
|
||||
for s in servicelist:
|
||||
if s._name == name:
|
||||
return s
|
||||
return None
|
||||
|
||||
def getdefaultservices(self, type):
|
||||
''' Get the list of default services that should be enabled for a
|
||||
node for the given node type.
|
||||
'''
|
||||
r = []
|
||||
if type in self.defaultservices:
|
||||
defaults = self.defaultservices[type]
|
||||
for name in defaults:
|
||||
s = self.getservicebyname(name)
|
||||
if s is None:
|
||||
self.session.warn("default service %s is unknown" % name)
|
||||
else:
|
||||
r.append(s)
|
||||
return r
|
||||
|
||||
def getcustomservice(self, objid, service):
|
||||
''' Get any custom service configured for the given node that
|
||||
matches the specified service name. If no custom service
|
||||
is found, return the specified service.
|
||||
'''
|
||||
if objid in self.customservices:
|
||||
for s in self.customservices[objid]:
|
||||
if s._name == service._name:
|
||||
return s
|
||||
return service
|
||||
|
||||
def setcustomservice(self, objid, service, values):
|
||||
''' Store service customizations in an instantiated service object
|
||||
using a list of values that came from a config message.
|
||||
'''
|
||||
if service._custom:
|
||||
s = service
|
||||
else:
|
||||
# instantiate the class, for storing config customization
|
||||
s = service()
|
||||
# values are new key=value format; not all keys need to be present
|
||||
# a missing key means go with the default
|
||||
if Configurable.haskeyvalues(values):
|
||||
for v in values:
|
||||
key, value = v.split('=', 1)
|
||||
s.setvalue(key, value)
|
||||
# old-style config, list of values
|
||||
else:
|
||||
s.fromvaluelist(values)
|
||||
|
||||
# assume custom service already in dict
|
||||
if service._custom:
|
||||
return
|
||||
# add the custom service to dict
|
||||
if objid in self.customservices:
|
||||
self.customservices[objid] += (s, )
|
||||
else:
|
||||
self.customservices[objid] = (s, )
|
||||
|
||||
def addservicestonode(self, node, nodetype, services_str, verbose):
|
||||
''' Populate the node.service list using (1) the list of services
|
||||
requested from the services TLV, (2) using any custom service
|
||||
configuration, or (3) using the default services for this node type.
|
||||
'''
|
||||
if services_str is not None:
|
||||
services = services_str.split('|')
|
||||
for name in services:
|
||||
s = self.getservicebyname(name)
|
||||
if s is None:
|
||||
self.session.warn("configured service %s for node %s is " \
|
||||
"unknown" % (name, node.name))
|
||||
continue
|
||||
if verbose:
|
||||
self.session.info("adding configured service %s to " \
|
||||
"node %s" % (s._name, node.name))
|
||||
s = self.getcustomservice(node.objid, s)
|
||||
node.addservice(s)
|
||||
else:
|
||||
services = self.getdefaultservices(nodetype)
|
||||
for s in services:
|
||||
if verbose:
|
||||
self.session.info("adding default service %s to node %s" % \
|
||||
(s._name, node.name))
|
||||
s = self.getcustomservice(node.objid, s)
|
||||
node.addservice(s)
|
||||
|
||||
def getallconfigs(self):
|
||||
''' Return (nodenum, service) tuples for all stored configs.
|
||||
Used when reconnecting to a session or opening XML.
|
||||
'''
|
||||
r = []
|
||||
for nodenum in self.customservices:
|
||||
for s in self.customservices[nodenum]:
|
||||
r.append( (nodenum, s) )
|
||||
return r
|
||||
|
||||
def getallfiles(self, service):
|
||||
''' Return all customized files stored with a service.
|
||||
Used when reconnecting to a session or opening XML.
|
||||
'''
|
||||
r = []
|
||||
if not service._custom:
|
||||
return r
|
||||
for filename in service._configs:
|
||||
data = self.getservicefiledata(service, filename)
|
||||
if data is None:
|
||||
continue
|
||||
r.append( (filename, data) )
|
||||
return r
|
||||
|
||||
def bootnodeservices(self, node):
|
||||
''' Start all services on a node.
|
||||
'''
|
||||
services = sorted(node.services,
|
||||
key=lambda service: service._startindex)
|
||||
for s in services:
|
||||
try:
|
||||
t = float(s._starttime)
|
||||
if t > 0.0:
|
||||
fn = self.bootnodeservice
|
||||
self.session.evq.add_event(t, fn, node, s, services)
|
||||
continue
|
||||
except ValueError:
|
||||
pass
|
||||
self.bootnodeservice(node, s, services)
|
||||
|
||||
def bootnodeservice(self, node, s, services):
|
||||
''' Start a service on a node. Create private dirs, generate config
|
||||
files, and execute startup commands.
|
||||
'''
|
||||
if s._custom:
|
||||
self.bootnodecustomservice(node, s, services)
|
||||
return
|
||||
if node.verbose:
|
||||
node.info("starting service %s (%s)" % (s._name, s._startindex))
|
||||
for d in s._dirs:
|
||||
try:
|
||||
node.privatedir(d)
|
||||
except Exception, e:
|
||||
node.warn("Error making node %s dir %s: %s" % \
|
||||
(node.name, d, e))
|
||||
for filename in s.getconfigfilenames(node.objid, services):
|
||||
cfg = s.generateconfig(node, filename, services)
|
||||
node.nodefile(filename, cfg)
|
||||
for cmd in s.getstartup(node, services):
|
||||
try:
|
||||
# NOTE: this wait=False can be problematic!
|
||||
node.cmd(shlex.split(cmd), wait = False)
|
||||
except:
|
||||
node.warn("error starting command %s" % cmd)
|
||||
|
||||
def bootnodecustomservice(self, node, s, services):
|
||||
''' Start a custom service on a node. Create private dirs, use supplied
|
||||
config files, and execute supplied startup commands.
|
||||
'''
|
||||
if node.verbose:
|
||||
node.info("starting service %s (%s)(custom)" % (s._name, s._startindex))
|
||||
for d in s._dirs:
|
||||
try:
|
||||
node.privatedir(d)
|
||||
except Exception, e:
|
||||
node.warn("Error making node %s dir %s: %s" % \
|
||||
(node.name, d, e))
|
||||
for i, filename in enumerate(s._configs):
|
||||
if len(filename) == 0:
|
||||
continue
|
||||
cfg = self.getservicefiledata(s, filename)
|
||||
if cfg is None:
|
||||
cfg = s.generateconfig(node, filename, services)
|
||||
# cfg may have a file:/// url for copying from a file
|
||||
try:
|
||||
if self.copyservicefile(node, filename, cfg):
|
||||
continue
|
||||
except IOError, e:
|
||||
node.warn("Error copying service file %s" % filename)
|
||||
node.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
|
||||
"service:%s" % s._name,
|
||||
"error copying service file '%s': %s" % (filename, e))
|
||||
continue
|
||||
node.nodefile(filename, cfg)
|
||||
|
||||
for cmd in s._startup:
|
||||
try:
|
||||
# NOTE: this wait=False can be problematic!
|
||||
node.cmd(shlex.split(cmd), wait = False)
|
||||
except:
|
||||
node.warn("error starting command %s" % cmd)
|
||||
|
||||
def copyservicefile(self, node, filename, cfg):
|
||||
''' Given a configured service filename and config, determine if the
|
||||
config references an existing file that should be copied.
|
||||
Returns True for local files, False for generated.
|
||||
'''
|
||||
if cfg[:7] == 'file://':
|
||||
src = cfg[7:]
|
||||
src = src.split('\n')[0]
|
||||
src = expandcorepath(src, node.session, node)
|
||||
# TODO: glob here
|
||||
node.nodefilecopy(filename, src, mode = 0644)
|
||||
return True
|
||||
return False
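# Illustrative example (hypothetical path): a customized config entry of
#   "file:///home/user/configs/zebra.conf"
# stored in a service's _configtxt makes copyservicefile() copy that host
# file into the node as the target filename instead of generating content.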
|
||||
|
||||
|
||||
def validatenodeservices(self, node):
|
||||
''' Run validation commands for all services on a node.
|
||||
'''
|
||||
services = sorted(node.services,
|
||||
key=lambda service: service._startindex)
|
||||
for s in services:
|
||||
self.validatenodeservice(node, s, services)
|
||||
|
||||
def validatenodeservice(self, node, s, services):
|
||||
''' Run the validation command(s) for a service.
|
||||
'''
|
||||
if node.verbose:
|
||||
node.info("validating service %s (%s)" % (s._name, s._startindex))
|
||||
if s._custom:
|
||||
validate_cmds = s._validate
|
||||
else:
|
||||
validate_cmds = s.getvalidate(node, services)
|
||||
for cmd in validate_cmds:
|
||||
if node.verbose:
|
||||
node.info("validating service %s using: %s" % (s._name, cmd))
|
||||
try:
|
||||
(status, result) = node.cmdresult(shlex.split(cmd))
|
||||
if status != 0:
|
||||
raise ValueError, "non-zero exit status"
|
||||
except:
|
||||
node.warn("validation command '%s' failed" % cmd)
|
||||
node.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
|
||||
"service:%s" % s._name,
|
||||
"validate command failed: %s" % cmd)
|
||||
|
||||
def stopnodeservices(self, node):
|
||||
''' Stop all services on a node.
|
||||
'''
|
||||
services = sorted(node.services,
|
||||
key=lambda service: service._startindex)
|
||||
for s in services:
|
||||
self.stopnodeservice(node, s)
|
||||
|
||||
def stopnodeservice(self, node, s):
|
||||
''' Stop a service on a node.
|
||||
'''
|
||||
for cmd in s._shutdown:
|
||||
try:
|
||||
# NOTE: this wait=False can be problematic!
|
||||
node.cmd(shlex.split(cmd), wait = False)
|
||||
except:
|
||||
node.warn("error running stop command %s" % cmd)
|
||||
|
||||
|
||||
def configure_request(self, msg):
|
||||
''' Receive configuration message for configuring services.
|
||||
With a request flag set, a list of services has been requested.
|
||||
When the opaque field is present, a specific service is being
|
||||
configured or requested.
|
||||
'''
|
||||
objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ)
|
||||
conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE)
|
||||
nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE)
|
||||
sessionnum = msg.gettlv(coreapi.CORE_TLV_CONF_SESSION)
|
||||
opaque = msg.gettlv(coreapi.CORE_TLV_CONF_OPAQUE)
|
||||
|
||||
# send back a list of available services
|
||||
if opaque is None:
|
||||
global servicelist
|
||||
tf = coreapi.CONF_TYPE_FLAGS_NONE
|
||||
datatypes = tuple(repeat(coreapi.CONF_DATA_TYPE_BOOL,
|
||||
len(servicelist)))
|
||||
vals = "|".join(repeat('0', len(servicelist)))
|
||||
names = map(lambda x: x._name, servicelist)
|
||||
captions = "|".join(names)
|
||||
possiblevals = ""
|
||||
for s in servicelist:
|
||||
if s._custom_needed:
|
||||
possiblevals += '1'
|
||||
possiblevals += '|'
|
||||
groups = self.buildgroups(servicelist)
|
||||
# send back the properties for this service
|
||||
else:
|
||||
if nodenum is None:
|
||||
return None
|
||||
n = self.session.obj(nodenum)
|
||||
if n is None:
|
||||
self.session.warn("Request to configure services for " \
"unknown node %s" % nodenum)
|
||||
return None
|
||||
servicesstring = opaque.split(':')
|
||||
services = self.servicesfromopaque(opaque, n.objid)
|
||||
if len(services) < 1:
|
||||
return None
|
||||
if len(servicesstring) == 3:
|
||||
# a file request: e.g. "service:zebra:quagga.conf"
|
||||
return self.getservicefile(services, n, servicesstring[2])
|
||||
|
||||
# the first service in the list is the one being configured
|
||||
svc = services[0]
|
||||
# send back:
|
||||
# dirs, configs, startindex, startup, shutdown, metadata, config
|
||||
tf = coreapi.CONF_TYPE_FLAGS_UPDATE
|
||||
datatypes = tuple(repeat(coreapi.CONF_DATA_TYPE_STRING,
|
||||
len(svc.keys)))
|
||||
vals = svc.tovaluelist(n, services)
|
||||
captions = None
|
||||
possiblevals = None
|
||||
groups = None
|
||||
|
||||
tlvdata = ""
|
||||
if nodenum is not None:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE,
|
||||
nodenum)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ,
|
||||
self._name)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE, tf)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_DATA_TYPES,
|
||||
datatypes)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES,
|
||||
vals)
|
||||
if captions:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_CAPTIONS,
|
||||
captions)
|
||||
if possiblevals:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(
|
||||
coreapi.CORE_TLV_CONF_POSSIBLE_VALUES, possiblevals)
|
||||
if groups:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_GROUPS,
|
||||
groups)
|
||||
if sessionnum is not None:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(
|
||||
coreapi.CORE_TLV_CONF_SESSION, sessionnum)
|
||||
if opaque:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OPAQUE,
|
||||
opaque)
|
||||
return coreapi.CoreConfMessage.pack(0, tlvdata)
|
||||
|
||||
|
||||
def configure_values(self, msg, values):
|
||||
''' Receive configuration message for configuring services.
|
||||
Without an opaque field, the values define the default services for a
node type; with an opaque field, a specific service on a node is being
customized.
|
||||
'''
|
||||
nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE)
|
||||
opaque = msg.gettlv(coreapi.CORE_TLV_CONF_OPAQUE)
|
||||
|
||||
errmsg = "services config message that I don't know how to handle"
|
||||
if values is None:
|
||||
self.session.info(errmsg)
|
||||
return None
|
||||
else:
|
||||
values = values.split('|')
|
||||
|
||||
if opaque is None:
|
||||
# store default services for a node type in self.defaultservices[]
|
||||
data_types = msg.gettlv(coreapi.CORE_TLV_CONF_DATA_TYPES)
|
||||
if values is None or data_types is None or \
|
||||
data_types[0] != coreapi.CONF_DATA_TYPE_STRING:
|
||||
self.session.info(errmsg)
|
||||
return None
|
||||
key = values.pop(0)
|
||||
self.defaultservices[key] = values
|
||||
self.session.info("default services for type %s set to %s" % \
|
||||
(key, values))
|
||||
else:
|
||||
# store service customized config in self.customservices[]
|
||||
if nodenum is None:
|
||||
return None
|
||||
services = self.servicesfromopaque(opaque, nodenum)
|
||||
if len(services) < 1:
|
||||
return None
|
||||
svc = services[0]
|
||||
self.setcustomservice(nodenum, svc, values)
|
||||
return None
|
||||
|
||||
def servicesfromopaque(self, opaque, objid):
|
||||
''' Build a list of services from an opaque data string.
|
||||
'''
|
||||
services = []
|
||||
servicesstring = opaque.split(':')
|
||||
if servicesstring[0] != "service":
|
||||
return []
|
||||
servicenames = servicesstring[1].split(',')
|
||||
for name in servicenames:
|
||||
s = self.getservicebyname(name)
|
||||
s = self.getcustomservice(objid, s)
|
||||
if s is None:
|
||||
self.session.warn("Request for unknown service '%s'" % name)
|
||||
return []
|
||||
services.append(s)
|
||||
return services
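# For illustration, opaque strings take the form "service:<names>[:<file>]":
# "service:zebra" selects one service, "service:zebra,OSPFv2" a comma-
# separated list (names here are only examples), and "service:zebra:quagga.conf"
# additionally requests one of that service's config files; see
# configure_request() above.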
|
||||
|
||||
def buildgroups(self, servicelist):
|
||||
''' Build a string of groups for use in a configuration message given
|
||||
a list of services. The group list string has the format
|
||||
"title1:1-5|title2:6-9|10-12", where title is an optional group title
|
||||
and i-j is a numeric range of value indices; groups are
|
||||
separated by '|' characters.
|
||||
'''
|
||||
i = 0
|
||||
r = ""
|
||||
lastgroup = "<undefined>"
|
||||
for service in servicelist:
|
||||
i += 1
|
||||
group = service._group
|
||||
if group != lastgroup:
|
||||
lastgroup = group
|
||||
# finish previous group
|
||||
if i > 1:
|
||||
r += "-%d|" % (i - 1)
|
||||
# optionally include group title
|
||||
if group == "":
|
||||
r += "%d" % i
|
||||
else:
|
||||
r += "%s:%d" % (group, i)
|
||||
# finish the last group list
|
||||
if i > 0:
|
||||
r += "-%d" % i
|
||||
return r
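# Worked example (hypothetical services): for five services whose _group
# values are "Routing", "Routing", "Routing", "Utility", "Utility", the loop
# above builds the string "Routing:1-3|Utility:4-5"; a service with an empty
# _group contributes a bare numeric range with no title.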
|
||||
|
||||
def getservicefile(self, services, node, filename):
|
||||
''' Send a File Message when the GUI has requested a service file.
|
||||
The file data is either auto-generated or comes from an existing config.
|
||||
'''
|
||||
svc = services[0]
|
||||
# get the filename and determine the config file index
|
||||
if svc._custom:
|
||||
cfgfiles = svc._configs
|
||||
else:
|
||||
cfgfiles = svc.getconfigfilenames(node.objid, services)
|
||||
if filename not in cfgfiles:
|
||||
self.session.warn("Request for unknown file '%s' for service '%s'" \
|
||||
% (filename, svc._name))
|
||||
return None
|
||||
|
||||
# get the file data
|
||||
data = self.getservicefiledata(svc, filename)
|
||||
if data is None:
|
||||
data = "%s" % (svc.generateconfig(node, filename, services))
|
||||
else:
|
||||
data = "%s" % data
|
||||
filetypestr = "service:%s" % svc._name
|
||||
|
||||
# send a file message
|
||||
flags = coreapi.CORE_API_ADD_FLAG
|
||||
tlvdata = coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_NODE, node.objid)
|
||||
tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_NAME, filename)
|
||||
tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_TYPE, filetypestr)
|
||||
tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_DATA, data)
|
||||
reply = coreapi.CoreFileMessage.pack(flags, tlvdata)
|
||||
return reply
|
||||
|
||||
def getservicefiledata(self, service, filename):
|
||||
''' Get the customized file data associated with a service. Return None
|
||||
for invalid filenames or missing file data.
|
||||
'''
|
||||
try:
|
||||
i = service._configs.index(filename)
|
||||
except ValueError:
|
||||
return None
|
||||
if i >= len(service._configtxt) or service._configtxt[i] is None:
|
||||
return None
|
||||
return service._configtxt[i]
|
||||
|
||||
def setservicefile(self, nodenum, type, filename, srcname, data):
|
||||
''' Receive a File Message from the GUI and store the customized file
|
||||
in the service config. The filename must match one from the list of
|
||||
config files in the service.
|
||||
'''
|
||||
if len(type.split(':')) < 2:
|
||||
self.session.warn("Received file type did not contain service info.")
|
||||
return
|
||||
if srcname is not None:
|
||||
raise NotImplementedError
|
||||
(svcid, svcname) = type.split(':')[:2]
|
||||
svc = self.getservicebyname(svcname)
|
||||
svc = self.getcustomservice(nodenum, svc)
|
||||
if svc is None:
|
||||
self.session.warn("Received filename for unknown service '%s'" % \
|
||||
svcname)
|
||||
return
|
||||
cfgfiles = svc._configs
|
||||
if filename not in cfgfiles:
|
||||
self.session.warn("Received unknown file '%s' for service '%s'" \
|
||||
% (filename, svcname))
|
||||
return
|
||||
i = cfgfiles.index(filename)
|
||||
configtxtlist = list(svc._configtxt)
|
||||
numitems = len(configtxtlist)
|
||||
if numitems < i+1:
|
||||
# add empty elements to list to support index assignment
|
||||
for j in range(1, (i + 2) - numitems):
|
||||
configtxtlist += None,
|
||||
configtxtlist[i] = data
|
||||
svc._configtxt = configtxtlist
|
||||
|
||||
def handleevent(self, msg):
|
||||
''' Handle an Event Message used to start, stop, restart, or validate
|
||||
a service on a given node.
|
||||
'''
|
||||
eventtype = msg.gettlv(coreapi.CORE_TLV_EVENT_TYPE)
|
||||
nodenum = msg.gettlv(coreapi.CORE_TLV_EVENT_NODE)
|
||||
name = msg.gettlv(coreapi.CORE_TLV_EVENT_NAME)
|
||||
try:
|
||||
node = self.session.obj(nodenum)
|
||||
except KeyError:
|
||||
self.session.warn("Ignoring event for service '%s', unknown node " \
|
||||
"'%s'" % (name, nodenum))
|
||||
return
|
||||
|
||||
services = self.servicesfromopaque(name, nodenum)
|
||||
for s in services:
|
||||
if eventtype == coreapi.CORE_EVENT_STOP or \
|
||||
eventtype == coreapi.CORE_EVENT_RESTART:
|
||||
self.stopnodeservice(node, s)
|
||||
if eventtype == coreapi.CORE_EVENT_START or \
|
||||
eventtype == coreapi.CORE_EVENT_RESTART:
|
||||
if s._custom:
|
||||
cmds = s._startup
|
||||
else:
|
||||
cmds = s.getstartup(node, services)
|
||||
for cmd in cmds:
|
||||
try:
|
||||
node.cmd(shlex.split(cmd), wait = False)
|
||||
except:
|
||||
node.warn("error starting command %s" % cmd)
|
||||
if eventtype == coreapi.CORE_EVENT_PAUSE:
|
||||
self.validatenodeservice(node, s, services)
|
||||
|
||||
|
||||
class CoreService(object):
|
||||
''' Parent class used for defining services.
|
||||
'''
|
||||
# service name should not include spaces
|
||||
_name = ""
|
||||
# group string allows grouping services together
|
||||
_group = ""
|
||||
# list name(s) of services that this service depends upon
|
||||
_depends = ()
|
||||
keys = ["dirs","files","startidx","cmdup","cmddown","cmdval","meta","starttime"]
|
||||
# private, per-node directories required by this service
|
||||
_dirs = ()
|
||||
# config files written by this service
|
||||
_configs = ()
|
||||
# index used to determine start order with other services
|
||||
_startindex = 0
|
||||
# time in seconds after runtime to run startup commands
|
||||
_starttime = ""
|
||||
# list of startup commands
|
||||
_startup = ()
|
||||
# list of shutdown commands
|
||||
_shutdown = ()
|
||||
# list of validate commands
|
||||
_validate = ()
|
||||
# metadata associated with this service
|
||||
_meta = ""
|
||||
# custom configuration text
|
||||
_configtxt = ()
|
||||
_custom = False
|
||||
_custom_needed = False
|
||||
|
||||
def __init__(self):
|
||||
''' Services are not necessarily instantiated. Classmethods may be used
|
||||
against their config. Services are instantiated when a custom
|
||||
configuration is used to override their default parameters.
|
||||
'''
|
||||
self._custom = True
|
||||
|
||||
@classmethod
|
||||
def getconfigfilenames(cls, nodenum, services):
|
||||
''' Return the tuple of configuration file filenames. This default method
|
||||
returns the cls._configs tuple, but this method may be overridden to
|
||||
provide node-specific filenames that may be based on other services.
|
||||
'''
|
||||
return cls._configs
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate configuration file given a node object. The filename is
|
||||
provided to allow for multiple config files. The other services are
|
||||
provided to allow interdependencies (e.g. zebra and OSPF).
|
||||
Return the configuration string to be written to a file or sent
|
||||
to the GUI for customization.
|
||||
'''
|
||||
raise NotImplementedError
|
||||
|
||||
@classmethod
|
||||
def getstartup(cls, node, services):
|
||||
''' Return the tuple of startup commands. This default method
|
||||
returns the cls._startup tuple, but this method may be
|
||||
overridden to provide node-specific commands that may be
|
||||
based on other services.
|
||||
'''
|
||||
return cls._startup
|
||||
|
||||
@classmethod
|
||||
def getvalidate(cls, node, services):
|
||||
''' Return the tuple of validate commands. This default method
|
||||
returns the cls._validate tuple, but this method may be
|
||||
overridden to provide node-specific commands that may be
|
||||
based on other services.
|
||||
'''
|
||||
return cls._validate
|
||||
|
||||
@classmethod
|
||||
def tovaluelist(cls, node, services):
|
||||
''' Convert service properties into a string list of key=value pairs,
|
||||
separated by "|".
|
||||
'''
|
||||
valmap = [cls._dirs, cls._configs, cls._startindex, cls._startup,
|
||||
cls._shutdown, cls._validate, cls._meta, cls._starttime]
|
||||
if not cls._custom:
|
||||
# this is always reached due to classmethod
|
||||
valmap[valmap.index(cls._configs)] = \
|
||||
cls.getconfigfilenames(node.objid, services)
|
||||
valmap[valmap.index(cls._startup)] = \
|
||||
cls.getstartup(node, services)
|
||||
vals = map( lambda a,b: "%s=%s" % (a, str(b)), cls.keys, valmap)
|
||||
return "|".join(vals)
|
||||
|
||||
def fromvaluelist(self, values):
|
||||
''' Convert list of values into properties for this instantiated
|
||||
(customized) service.
|
||||
'''
|
||||
# TODO: support empty value? e.g. override default meta with ''
|
||||
for key in self.keys:
|
||||
try:
|
||||
self.setvalue(key, values[self.keys.index(key)])
|
||||
except IndexError:
|
||||
# old config does not need to have new keys
|
||||
pass
|
||||
|
||||
def setvalue(self, key, value):
|
||||
if key not in self.keys:
|
||||
raise ValueError
|
||||
# this handles data conversion to int, string, and tuples
|
||||
if value:
|
||||
if key == "startidx":
|
||||
value = int(value)
|
||||
elif key == "meta":
|
||||
value = str(value)
|
||||
else:
|
||||
value = maketuplefromstr(value, str)
|
||||
|
||||
if key == "dirs":
|
||||
self._dirs = value
|
||||
elif key == "files":
|
||||
self._configs = value
|
||||
elif key == "startidx":
|
||||
self._startindex = value
|
||||
elif key == "cmdup":
|
||||
self._startup = value
|
||||
elif key == "cmddown":
|
||||
self._shutdown = value
|
||||
elif key == "cmdval":
|
||||
self._validate = value
|
||||
elif key == "meta":
|
||||
self._meta = value
|
||||
elif key == "starttime":
|
||||
self._starttime = value
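# Editor's illustrative example (not part of this commit): the keys list above
# defines the "|"-separated key=value format produced by tovaluelist() and
# parsed by fromvaluelist(); for the Bird service defined in bird.py below the
# opaque string is roughly (wrapped here for readability):
#   dirs=('/etc/bird',)|files=('/etc/bird/bird.conf',)|startidx=35|
#   cmdup=('bird -c /etc/bird/bird.conf',)|cmddown=('killall bird',)|
#   cmdval=('pidof bird',)|meta=|starttime=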
|
6
daemon/core/services/__init__.py
Normal file
@@ -0,0 +1,6 @@
|
|||
"""Services
|
||||
|
||||
Services available to nodes can be put in this directory. Everything listed in
|
||||
__all__ is automatically loaded by the main core module.
|
||||
"""
|
||||
__all__ = ["quagga", "nrl", "xorp", "bird", "utility", "security", "ucarp"]
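# Editor's illustrative sketch (hypothetical "example.py" module, not part of
# this commit): a minimal add-on service that would be auto-loaded if
# "example" were added to __all__ above.
from core.service import CoreService, addservice

class ExampleService(CoreService):
    ''' Write one shell script and run it when the node boots. '''
    _name = "Example"
    _group = "Utility"
    _configs = ("example.sh", )
    _startindex = 50
    _startup = ("sh example.sh", )

    @classmethod
    def generateconfig(cls, node, filename, services):
        ''' Return the example.sh contents for the given node. '''
        return "#!/bin/sh\n# auto-generated example service\necho booted %s\n" % node.name

addservice(ExampleService)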
|
249
daemon/core/services/bird.py
Normal file
@@ -0,0 +1,249 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2012 Jean-Tiare Le Bigot.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Jean-Tiare Le Bigot <admin@jtlebi.fr>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
bird.py: defines routing services provided by the BIRD Internet Routing Daemon.
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
from core.service import CoreService, addservice
|
||||
from core.misc.ipaddr import IPv4Prefix
|
||||
from core.constants import *
|
||||
|
||||
class Bird(CoreService):
|
||||
''' Bird router support
|
||||
'''
|
||||
_name = "bird"
|
||||
_group = "BIRD"
|
||||
_depends = ()
|
||||
_dirs = ("/etc/bird",)
|
||||
_configs = ("/etc/bird/bird.conf",)
|
||||
_startindex = 35
|
||||
_startup = ("bird -c %s" % (_configs[0]),)
|
||||
_shutdown = ("killall bird", )
|
||||
_validate = ("pidof bird", )
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the bird.conf file contents.
|
||||
'''
|
||||
if filename == cls._configs[0]:
|
||||
return cls.generateBirdConf(node, services)
|
||||
else:
|
||||
raise ValueError
|
||||
|
||||
@staticmethod
|
||||
def routerid(node):
|
||||
''' Helper to return the first IPv4 address of a node as its router ID.
|
||||
'''
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") >= 0:
|
||||
return a.split('/')[0]
|
||||
#raise ValueError, "no IPv4 address found for router ID"
|
||||
return "0.0.0.0"
|
||||
|
||||
@classmethod
|
||||
def generateBirdConf(cls, node, services):
|
||||
''' Returns configuration file text. Other services that depend on bird
|
||||
will have generatebirdifcconfig() and generatebirdconfig()
|
||||
hooks that are invoked here.
|
||||
'''
|
||||
cfg = """\
|
||||
/* Main configuration file for BIRD. This is only a template,
|
||||
* you will *need* to customize it according to your needs
|
||||
* Beware that only double quotes \'"\' are valid. No singles. */
|
||||
|
||||
|
||||
log "/var/log/%s.log" all;
|
||||
#debug protocols all;
|
||||
#debug commands 2;
|
||||
|
||||
router id %s; # Mandatory for IPv6, may be automatic for IPv4
|
||||
|
||||
protocol kernel {
|
||||
persist; # Don\'t remove routes on BIRD shutdown
|
||||
scan time 200; # Scan kernel routing table every 200 seconds
|
||||
export all;
|
||||
import all;
|
||||
}
|
||||
|
||||
protocol device {
|
||||
scan time 10; # Scan interfaces every 10 seconds
|
||||
}
|
||||
|
||||
""" % (cls._name, cls.routerid(node))
|
||||
|
||||
# Generate protocol specific configurations
|
||||
for s in services:
|
||||
if cls._name not in s._depends:
|
||||
continue
|
||||
cfg += s.generatebirdconfig(node)
|
||||
|
||||
return cfg
|
||||
|
||||
class BirdService(CoreService):
|
||||
''' Parent class for Bird services. Defines properties and methods
|
||||
common to Bird's routing daemons.
|
||||
'''
|
||||
|
||||
_name = "BirdDaemon"
|
||||
_group = "BIRD"
|
||||
_depends = ("bird", )
|
||||
_dirs = ()
|
||||
_configs = ()
|
||||
_startindex = 40
|
||||
_startup = ()
|
||||
_shutdown = ()
|
||||
_meta = "The config file for this service can be found in the bird service."
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
def generatebirdifcconfig(cls, node):
|
||||
''' Use only bare interfaces descriptions in generated protocol
|
||||
configurations. This has the slight advantage of being the same
|
||||
everywhere.
|
||||
'''
|
||||
cfg = ""
|
||||
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True: continue
|
||||
cfg += ' interface "%s";\n' % ifc.name
|
||||
|
||||
return cfg
|
||||
|
||||
|
||||
class BirdBgp(BirdService):
|
||||
'''BGP BIRD Service (configuration generation)'''
|
||||
|
||||
_name = "BIRD_BGP"
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
return """
|
||||
/* This is a sample config that should be customized with appropriate AS numbers
|
||||
* and peers; add one section like this for each neighbor */
|
||||
|
||||
protocol bgp {
|
||||
local as 65000; # Customize your AS number
|
||||
neighbor 198.51.100.130 as 64496; # Customize neighbor AS number && IP
|
||||
export filter { # We use non-trivial export rules
|
||||
# This is an example. You should advertise only *your routes*
|
||||
if (source = RTS_DEVICE) || (source = RTS_OSPF) then {
|
||||
# bgp_community.add((65000,64501)); # Assign our community
|
||||
accept;
|
||||
}
|
||||
reject;
|
||||
};
|
||||
import all;
|
||||
}
|
||||
|
||||
"""
|
||||
|
||||
class BirdOspf(BirdService):
|
||||
'''OSPF BIRD Service (configuration generation)'''
|
||||
|
||||
_name = "BIRD_OSPFv2"
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
cfg = 'protocol ospf {\n'
|
||||
cfg += ' export filter {\n'
|
||||
cfg += ' if source = RTS_BGP then {\n'
|
||||
cfg += ' ospf_metric1 = 100;\n'
|
||||
cfg += ' accept;\n'
|
||||
cfg += ' }\n'
|
||||
cfg += ' accept;\n'
|
||||
cfg += ' };\n'
|
||||
cfg += ' area 0.0.0.0 {\n'
|
||||
cfg += cls.generatebirdifcconfig(node)
|
||||
cfg += ' };\n'
|
||||
cfg += '}\n\n'
|
||||
|
||||
return cfg
|
||||
|
||||
|
||||
class BirdRadv(BirdService):
|
||||
'''RADV BIRD Service (configuration generation)'''
|
||||
|
||||
_name = "BIRD_RADV"
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
cfg = '/* This is a sample config that must be customized */\n'
|
||||
|
||||
cfg += 'protocol radv {\n'
|
||||
cfg += ' # auto configuration on all interfaces\n'
|
||||
cfg += cls.generatebirdifcconfig(node)
|
||||
cfg += ' # Advertise DNS\n'
|
||||
cfg += ' rdnss {\n'
|
||||
cfg += '# lifetime mult 10;\n'
|
||||
cfg += '# lifetime mult 10;\n'
|
||||
cfg += '# ns 2001:0DB8:1234::11;\n'
|
||||
cfg += '# ns 2001:0DB8:1234::11;\n'
|
||||
cfg += '# ns 2001:0DB8:1234::12;\n'
|
||||
cfg += '# ns 2001:0DB8:1234::12;\n'
|
||||
cfg += ' };\n'
|
||||
cfg += '}\n\n'
|
||||
|
||||
return cfg
|
||||
|
||||
|
||||
class BirdRip(BirdService):
|
||||
'''RIP BIRD Service (configuration generation)'''
|
||||
|
||||
_name = "BIRD_RIP"
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
cfg = 'protocol rip {\n'
|
||||
cfg += ' period 10;\n'
|
||||
cfg += ' garbage time 60;\n'
|
||||
cfg += cls.generatebirdifcconfig(node)
|
||||
cfg += ' honor neighbor;\n'
|
||||
cfg += ' authentication none;\n'
|
||||
cfg += ' import all;\n'
|
||||
cfg += ' export all;\n'
|
||||
cfg += '}\n\n'
|
||||
|
||||
return cfg
|
||||
|
||||
|
||||
class BirdStatic(BirdService):
|
||||
'''Static Bird Service (configuration generation)'''
|
||||
|
||||
_name = "BIRD_static"
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
cfg = '/* This is a sample config that must be customized */\n'
|
||||
|
||||
cfg += 'protocol static {\n'
|
||||
cfg += '# route 0.0.0.0/0 via 198.51.100.130; # Default route. Do NOT advertise on BGP !\n'
|
||||
cfg += '# route 203.0.113.0/24 reject; # Sink route\n'
|
||||
cfg += '# route 10.2.0.0/24 via "arc0"; # Secondary network\n'
|
||||
cfg += '}\n\n'
|
||||
|
||||
return cfg
|
||||
|
||||
|
||||
# Register all protocols
|
||||
addservice(Bird)
|
||||
addservice(BirdOspf)
|
||||
addservice(BirdBgp)
|
||||
#addservice(BirdRadv) # untested
|
||||
addservice(BirdRip)
|
||||
addservice(BirdStatic)
|
191
daemon/core/services/nrl.py
Normal file
@@ -0,0 +1,191 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
nrl.py: defines services provided by NRL protolib tools hosted here:
|
||||
http://cs.itd.nrl.navy.mil/products/
|
||||
'''
|
||||
|
||||
from core.service import CoreService, addservice
|
||||
from core.misc.ipaddr import IPv4Prefix
|
||||
|
||||
class NrlService(CoreService):
|
||||
''' Parent class for NRL services. Defines properties and methods
|
||||
common to NRL's routing daemons.
|
||||
'''
|
||||
_name = "NRLDaemon"
|
||||
_group = "Routing"
|
||||
_depends = ()
|
||||
_dirs = ()
|
||||
_configs = ()
|
||||
_startindex = 45
|
||||
_startup = ()
|
||||
_shutdown = ()
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
return ""
|
||||
|
||||
@staticmethod
|
||||
def firstipv4prefix(node, prefixlen=24):
|
||||
''' Similar to QuaggaService.routerid(). Helper to return the first IPv4
|
||||
prefix of a node, using the supplied prefix length. This ignores the
|
||||
interface's prefix length, so e.g. '/32' can turn into '/24'.
|
||||
'''
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") >= 0:
|
||||
addr = a.split('/')[0]
|
||||
pre = IPv4Prefix("%s/%s" % (addr, prefixlen))
|
||||
return str(pre)
|
||||
#raise ValueError, "no IPv4 address found"
|
||||
return "0.0.0.0/%s" % prefixlen
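# Editor's note (illustrative, not part of this commit): with prefixlen=24 an
# interface address such as "192.168.1.5/32" becomes
# IPv4Prefix("192.168.1.5/24"), i.e. the /24 containing that host (assuming
# core.misc.ipaddr renders the prefix in network form, roughly "192.168.1.0/24").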
|
||||
|
||||
class NrlNhdp(NrlService):
|
||||
''' NeighborHood Discovery Protocol for MANET networks.
|
||||
'''
|
||||
_name = "NHDP"
|
||||
_startup = ("nrlnhdp", )
|
||||
_shutdown = ("killall nrlnhdp", )
|
||||
_validate = ("pidof nrlnhdp", )
|
||||
|
||||
@classmethod
|
||||
def getstartup(cls, node, services):
|
||||
''' Generate the appropriate command-line based on node interfaces.
|
||||
'''
|
||||
cmd = cls._startup[0]
|
||||
cmd += " -l /var/log/nrlnhdp.log"
|
||||
cmd += " -rpipe %s_nhdp" % node.name
|
||||
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
if "SMF" in servicenames:
|
||||
cmd += " -flooding ecds"
|
||||
cmd += " -smfClient %s_smf" % node.name
|
||||
|
||||
netifs = filter(lambda x: not getattr(x, 'control', False), \
|
||||
node.netifs())
|
||||
if len(netifs) > 0:
|
||||
interfacenames = map(lambda x: x.name, netifs)
|
||||
cmd += " -i "
|
||||
cmd += " -i ".join(interfacenames)
|
||||
|
||||
return (cmd, )
|
||||
|
||||
addservice(NrlNhdp)
|
||||
|
||||
class NrlSmf(NrlService):
|
||||
''' Simplified Multicast Forwarding for MANET networks.
|
||||
'''
|
||||
_name = "SMF"
|
||||
_startup = ("nrlsmf", )
|
||||
_shutdown = ("killall nrlsmf", )
|
||||
_validate = ("pidof nrlsmf", )
|
||||
|
||||
@classmethod
|
||||
def getstartup(cls, node, services):
|
||||
''' Generate the appropriate command-line based on node interfaces.
|
||||
'''
|
||||
cmd = cls._startup[0]
|
||||
cmd += " instance %s_smf" % node.name
|
||||
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
netifs = filter(lambda x: not getattr(x, 'control', False), \
|
||||
node.netifs())
|
||||
if len(netifs) == 0:
|
||||
return ()
|
||||
|
||||
if "arouted" in servicenames:
|
||||
cmd += " tap %s_tap" % (node.name,)
|
||||
cmd += " unicast %s" % cls.firstipv4prefix(node, 24)
|
||||
cmd += " push lo,%s resequence on" % netifs[0].name
|
||||
if len(netifs) > 0:
|
||||
if "NHDP" in servicenames:
|
||||
cmd += " ecds "
|
||||
elif "OLSR" in servicenames:
|
||||
cmd += " smpr "
|
||||
else:
|
||||
cmd += " cf "
|
||||
interfacenames = map(lambda x: x.name, netifs)
|
||||
cmd += ",".join(interfacenames)
|
||||
|
||||
cmd += " hash MD5"
|
||||
cmd += " log /var/log/nrlsmf.log"
|
||||
return (cmd, )
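# Editor's illustrative example (not part of this commit): on node "n1" with
# interfaces eth0 and eth1 and the NHDP service also selected, the command
# returned above is roughly:
#   nrlsmf instance n1_smf ecds eth0,eth1 hash MD5 log /var/log/nrlsmf.log
# with "smpr" or "cf" substituted for "ecds" when OLSR or neither is present.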
|
||||
|
||||
addservice(NrlSmf)
|
||||
|
||||
class NrlOlsr(NrlService):
|
||||
''' Optimized Link State Routing protocol for MANET networks.
|
||||
'''
|
||||
_name = "OLSR"
|
||||
_startup = ("nrlolsrd", )
|
||||
_shutdown = ("killall nrlolsrd", )
|
||||
_validate = ("pidof nrlolsrd", )
|
||||
|
||||
@classmethod
|
||||
def getstartup(cls, node, services):
|
||||
''' Generate the appropriate command-line based on node interfaces.
|
||||
'''
|
||||
cmd = cls._startup[0]
|
||||
# are multiple interfaces supported? No.
|
||||
netifs = list(node.netifs())
|
||||
if len(netifs) > 0:
|
||||
ifc = netifs[0]
|
||||
cmd += " -i %s" % ifc.name
|
||||
cmd += " -l /var/log/nrlolsrd.log"
|
||||
cmd += " -rpipe %s_olsr" % node.name
|
||||
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
if "SMF" in servicenames and not "NHDP" in servicenames:
|
||||
cmd += " -flooding s-mpr"
|
||||
cmd += " -smfClient %s_smf" % node.name
|
||||
if "zebra" in servicenames:
|
||||
cmd += " -z"
|
||||
|
||||
return (cmd, )
|
||||
|
||||
addservice(NrlOlsr)
|
||||
|
||||
class Arouted(NrlService):
|
||||
''' Adaptive Routing
|
||||
'''
|
||||
_name = "arouted"
|
||||
_configs = ("startarouted.sh", )
|
||||
_startindex = NrlService._startindex + 10
|
||||
_startup = ("sh startarouted.sh", )
|
||||
_shutdown = ("pkill arouted", )
|
||||
_validate = ("pidof arouted", )
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the startarouted.sh file contents.
|
||||
'''
|
||||
cfg = """
|
||||
#!/bin/sh
|
||||
for f in "/tmp/%s_smf"; do
|
||||
count=1
|
||||
until [ -e "$f" ]; do
|
||||
if [ $count -eq 10 ]; then
|
||||
echo "ERROR: nrlmsf pipe not found: $f" >&2
|
||||
exit 1
|
||||
fi
|
||||
sleep 0.1
|
||||
count=$(($count + 1))
|
||||
done
|
||||
done
|
||||
|
||||
""" % (node.name)
|
||||
cfg += "ip route add %s dev lo\n" % cls.firstipv4prefix(node, 24)
|
||||
cfg += "arouted instance %s_smf tap %s_tap" % (node.name, node.name)
|
||||
cfg += " stability 10" # seconds to consider a new route valid
|
||||
cfg += " > /var/log/arouted.log 2>&1 &\n\n"
|
||||
return cfg
|
||||
|
||||
# experimental
|
||||
#addservice(Arouted)
|
589
daemon/core/services/quagga.py
Normal file
@@ -0,0 +1,589 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
quagga.py: defines routing services provided by Quagga.
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
if os.uname()[0] == "Linux":
|
||||
from core.netns import nodes
|
||||
elif os.uname()[0] == "FreeBSD":
|
||||
from core.bsd import nodes
|
||||
from core.service import CoreService, addservice
|
||||
from core.misc.ipaddr import IPv4Prefix, isIPv4Address, isIPv6Address
|
||||
from core.api import coreapi
|
||||
from core.constants import *
|
||||
|
||||
QUAGGA_USER="root"
|
||||
QUAGGA_GROUP="root"
|
||||
if os.uname()[0] == "FreeBSD":
|
||||
QUAGGA_GROUP="wheel"
|
||||
|
||||
class Zebra(CoreService):
|
||||
'''
|
||||
'''
|
||||
_name = "zebra"
|
||||
_group = "Quagga"
|
||||
_depends = ("vtysh", )
|
||||
_dirs = ("/usr/local/etc/quagga", "/var/run/quagga")
|
||||
_configs = ("/usr/local/etc/quagga/Quagga.conf",
|
||||
"quaggaboot.sh","/usr/local/etc/quagga/vtysh.conf")
|
||||
_startindex = 35
|
||||
_startup = ("sh quaggaboot.sh zebra",)
|
||||
_shutdown = ("killall zebra", )
|
||||
_validate = ("pidof zebra", )
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the Quagga.conf or quaggaboot.sh file contents.
|
||||
'''
|
||||
if filename == cls._configs[0]:
|
||||
return cls.generateQuaggaConf(node, services)
|
||||
elif filename == cls._configs[1]:
|
||||
return cls.generateQuaggaBoot(node, services)
|
||||
elif filename == cls._configs[2]:
|
||||
return cls.generateVtyshConf(node, services)
|
||||
else:
|
||||
raise ValueError
|
||||
|
||||
@classmethod
|
||||
def generateVtyshConf(cls, node, services):
|
||||
''' Returns configuration file text.
|
||||
'''
|
||||
return "service integrated-vtysh-config"
|
||||
|
||||
@classmethod
|
||||
def generateQuaggaConf(cls, node, services):
|
||||
''' Returns configuration file text. Other services that depend on zebra
|
||||
will have generatequaggaifcconfig() and generatequaggaconfig()
|
||||
hooks that are invoked here.
|
||||
'''
|
||||
# we could verify here that filename == Quagga.conf
|
||||
cfg = ""
|
||||
for ifc in node.netifs():
|
||||
cfg += "interface %s\n" % ifc.name
|
||||
# include control interfaces in addressing but not routing daemons
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
cfg += " "
|
||||
cfg += "\n ".join(map(cls.addrstr, ifc.addrlist))
|
||||
cfg += "\n"
|
||||
continue
|
||||
cfgv4 = ""
|
||||
cfgv6 = ""
|
||||
want_ipv4 = False
|
||||
want_ipv6 = False
|
||||
for s in services:
|
||||
if cls._name not in s._depends:
|
||||
continue
|
||||
ifccfg = s.generatequaggaifcconfig(node, ifc)
|
||||
if s._ipv4_routing:
|
||||
want_ipv4 = True
|
||||
if s._ipv6_routing:
|
||||
want_ipv6 = True
|
||||
cfgv6 += ifccfg
|
||||
else:
|
||||
cfgv4 += ifccfg
|
||||
|
||||
if want_ipv4:
|
||||
ipv4list = filter(lambda x: isIPv4Address(x.split('/')[0]),
|
||||
ifc.addrlist)
|
||||
cfg += " "
|
||||
cfg += "\n ".join(map(cls.addrstr, ipv4list))
|
||||
cfg += "\n"
|
||||
cfg += cfgv4
|
||||
if want_ipv6:
|
||||
ipv6list = filter(lambda x: isIPv6Address(x.split('/')[0]),
|
||||
ifc.addrlist)
|
||||
cfg += " "
|
||||
cfg += "\n ".join(map(cls.addrstr, ipv6list))
|
||||
cfg += "\n"
|
||||
cfg += cfgv6
|
||||
cfg += "!\n"
|
||||
|
||||
for s in services:
|
||||
if cls._name not in s._depends:
|
||||
continue
|
||||
cfg += s.generatequaggaconfig(node)
|
||||
return cfg
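# Editor's illustrative example (not part of this commit): for a router whose
# eth0 is addressed 10.0.0.1/24 with the OSPFv2 service selected, the
# Quagga.conf assembled above is roughly:
#   interface eth0
#    ip address 10.0.0.1/24
#   !
#   router ospf
#    router-id 10.0.0.1
#    network 10.0.0.0/24 area 0
#   !
# (the "network" line assumes IPv4Prefix prints the address in network form)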
|
||||
|
||||
@staticmethod
|
||||
def addrstr(x):
|
||||
''' helper for mapping IP addresses to zebra config statements
|
||||
'''
|
||||
if x.find(".") >= 0:
|
||||
return "ip address %s" % x
|
||||
elif x.find(":") >= 0:
|
||||
return "ipv6 address %s" % x
|
||||
else:
|
||||
raise ValueError, "invalid address: %s" % x
|
||||
|
||||
@classmethod
|
||||
def generateQuaggaBoot(cls, node, services):
|
||||
''' Generate a shell script used to boot the Quagga daemons.
|
||||
'''
|
||||
try:
|
||||
quagga_bin_search = node.session.cfg['quagga_bin_search']
|
||||
quagga_sbin_search = node.session.cfg['quagga_sbin_search']
|
||||
except KeyError:
|
||||
quagga_bin_search = '"/usr/local/bin /usr/bin /usr/lib/quagga"'
|
||||
quagga_sbin_search = '"/usr/local/sbin /usr/sbin /usr/lib/quagga"'
|
||||
return """\
|
||||
#!/bin/sh
|
||||
# auto-generated by zebra service (quagga.py)
|
||||
QUAGGA_CONF=%s
|
||||
QUAGGA_SBIN_SEARCH=%s
|
||||
QUAGGA_BIN_SEARCH=%s
|
||||
QUAGGA_STATE_DIR=%s
|
||||
QUAGGA_USER=%s
|
||||
QUAGGA_GROUP=%s
|
||||
|
||||
searchforprog()
|
||||
{
|
||||
prog=$1
|
||||
searchpath=$@
|
||||
ret=
|
||||
for p in $searchpath; do
|
||||
if [ -x $p/$prog ]; then
|
||||
ret=$p
|
||||
break
|
||||
fi
|
||||
done
|
||||
echo $ret
|
||||
}
|
||||
|
||||
confcheck()
|
||||
{
|
||||
CONF_DIR=`dirname $QUAGGA_CONF`
|
||||
# if /etc/quagga exists, point /etc/quagga/Quagga.conf -> CONF_DIR
|
||||
if [ "$CONF_DIR" != "/etc/quagga" ] && [ -d /etc/quagga ] && [ ! -e /etc/quagga/Quagga.conf ]; then
|
||||
ln -s $CONF_DIR/Quagga.conf /etc/quagga/Quagga.conf
|
||||
fi
|
||||
# if /etc/quagga exists, point /etc/quagga/vtysh.conf -> CONF_DIR
|
||||
if [ "$CONF_DIR" != "/etc/quagga" ] && [ -d /etc/quagga ] && [ ! -e /etc/quagga/vtysh.conf ]; then
|
||||
ln -s $CONF_DIR/vtysh.conf /etc/quagga/vtysh.conf
|
||||
fi
|
||||
}
|
||||
|
||||
waitforvtyfiles()
|
||||
{
|
||||
for f in "$@"; do
|
||||
count=1
|
||||
until [ -e $QUAGGA_STATE_DIR/$f ]; do
|
||||
if [ $count -eq 10 ]; then
|
||||
echo "ERROR: vty file not found: $QUAGGA_STATE_DIR/$f" >&2
|
||||
return 1
|
||||
fi
|
||||
sleep 0.1
|
||||
count=$(($count + 1))
|
||||
done
|
||||
done
|
||||
}
|
||||
|
||||
bootdaemon()
|
||||
{
|
||||
QUAGGA_SBIN_DIR=$(searchforprog $1 $QUAGGA_SBIN_SEARCH)
|
||||
if [ "z$QUAGGA_SBIN_DIR" = "z" ]; then
|
||||
echo "ERROR: Quagga's '$1' daemon not found in search path:"
|
||||
echo " $QUAGGA_SBIN_SEARCH"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ "$1" != "zebra" ]; then
|
||||
waitforvtyfiles zebra.vty
|
||||
fi
|
||||
|
||||
$QUAGGA_SBIN_DIR/$1 -u $QUAGGA_USER -g $QUAGGA_GROUP -d
|
||||
}
|
||||
|
||||
bootvtysh()
|
||||
{
|
||||
QUAGGA_BIN_DIR=$(searchforprog $1 $QUAGGA_BIN_SEARCH)
|
||||
if [ "z$QUAGGA_BIN_DIR" = "z" ]; then
|
||||
echo "ERROR: Quagga's '$1' daemon not found in search path:"
|
||||
echo "    $QUAGGA_BIN_SEARCH"
|
||||
return 1
|
||||
fi
|
||||
|
||||
vtyfiles="zebra.vty"
|
||||
for r in rip ripng ospf6 ospf bgp babel; do
|
||||
if grep -q "^router \<${r}\>" $QUAGGA_CONF; then
|
||||
vtyfiles="$vtyfiles ${r}d.vty"
|
||||
fi
|
||||
done
|
||||
|
||||
# wait for Quagga daemon vty files to appear before invoking vtysh
|
||||
waitforvtyfiles $vtyfiles
|
||||
|
||||
$QUAGGA_BIN_DIR/vtysh -b
|
||||
}
|
||||
|
||||
confcheck
|
||||
if [ "x$1" = "x" ]; then
|
||||
echo "ERROR: missing the name of the Quagga daemon to boot"
|
||||
exit 1
|
||||
elif [ "$1" = "vtysh" ]; then
|
||||
bootvtysh $1
|
||||
else
|
||||
bootdaemon $1
|
||||
fi
|
||||
""" % (cls._configs[0], quagga_sbin_search, quagga_bin_search, \
|
||||
QUAGGA_STATE_DIR, QUAGGA_USER, QUAGGA_GROUP)
|
||||
|
||||
addservice(Zebra)
|
||||
|
||||
class QuaggaService(CoreService):
|
||||
''' Parent class for Quagga services. Defines properties and methods
|
||||
common to Quagga's routing daemons.
|
||||
'''
|
||||
_name = "QuaggaDaemon"
|
||||
_group = "Quagga"
|
||||
_depends = ("zebra", )
|
||||
_dirs = ()
|
||||
_configs = ()
|
||||
_startindex = 40
|
||||
_startup = ()
|
||||
_shutdown = ()
|
||||
_meta = "The config file for this service can be found in the Zebra service."
|
||||
|
||||
_ipv4_routing = False
|
||||
_ipv6_routing = False
|
||||
|
||||
@staticmethod
|
||||
def routerid(node):
|
||||
''' Helper to return the first IPv4 address of a node as its router ID.
|
||||
'''
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") >= 0:
|
||||
return a.split('/')[0]
|
||||
#raise ValueError, "no IPv4 address found for router ID"
|
||||
return "0.0.0.0"
|
||||
|
||||
@staticmethod
|
||||
def rj45check(ifc):
|
||||
''' Helper to detect whether interface is connected to an external RJ45
|
||||
link.
|
||||
'''
|
||||
if ifc.net:
|
||||
for peerifc in ifc.net.netifs():
|
||||
if peerifc == ifc:
|
||||
continue
|
||||
if isinstance(peerifc, nodes.RJ45Node):
|
||||
return True
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
return ""
|
||||
|
||||
|
||||
|
||||
class Ospfv2(QuaggaService):
|
||||
''' The OSPFv2 service provides IPv4 routing for wired networks. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified Quagga.conf file.
|
||||
'''
|
||||
_name = "OSPFv2"
|
||||
_startup = ("sh quaggaboot.sh ospfd",)
|
||||
_shutdown = ("killall ospfd", )
|
||||
_validate = ("pidof ospfd", )
|
||||
_ipv4_routing = True
|
||||
|
||||
@staticmethod
|
||||
def mtucheck(ifc):
|
||||
''' Helper to detect MTU mismatch and add the appropriate OSPF
|
||||
mtu-ignore command. This is needed when e.g. a node is linked via a
|
||||
GreTap device.
|
||||
'''
|
||||
if ifc.mtu != 1500:
|
||||
# a workaround for PhysicalNode GreTap, which has no knowledge of
|
||||
# the other nodes/nets
|
||||
return " ip ospf mtu-ignore\n"
|
||||
if not ifc.net:
|
||||
return ""
|
||||
for i in ifc.net.netifs():
|
||||
if i.mtu != ifc.mtu:
|
||||
return " ip ospf mtu-ignore\n"
|
||||
return ""
|
||||
|
||||
@staticmethod
|
||||
def ptpcheck(ifc):
|
||||
''' Helper to detect whether interface is connected to a notional
|
||||
point-to-point link.
|
||||
'''
|
||||
if isinstance(ifc.net, nodes.PtpNet):
|
||||
return " ip ospf network point-to-point\n"
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
cfg = "router ospf\n"
|
||||
rtrid = cls.routerid(node)
|
||||
cfg += " router-id %s\n" % rtrid
|
||||
# network 10.0.0.0/24 area 0
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") < 0:
|
||||
continue
|
||||
net = IPv4Prefix(a)
|
||||
cfg += " network %s area 0\n" % net
|
||||
cfg += "!\n"
|
||||
return cfg
|
||||
|
||||
@classmethod
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
return cls.mtucheck(ifc)
|
||||
#cfg = cls.mtucheck(ifc)
|
||||
# external RJ45 connections will use default OSPF timers
|
||||
#if cls.rj45check(ifc):
|
||||
# return cfg
|
||||
#cfg += cls.ptpcheck(ifc)
|
||||
|
||||
#return cfg + """\
|
||||
# ip ospf hello-interval 2
|
||||
# ip ospf dead-interval 6
|
||||
# ip ospf retransmit-interval 5
|
||||
#"""
|
||||
|
||||
addservice(Ospfv2)
|
||||
|
||||
class Ospfv3(QuaggaService):
|
||||
''' The OSPFv3 service provides IPv6 routing for wired networks. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified Quagga.conf file.
|
||||
'''
|
||||
_name = "OSPFv3"
|
||||
_startup = ("sh quaggaboot.sh ospf6d",)
|
||||
_shutdown = ("killall ospf6d", )
|
||||
_validate = ("pidof ospf6d", )
|
||||
_ipv4_routing = True
|
||||
_ipv6_routing = True
|
||||
|
||||
@staticmethod
|
||||
def minmtu(ifc):
|
||||
''' Helper to discover the minimum MTU of interfaces linked with the
|
||||
given interface.
|
||||
'''
|
||||
mtu = ifc.mtu
|
||||
if not ifc.net:
|
||||
return mtu
|
||||
for i in ifc.net.netifs():
|
||||
if i.mtu < mtu:
|
||||
mtu = i.mtu
|
||||
return mtu
|
||||
|
||||
@classmethod
|
||||
def mtucheck(cls, ifc):
|
||||
''' Helper to detect MTU mismatch and add the appropriate OSPFv3
|
||||
ifmtu command. This is needed when e.g. a node is linked via a
|
||||
GreTap device.
|
||||
'''
|
||||
minmtu = cls.minmtu(ifc)
|
||||
if minmtu < ifc.mtu:
|
||||
return " ipv6 ospf6 ifmtu %d\n" % minmtu
|
||||
else:
|
||||
return ""
|
||||
|
||||
@staticmethod
|
||||
def ptpcheck(ifc):
|
||||
''' Helper to detect whether interface is connected to a notional
|
||||
point-to-point link.
|
||||
'''
|
||||
if isinstance(ifc.net, nodes.PtpNet):
|
||||
return " ipv6 ospf6 network point-to-point\n"
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
cfg = "router ospf6\n"
|
||||
rtrid = cls.routerid(node)
|
||||
cfg += " router-id %s\n" % rtrid
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += " interface %s area 0.0.0.0\n" % ifc.name
|
||||
cfg += "!\n"
|
||||
return cfg
|
||||
|
||||
@classmethod
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
return cls.mtucheck(ifc)
|
||||
#cfg = cls.mtucheck(ifc)
|
||||
# external RJ45 connections will use default OSPF timers
|
||||
#if cls.rj45check(ifc):
|
||||
# return cfg
|
||||
#cfg += cls.ptpcheck(ifc)
|
||||
|
||||
#return cfg + """\
|
||||
# ipv6 ospf6 hello-interval 2
|
||||
# ipv6 ospf6 dead-interval 6
|
||||
# ipv6 ospf6 retransmit-interval 5
|
||||
#"""
|
||||
|
||||
addservice(Ospfv3)
|
||||
|
||||
class Ospfv3mdr(Ospfv3):
|
||||
''' The OSPFv3 MANET Designated Router (MDR) service provides IPv6
|
||||
routing for wireless networks. It does not build its own
|
||||
configuration file but has hooks for adding to the
|
||||
unified Quagga.conf file.
|
||||
'''
|
||||
_name = "OSPFv3MDR"
|
||||
_ipv4_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
cfg = cls.mtucheck(ifc)
|
||||
|
||||
return cfg + """\
|
||||
ipv6 ospf6 instance-id 65
|
||||
ipv6 ospf6 hello-interval 2
|
||||
ipv6 ospf6 dead-interval 6
|
||||
ipv6 ospf6 retransmit-interval 5
|
||||
ipv6 ospf6 network manet-designated-router
|
||||
ipv6 ospf6 diffhellos
|
||||
ipv6 ospf6 adjacencyconnectivity uniconnected
|
||||
ipv6 ospf6 lsafullness mincostlsa
|
||||
"""
|
||||
|
||||
addservice(Ospfv3mdr)
|
||||
|
||||
class Bgp(QuaggaService):
|
||||
''' The BGP service provides interdomain routing.
|
||||
Peers must be manually configured, with a full mesh for those
|
||||
having the same AS number.
|
||||
'''
|
||||
_name = "BGP"
|
||||
_startup = ("sh quaggaboot.sh bgpd",)
|
||||
_shutdown = ("killall bgpd", )
|
||||
_validate = ("pidof bgpd", )
|
||||
_custom_needed = True
|
||||
_ipv4_routing = True
|
||||
_ipv6_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
cfg = "!\n! BGP configuration\n!\n"
|
||||
cfg += "! You should configure the AS number below,\n"
|
||||
cfg += "! along with this router's peers.\n!\n"
|
||||
cfg += "router bgp %s\n" % node.objid
|
||||
rtrid = cls.routerid(node)
|
||||
cfg += " bgp router-id %s\n" % rtrid
|
||||
cfg += " redistribute connected\n"
|
||||
cfg += "! neighbor 1.2.3.4 remote-as 555\n!\n"
|
||||
return cfg
|
||||
|
||||
addservice(Bgp)
|
||||
|
||||
class Rip(QuaggaService):
|
||||
''' The RIP service provides IPv4 routing for wired networks.
|
||||
'''
|
||||
_name = "RIP"
|
||||
_startup = ("sh quaggaboot.sh ripd",)
|
||||
_shutdown = ("killall ripd", )
|
||||
_validate = ("pidof ripd", )
|
||||
_ipv4_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
cfg = """\
|
||||
router rip
|
||||
redistribute static
|
||||
redistribute connected
|
||||
redistribute ospf
|
||||
network 0.0.0.0/0
|
||||
!
|
||||
"""
|
||||
return cfg
|
||||
|
||||
addservice(Rip)
|
||||
|
||||
class Ripng(QuaggaService):
|
||||
''' The RIP NG service provides IPv6 routing for wired networks.
|
||||
'''
|
||||
_name = "RIPNG"
|
||||
_startup = ("sh quaggaboot.sh ripngd",)
|
||||
_shutdown = ("killall ripngd", )
|
||||
_validate = ("pidof ripngd", )
|
||||
_ipv6_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
cfg = """\
|
||||
router ripng
|
||||
redistribute static
|
||||
redistribute connected
|
||||
redistribute ospf6
|
||||
network ::/0
|
||||
!
|
||||
"""
|
||||
return cfg
|
||||
|
||||
addservice(Ripng)
|
||||
|
||||
class Babel(QuaggaService):
|
||||
''' The Babel service provides a loop-avoiding distance-vector routing
|
||||
protocol for IPv6 and IPv4 with fast convergence properties.
|
||||
'''
|
||||
_name = "Babel"
|
||||
_startup = ("sh quaggaboot.sh babeld",)
|
||||
_shutdown = ("killall babeld", )
|
||||
_validate = ("pidof babeld", )
|
||||
_ipv6_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
cfg = "router babel\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += " network %s\n" % ifc.name
|
||||
cfg += " redistribute static\n redistribute connected\n"
|
||||
return cfg
|
||||
|
||||
@classmethod
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
type = "wired"
|
||||
if ifc.net and ifc.net.linktype == coreapi.CORE_LINK_WIRELESS:
|
||||
return " babel wireless\n no babel split-horizon\n"
|
||||
else:
|
||||
return " babel wired\n babel split-horizon\n"
|
||||
|
||||
addservice(Babel)
|
||||
|
||||
|
||||
class Vtysh(CoreService):
|
||||
''' Simple service to run vtysh -b (boot) after all Quagga daemons have
|
||||
started.
|
||||
'''
|
||||
_name = "vtysh"
|
||||
_group = "Quagga"
|
||||
_startindex = 45
|
||||
_startup = ("sh quaggaboot.sh vtysh",)
|
||||
_shutdown = ()
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
return ""
|
||||
|
||||
addservice(Vtysh)
|
||||
|
||||
|
129
daemon/core/services/security.py
Normal file
@@ -0,0 +1,129 @@
|
|||
#
|
||||
# CORE - define security services : vpnclient, vpnserver, ipsec and firewall
|
||||
#
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
'''
|
||||
security.py: defines security services (vpnclient, vpnserver, ipsec and
|
||||
firewall)
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
from core.service import CoreService, addservice
|
||||
from core.constants import *
|
||||
|
||||
class VPNClient(CoreService):
|
||||
'''
|
||||
'''
|
||||
_name = "VPNClient"
|
||||
_group = "Security"
|
||||
_configs = ('vpnclient.sh', )
|
||||
_startindex = 60
|
||||
_startup = ('sh vpnclient.sh',)
|
||||
_shutdown = ("killall openvpn",)
|
||||
_validate = ("pidof openvpn", )
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the client.conf and vpnclient.sh file contents to the
GUI for user customization.
'''
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# custom VPN Client configuration for service (security.py)\n"
|
||||
fname = "%s/examples/services/sampleVPNClient" % CORE_DATA_DIR
|
||||
try:
|
||||
cfg += open(fname, "rb").read()
|
||||
except Exception, e:
|
||||
print "Error opening VPN client configuration template (%s): %s" % \
|
||||
(fname, e)
|
||||
return cfg
|
||||
|
||||
# this line is required to add the above class to the list of available services
|
||||
addservice(VPNClient)
|
||||
|
||||
class VPNServer(CoreService):
|
||||
'''
|
||||
'''
|
||||
_name = "VPNServer"
|
||||
_group = "Security"
|
||||
_configs = ('vpnserver.sh', )
|
||||
_startindex = 50
|
||||
_startup = ('sh vpnserver.sh',)
|
||||
_shutdown = ("killall openvpn",)
|
||||
_validate = ("pidof openvpn", )
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the sample server.conf and vpnserver.sh file contents to
|
||||
the GUI for user customization.
|
||||
'''
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# custom VPN Server Configuration for service (security.py)\n"
|
||||
fname = "%s/examples/services/sampleVPNServer" % CORE_DATA_DIR
|
||||
try:
|
||||
cfg += open(fname, "rb").read()
|
||||
except Exception, e:
|
||||
print "Error opening VPN server configuration template (%s): %s" % \
|
||||
(fname, e)
|
||||
return cfg
|
||||
|
||||
addservice(VPNServer)
|
||||
|
||||
class IPsec(CoreService):
|
||||
'''
|
||||
'''
|
||||
_name = "IPsec"
|
||||
_group = "Security"
|
||||
_configs = ('ipsec.sh', )
|
||||
_startindex = 60
|
||||
_startup = ('sh ipsec.sh',)
|
||||
_shutdown = ("killall racoon",)
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the ipsec.conf and racoon.conf file contents to
|
||||
the GUI for user customization.
|
||||
'''
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# set up static tunnel mode security association for service "
|
||||
cfg += "(security.py)\n"
|
||||
fname = "%s/examples/services/sampleIPsec" % CORE_DATA_DIR
|
||||
try:
|
||||
cfg += open(fname, "rb").read()
|
||||
except Exception, e:
|
||||
print "Error opening IPsec configuration template (%s): %s" % \
|
||||
(fname, e)
|
||||
return cfg
|
||||
|
||||
addservice(IPsec)
|
||||
|
||||
class Firewall(CoreService):
|
||||
'''
|
||||
'''
|
||||
_name = "Firewall"
|
||||
_group = "Security"
|
||||
_configs = ('firewall.sh', )
|
||||
_startindex = 20
|
||||
_startup = ('sh firewall.sh',)
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the firewall rule examples to the GUI for user customization.
|
||||
'''
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# custom node firewall rules for service (security.py)\n"
|
||||
fname = "%s/examples/services/sampleFirewall" % CORE_DATA_DIR
|
||||
try:
|
||||
cfg += open(fname, "rb").read()
|
||||
except Exception, e:
|
||||
print "Error opening Firewall configuration template (%s): %s" % \
|
||||
(fname, e)
|
||||
return cfg
|
||||
|
||||
addservice(Firewall)
|
||||
|
189
daemon/core/services/ucarp.py
Executable file
@@ -0,0 +1,189 @@
|
|||
#
|
||||
# CORE configuration for UCARP
|
||||
# Copyright (c) 2012 Jonathan deBoer
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
#
|
||||
# author: Jonathan deBoer <jdccdevel@gmail.com>
|
||||
#
|
||||
'''
|
||||
ucarp.py: defines high-availability IP address controlled by ucarp
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
from core.service import CoreService, addservice
|
||||
from core.misc.ipaddr import IPv4Prefix
|
||||
from core.constants import *
|
||||
|
||||
|
||||
UCARP_ETC="/usr/local/etc/ucarp"
|
||||
|
||||
class Ucarp(CoreService):
|
||||
'''
|
||||
'''
|
||||
_name = "ucarp"
|
||||
_group = "Utility"
|
||||
_depends = ( )
|
||||
_dirs = (UCARP_ETC, )
|
||||
_configs = (UCARP_ETC + "/default.sh", UCARP_ETC + "/default-up.sh", UCARP_ETC + "/default-down.sh", "ucarpboot.sh",)
|
||||
_startindex = 65
|
||||
_startup = ("sh ucarpboot.sh",)
|
||||
_shutdown = ("killall ucarp", )
|
||||
_validate = ("pidof ucarp", )
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the appropriate UCARP configuration file contents.
|
||||
'''
|
||||
if filename == cls._configs[0]:
|
||||
return cls.generateUcarpConf(node, services)
|
||||
elif filename == cls._configs[1]:
|
||||
return cls.generateVipUp(node, services)
|
||||
elif filename == cls._configs[2]:
|
||||
return cls.generateVipDown(node, services)
|
||||
elif filename == cls._configs[3]:
|
||||
return cls.generateUcarpBoot(node, services)
|
||||
else:
|
||||
raise ValueError
|
||||
|
||||
@classmethod
|
||||
def generateUcarpConf(cls, node, services):
|
||||
''' Returns configuration file text.
|
||||
'''
|
||||
try:
|
||||
ucarp_bin = node.session.cfg['ucarp_bin']
|
||||
except KeyError:
|
||||
ucarp_bin = "/usr/sbin/ucarp"
|
||||
return """\
|
||||
#!/bin/sh
|
||||
# Location of UCARP executable
|
||||
UCARP_EXEC=%s
|
||||
|
||||
# Location of the UCARP config directory
|
||||
UCARP_CFGDIR=%s
|
||||
|
||||
# Logging Facility
|
||||
FACILITY=daemon
|
||||
|
||||
# Instance ID
|
||||
# Any number from 1 to 255
|
||||
INSTANCE_ID=1
|
||||
|
||||
# Password
|
||||
# Master and Backup(s) need to be the same
|
||||
PASSWORD="changeme"
|
||||
|
||||
# The failover application address
|
||||
VIRTUAL_ADDRESS=127.0.0.254
|
||||
VIRTUAL_NET=8
|
||||
|
||||
# Interface for IP Address
|
||||
INTERFACE=lo
|
||||
|
||||
# Maintenance address of the local machine
|
||||
SOURCE_ADDRESS=127.0.0.1
|
||||
|
||||
# The ratio number to be considered before marking the node as dead
|
||||
DEAD_RATIO=3
|
||||
|
||||
# UCARP base, lower number will be preferred master
|
||||
# set to same to have master stay as long as possible
|
||||
UCARP_BASE=1
|
||||
SKEW=0
|
||||
|
||||
# UCARP options
|
||||
# -z run shutdown script on exit
|
||||
# -P force preferred master
|
||||
# -n don't run down script at start up when we are backup
|
||||
# -M use broadcast instead of multicast
|
||||
# -S ignore interface state
|
||||
OPTIONS="-z -n -M"
|
||||
|
||||
# Send extra parameter to down and up scripts
|
||||
#XPARAM="-x <enter param here>"
|
||||
XPARAM="-x ${VIRTUAL_NET}"
|
||||
|
||||
# The start and stop scripts
|
||||
START_SCRIPT=${UCARP_CFGDIR}/default-up.sh
|
||||
STOP_SCRIPT=${UCARP_CFGDIR}/default-down.sh
|
||||
|
||||
# These lines should not need to be touched
|
||||
UCARP_OPTS="$OPTIONS -b $UCARP_BASE -k $SKEW -i $INTERFACE -v $INSTANCE_ID -p $PASSWORD -u $START_SCRIPT -d $STOP_SCRIPT -a $VIRTUAL_ADDRESS -s $SOURCE_ADDRESS -f $FACILITY $XPARAM"
|
||||
|
||||
${UCARP_EXEC} -B ${UCARP_OPTS}
|
||||
""" % (ucarp_bin, UCARP_ETC)
|
||||
|
||||
@classmethod
|
||||
def generateUcarpBoot(cls, node, services):
|
||||
''' Generate a shell script used to boot the Ucarp daemons.
|
||||
'''
|
||||
try:
|
||||
ucarp_bin = node.session.cfg['ucarp_bin']
|
||||
except KeyError:
|
||||
ucarp_bin = "/usr/sbin/ucarp"
|
||||
return """\
|
||||
#!/bin/sh
|
||||
# Location of the UCARP config directory
|
||||
UCARP_CFGDIR=%s
|
||||
|
||||
chmod a+x ${UCARP_CFGDIR}/*.sh
|
||||
|
||||
# Start the default ucarp daemon configuration
|
||||
${UCARP_CFGDIR}/default.sh
|
||||
|
||||
""" % (UCARP_ETC)
|
||||
|
||||
@classmethod
|
||||
def generateVipUp(cls, node, services):
|
||||
''' Generate a shell script used to start the virtual ip
|
||||
'''
|
||||
try:
|
||||
ucarp_bin = node.session.cfg['ucarp_bin']
|
||||
except KeyError:
|
||||
ucarp_bin = "/usr/sbin/ucarp"
|
||||
return """\
|
||||
#!/bin/bash
|
||||
|
||||
# Should be invoked as "default-up.sh <dev> <ip> [<prefix length>]"
|
||||
exec 2> /dev/null
|
||||
|
||||
IP="${2}"
|
||||
NET="${3}"
|
||||
if [ -z "$NET" ]; then
|
||||
NET="24"
|
||||
fi
|
||||
|
||||
/sbin/ip addr add ${IP}/${NET} dev "$1"
|
||||
|
||||
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def generateVipDown(cls, node, services):
|
||||
''' Generate a shell script used to stop the virtual ip
|
||||
'''
|
||||
try:
|
||||
ucarp_bin = node.session.cfg['ucarp_bin']
|
||||
except KeyError:
|
||||
ucarp_bin = "/usr/sbin/ucarp"
|
||||
return """\
|
||||
#!/bin/bash
|
||||
|
||||
# Should be invoked as "default-down.sh <dev> <ip> [<prefix length>]"
|
||||
exec 2> /dev/null
|
||||
|
||||
IP="${2}"
|
||||
NET="${3}"
|
||||
if [ -z "$NET" ]; then
|
||||
NET="24"
|
||||
fi
|
||||
|
||||
/sbin/ip addr del ${IP}/${NET} dev "$1"
|
||||
|
||||
|
||||
"""
|
||||
|
||||
|
||||
addservice(Ucarp)
|
||||
|
676
daemon/core/services/utility.py
Normal file
@@ -0,0 +1,676 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
utility.py: defines miscellaneous utility services.
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
from core.service import CoreService, addservice
|
||||
from core.misc.ipaddr import IPv4Prefix, IPv6Prefix
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
|
||||
class UtilService(CoreService):
|
||||
''' Parent class for utility services.
|
||||
'''
|
||||
_name = "UtilityProcess"
|
||||
_group = "Utility"
|
||||
_depends = ()
|
||||
_dirs = ()
|
||||
_configs = ()
|
||||
_startindex = 80
|
||||
_startup = ()
|
||||
_shutdown = ()
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
return ""
|
||||
|
||||
class IPForwardService(UtilService):
|
||||
_name = "IPForward"
|
||||
_configs = ("ipforward.sh", )
|
||||
_startindex = 5
|
||||
_startup = ("sh ipforward.sh", )
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
if os.uname()[0] == "Linux":
|
||||
return cls.generateconfiglinux(node, filename, services)
|
||||
elif os.uname()[0] == "FreeBSD":
|
||||
return cls.generateconfigbsd(node, filename, services)
|
||||
else:
|
||||
raise Exception, "unknown platform"
|
||||
|
||||
@classmethod
|
||||
def generateconfiglinux(cls, node, filename, services):
|
||||
cfg = """\
|
||||
#!/bin/sh
|
||||
# auto-generated by IPForward service (utility.py)
|
||||
%s -w net.ipv4.conf.all.forwarding=1
|
||||
%s -w net.ipv6.conf.all.forwarding=1
|
||||
%s -w net.ipv4.conf.all.send_redirects=0
|
||||
%s -w net.ipv4.conf.all.rp_filter=0
|
||||
%s -w net.ipv4.conf.default.rp_filter=0
|
||||
""" % (SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN)
|
||||
for ifc in node.netifs():
|
||||
name = sysctldevname(ifc.name)
|
||||
cfg += "%s -w net.ipv4.conf.%s.forwarding=1\n" % (SYSCTL_BIN, name)
|
||||
cfg += "%s -w net.ipv4.conf.%s.send_redirects=0\n" % \
|
||||
(SYSCTL_BIN, name)
|
||||
cfg += "%s -w net.ipv4.conf.%s.rp_filter=0\n" % (SYSCTL_BIN, name)
|
||||
return cfg
|
||||
|
||||
@classmethod
|
||||
def generateconfigbsd(cls, node, filename, services):
|
||||
return """\
|
||||
#!/bin/sh
|
||||
# auto-generated by IPForward service (utility.py)
|
||||
%s -w net.inet.ip.forwarding=1
|
||||
%s -w net.inet6.ip6.forwarding=1
|
||||
%s -w net.inet.icmp.bmcastecho=1
|
||||
%s -w net.inet.icmp.icmplim=0
|
||||
""" % (SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN)
|
||||
|
||||
addservice(IPForwardService)
|
||||
|
||||
class DefaultRouteService(UtilService):
|
||||
_name = "DefaultRoute"
|
||||
_configs = ("defaultroute.sh",)
|
||||
_startup = ("sh defaultroute.sh",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# auto-generated by DefaultRoute service (utility.py)\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += "\n".join(map(cls.addrstr, ifc.addrlist))
|
||||
cfg += "\n"
|
||||
return cfg
|
||||
|
||||
@staticmethod
|
||||
def addrstr(x):
|
||||
if x.find(":") >= 0:
|
||||
net = IPv6Prefix(x)
|
||||
fam = "inet6 ::"
|
||||
else:
|
||||
net = IPv4Prefix(x)
|
||||
fam = "inet 0.0.0.0"
|
||||
if net.maxaddr() == net.minaddr():
|
||||
return ""
|
||||
else:
|
||||
if os.uname()[0] == "Linux":
|
||||
rtcmd = "ip route add default via"
|
||||
elif os.uname()[0] == "FreeBSD":
|
||||
rtcmd = "route add -%s" % fam
|
||||
else:
|
||||
raise Exception, "unknown platform"
|
||||
return "%s %s" % (rtcmd, net.minaddr())
|
||||
|
||||
addservice(DefaultRouteService)
|
||||
|
||||
class DefaultMulticastRouteService(UtilService):
|
||||
_name = "DefaultMulticastRoute"
|
||||
_configs = ("defaultmroute.sh",)
|
||||
_startup = ("sh defaultmroute.sh",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# auto-generated by DefaultMulticastRoute service (utility.py)\n"
|
||||
cfg += "# the first interface is chosen below; please change it "
|
||||
cfg += "as needed\n"
|
||||
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
if os.uname()[0] == "Linux":
|
||||
rtcmd = "ip route add 224.0.0.0/4 dev"
|
||||
elif os.uname()[0] == "FreeBSD":
|
||||
rtcmd = "route add 224.0.0.0/4 -iface"
|
||||
else:
|
||||
raise Exception, "unknown platform"
|
||||
cfg += "%s %s\n" % (rtcmd, ifc.name)
|
||||
cfg += "\n"
|
||||
break
|
||||
return cfg
|
||||
|
||||
addservice(DefaultMulticastRouteService)
|
||||
|
||||
class StaticRouteService(UtilService):
|
||||
_name = "StaticRoute"
|
||||
_configs = ("staticroute.sh",)
|
||||
_startup = ("sh staticroute.sh",)
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# auto-generated by StaticRoute service (utility.py)\n#\n"
|
||||
cfg += "# NOTE: this service must be customized to be of any use\n"
|
||||
cfg += "# Below are samples that you can uncomment and edit.\n#\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += "\n".join(map(cls.routestr, ifc.addrlist))
|
||||
cfg += "\n"
|
||||
return cfg
|
||||
|
||||
@staticmethod
|
||||
def routestr(x):
|
||||
if x.find(":") >= 0:
|
||||
net = IPv6Prefix(x)
|
||||
fam = "inet6"
|
||||
dst = "3ffe:4::/64"
|
||||
else:
|
||||
net = IPv4Prefix(x)
|
||||
fam = "inet"
|
||||
dst = "10.9.8.0/24"
|
||||
if net.maxaddr() == net.minaddr():
|
||||
return ""
|
||||
else:
|
||||
if os.uname()[0] == "Linux":
|
||||
rtcmd = "#/sbin/ip route add %s via" % dst
|
||||
elif os.uname()[0] == "FreeBSD":
|
||||
rtcmd = "#/sbin/route add -%s %s" % (fam, dst)
|
||||
else:
|
||||
raise Exception, "unknown platform"
|
||||
return "%s %s" % (rtcmd, net.minaddr())
|
||||
|
||||
addservice(StaticRouteService)
|
||||
|
||||
class SshService(UtilService):
|
||||
_name = "SSH"
|
||||
if os.uname()[0] == "FreeBSD":
|
||||
_configs = ("startsshd.sh", "sshd_config",)
|
||||
_dirs = ()
|
||||
else:
|
||||
_configs = ("startsshd.sh", "/etc/ssh/sshd_config",)
|
||||
_dirs = ("/etc/ssh", "/var/run/sshd",)
|
||||
_startup = ("sh startsshd.sh",)
|
||||
_shutdown = ("killall sshd",)
|
||||
_validate = ()
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Use a startup script for launching sshd in order to wait for host
|
||||
key generation.
|
||||
'''
|
||||
if os.uname()[0] == "FreeBSD":
|
||||
sshcfgdir = node.nodedir
|
||||
sshstatedir = node.nodedir
|
||||
sshlibdir = "/usr/libexec"
|
||||
else:
|
||||
sshcfgdir = cls._dirs[0]
|
||||
sshstatedir = cls._dirs[1]
|
||||
sshlibdir = "/usr/lib/openssh"
|
||||
if filename == "startsshd.sh":
|
||||
return """\
|
||||
#!/bin/sh
|
||||
# auto-generated by SSH service (utility.py)
|
||||
ssh-keygen -q -t rsa -N "" -f %s/ssh_host_rsa_key
|
||||
chmod 655 %s
|
||||
# wait until RSA host key has been generated to launch sshd
|
||||
/usr/sbin/sshd -f %s/sshd_config
|
||||
""" % (sshcfgdir, sshstatedir, sshcfgdir)
|
||||
else:
|
||||
return """\
|
||||
# auto-generated by SSH service (utility.py)
|
||||
Port 22
|
||||
Protocol 2
|
||||
HostKey %s/ssh_host_rsa_key
|
||||
UsePrivilegeSeparation yes
|
||||
PidFile %s/sshd.pid
|
||||
|
||||
KeyRegenerationInterval 3600
|
||||
ServerKeyBits 768
|
||||
|
||||
SyslogFacility AUTH
|
||||
LogLevel INFO
|
||||
|
||||
LoginGraceTime 120
|
||||
PermitRootLogin yes
|
||||
StrictModes yes
|
||||
|
||||
RSAAuthentication yes
|
||||
PubkeyAuthentication yes
|
||||
|
||||
IgnoreRhosts yes
|
||||
RhostsRSAAuthentication no
|
||||
HostbasedAuthentication no
|
||||
|
||||
PermitEmptyPasswords no
|
||||
ChallengeResponseAuthentication no
|
||||
|
||||
X11Forwarding yes
|
||||
X11DisplayOffset 10
|
||||
PrintMotd no
|
||||
PrintLastLog yes
|
||||
TCPKeepAlive yes
|
||||
|
||||
AcceptEnv LANG LC_*
|
||||
Subsystem sftp %s/sftp-server
|
||||
UsePAM yes
|
||||
UseDNS no
|
||||
""" % (sshcfgdir, sshstatedir, sshlibdir)
|
||||
|
||||
addservice(SshService)
|
||||
|
||||
class DhcpService(UtilService):
|
||||
_name = "DHCP"
|
||||
_configs = ("/etc/dhcp/dhcpd.conf",)
|
||||
_dirs = ("/etc/dhcp",)
|
||||
_startup = ("dhcpd",)
|
||||
_shutdown = ("killall dhcpd",)
|
||||
_validate = ("pidof dhcpd",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate a dhcpd config file using the network address of
|
||||
each interface.
|
||||
'''
|
||||
cfg = """\
|
||||
# auto-generated by DHCP service (utility.py)
|
||||
# NOTE: move these option lines into the desired pool { } block(s) below
|
||||
#option domain-name "test.com";
|
||||
#option domain-name-servers 10.0.0.1;
|
||||
#option routers 10.0.0.1;
|
||||
|
||||
log-facility local6;
|
||||
|
||||
default-lease-time 600;
|
||||
max-lease-time 7200;
|
||||
|
||||
ddns-update-style none;
|
||||
"""
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += "\n".join(map(cls.subnetentry, ifc.addrlist))
|
||||
cfg += "\n"
|
||||
return cfg
|
||||
|
||||
@staticmethod
|
||||
def subnetentry(x):
|
||||
''' Generate a subnet declaration block given an IPv4 prefix string
|
||||
for inclusion in the dhcpd3 config file.
|
||||
'''
|
||||
if x.find(":") >= 0:
|
||||
return ""
|
||||
else:
|
||||
addr = x.split("/")[0]
|
||||
net = IPv4Prefix(x)
|
||||
# divide the address space in half
|
||||
rangelow = net.addr(net.numaddr() / 2)
|
||||
rangehigh = net.maxaddr()
|
||||
return """
|
||||
subnet %s netmask %s {
|
||||
pool {
|
||||
range %s %s;
|
||||
default-lease-time 600;
|
||||
option routers %s;
|
||||
}
|
||||
}
|
||||
""" % (net.prefixstr(), net.netmaskstr(), rangelow, rangehigh, addr)
|
||||
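# Editor's note (illustrative, not part of this commit): for an interface
# addressed 10.0.0.1/24 the block above yields roughly
#   subnet 10.0.0.0 netmask 255.255.255.0 {
#       pool { range 10.0.0.127 10.0.0.254; ... option routers 10.0.0.1; }
#   }
# i.e. roughly the upper half of the subnet is leased out and the node's own
# address is offered as the default router (exact bounds depend on the
# IPv4Prefix.addr()/maxaddr() semantics in core.misc.ipaddr).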
|
||||
addservice(DhcpService)
|
||||
|
||||
class DhcpClientService(UtilService):
|
||||
''' Use a DHCP client for all interfaces for addressing.
|
||||
'''
|
||||
_name = "DHCPClient"
|
||||
_configs = ("startdhcpclient.sh",)
|
||||
_startup = ("sh startdhcpclient.sh",)
|
||||
_shutdown = ("killall dhclient",)
|
||||
_validate = ("pidof dhclient",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate a script to invoke dhclient on all interfaces.
|
||||
'''
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# auto-generated by DHCPClient service (utility.py)\n"
|
||||
cfg += "# uncomment this mkdir line and symlink line to enable client-"
|
||||
cfg += "side DNS\n# resolution based on the DHCP server response.\n"
|
||||
cfg += "#mkdir -p /var/run/resolvconf/interface\n"
|
||||
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += "#ln -s /var/run/resolvconf/interface/%s.dhclient" % ifc.name
|
||||
cfg += " /var/run/resolvconf/resolv.conf\n"
|
||||
cfg += "/sbin/dhclient -nw -pf /var/run/dhclient-%s.pid" % ifc.name
|
||||
cfg += " -lf /var/run/dhclient-%s.lease %s\n" % (ifc.name, ifc.name)
|
||||
return cfg
|
||||
|
||||
addservice(DhcpClientService)
|
||||
|
||||
class FtpService(UtilService):
|
||||
''' Start a vsftpd server.
|
||||
'''
|
||||
_name = "FTP"
|
||||
_configs = ("vsftpd.conf",)
|
||||
_dirs = ("/var/run/vsftpd/empty", "/var/ftp",)
|
||||
_startup = ("vsftpd ./vsftpd.conf",)
|
||||
_shutdown = ("killall vsftpd",)
|
||||
_validate = ("pidof vsftpd",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate a vsftpd.conf configuration file.
|
||||
'''
|
||||
return """\
|
||||
# vsftpd.conf auto-generated by FTP service (utility.py)
|
||||
listen=YES
|
||||
anonymous_enable=YES
|
||||
local_enable=YES
|
||||
dirmessage_enable=YES
|
||||
use_localtime=YES
|
||||
xferlog_enable=YES
|
||||
connect_from_port_20=YES
|
||||
xferlog_file=/var/log/vsftpd.log
|
||||
ftpd_banner=Welcome to the CORE FTP service
|
||||
secure_chroot_dir=/var/run/vsftpd/empty
|
||||
anon_root=/var/ftp
|
||||
"""
|
||||
|
||||
addservice(FtpService)
|
||||
|
||||
class HttpService(UtilService):
|
||||
''' Start an apache server.
|
||||
'''
|
||||
_name = "HTTP"
|
||||
_configs = ("/etc/apache2/apache2.conf", "/etc/apache2/envvars",
|
||||
"/var/www/index.html",)
|
||||
_dirs = ("/etc/apache2", "/var/run/apache2", "/var/log/apache2",
|
||||
"/var/lock/apache2", "/var/www", )
|
||||
_startup = ("apache2ctl start",)
|
||||
_shutdown = ("apache2ctl stop",)
|
||||
_validate = ("pidof apache2",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate an apache2.conf configuration file.
|
||||
'''
|
||||
if filename == cls._configs[0]:
|
||||
return cls.generateapache2conf(node, filename, services)
|
||||
elif filename == cls._configs[1]:
|
||||
return cls.generateenvvars(node, filename, services)
|
||||
elif filename == cls._configs[2]:
|
||||
return cls.generatehtml(node, filename, services)
|
||||
else:
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
def generateapache2conf(cls, node, filename, services):
|
||||
return """\
|
||||
# apache2.conf generated by utility.py:HttpService
|
||||
LockFile ${APACHE_LOCK_DIR}/accept.lock
|
||||
PidFile ${APACHE_PID_FILE}
|
||||
Timeout 300
|
||||
KeepAlive On
|
||||
MaxKeepAliveRequests 100
|
||||
KeepAliveTimeout 5
|
||||
|
||||
<IfModule mpm_prefork_module>
|
||||
StartServers 5
|
||||
MinSpareServers 5
|
||||
MaxSpareServers 10
|
||||
MaxClients 150
|
||||
MaxRequestsPerChild 0
|
||||
</IfModule>
|
||||
|
||||
<IfModule mpm_worker_module>
|
||||
StartServers 2
|
||||
MinSpareThreads 25
|
||||
MaxSpareThreads 75
|
||||
ThreadLimit 64
|
||||
ThreadsPerChild 25
|
||||
MaxClients 150
|
||||
MaxRequestsPerChild 0
|
||||
</IfModule>
|
||||
|
||||
<IfModule mpm_event_module>
|
||||
StartServers 2
|
||||
MinSpareThreads 25
|
||||
MaxSpareThreads 75
|
||||
ThreadLimit 64
|
||||
ThreadsPerChild 25
|
||||
MaxClients 150
|
||||
MaxRequestsPerChild 0
|
||||
</IfModule>
|
||||
|
||||
User ${APACHE_RUN_USER}
|
||||
Group ${APACHE_RUN_GROUP}
|
||||
|
||||
AccessFileName .htaccess
|
||||
|
||||
<Files ~ "^\.ht">
|
||||
Order allow,deny
|
||||
Deny from all
|
||||
Satisfy all
|
||||
</Files>
|
||||
|
||||
DefaultType None
|
||||
|
||||
HostnameLookups Off
|
||||
|
||||
ErrorLog ${APACHE_LOG_DIR}/error.log
|
||||
LogLevel warn
|
||||
|
||||
#Include mods-enabled/*.load
|
||||
#Include mods-enabled/*.conf
|
||||
LoadModule alias_module /usr/lib/apache2/modules/mod_alias.so
|
||||
LoadModule auth_basic_module /usr/lib/apache2/modules/mod_auth_basic.so
|
||||
LoadModule authz_default_module /usr/lib/apache2/modules/mod_authz_default.so
|
||||
LoadModule authz_host_module /usr/lib/apache2/modules/mod_authz_host.so
|
||||
LoadModule authz_user_module /usr/lib/apache2/modules/mod_authz_user.so
|
||||
LoadModule autoindex_module /usr/lib/apache2/modules/mod_autoindex.so
|
||||
LoadModule dir_module /usr/lib/apache2/modules/mod_dir.so
|
||||
LoadModule env_module /usr/lib/apache2/modules/mod_env.so
|
||||
|
||||
NameVirtualHost *:80
|
||||
Listen 80
|
||||
|
||||
<IfModule mod_ssl.c>
|
||||
Listen 443
|
||||
</IfModule>
|
||||
<IfModule mod_gnutls.c>
|
||||
Listen 443
|
||||
</IfModule>
|
||||
|
||||
LogFormat "%v:%p %h %l %u %t \\"%r\\" %>s %O \\"%{Referer}i\\" \\"%{User-Agent}i\\"" vhost_combined
|
||||
LogFormat "%h %l %u %t \\"%r\\" %>s %O \\"%{Referer}i\\" \\"%{User-Agent}i\\"" combined
|
||||
LogFormat "%h %l %u %t \\"%r\\" %>s %O" common
|
||||
LogFormat "%{Referer}i -> %U" referer
|
||||
LogFormat "%{User-agent}i" agent
|
||||
|
||||
ServerTokens OS
|
||||
ServerSignature On
|
||||
TraceEnable Off
|
||||
|
||||
<VirtualHost *:80>
|
||||
ServerAdmin webmaster@localhost
|
||||
DocumentRoot /var/www
|
||||
<Directory />
|
||||
Options FollowSymLinks
|
||||
AllowOverride None
|
||||
</Directory>
|
||||
<Directory /var/www/>
|
||||
Options Indexes FollowSymLinks MultiViews
|
||||
AllowOverride None
|
||||
Order allow,deny
|
||||
allow from all
|
||||
</Directory>
|
||||
ErrorLog ${APACHE_LOG_DIR}/error.log
|
||||
LogLevel warn
|
||||
CustomLog ${APACHE_LOG_DIR}/access.log combined
|
||||
</VirtualHost>
|
||||
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def generateenvvars(cls, node, filename, services):
|
||||
return """\
|
||||
# this file is used by apache2ctl - generated by utility.py:HttpService
|
||||
# these settings come from a default Ubuntu apache2 installation
|
||||
export APACHE_RUN_USER=www-data
|
||||
export APACHE_RUN_GROUP=www-data
|
||||
export APACHE_PID_FILE=/var/run/apache2.pid
|
||||
export APACHE_RUN_DIR=/var/run/apache2
|
||||
export APACHE_LOCK_DIR=/var/lock/apache2
|
||||
export APACHE_LOG_DIR=/var/log/apache2
|
||||
export LANG=C
|
||||
export LANG
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def generatehtml(cls, node, filename, services):
|
||||
body = """\
|
||||
<!-- generated by utility.py:HttpService -->
|
||||
<h1>%s web server</h1>
|
||||
<p>This is the default web page for this server.</p>
|
||||
<p>The web server software is running but no content has been added, yet.</p>
|
||||
""" % node.name
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
body += "<li>%s - %s</li>\n" % (ifc.name, ifc.addrlist)
|
||||
return "<html><body>%s</body></html>" % body
|
||||
|
||||
addservice(HttpService)
|
||||
|
||||
class PcapService(UtilService):
|
||||
''' Pcap service for logging packets.
|
||||
'''
|
||||
_name = "pcap"
|
||||
_configs = ("pcap.sh", )
|
||||
_dirs = ()
|
||||
_startindex = 1
|
||||
_startup = ("sh pcap.sh start",)
|
||||
_shutdown = ("sh pcap.sh stop",)
|
||||
_validate = ("pidof tcpdump",)
|
||||
_meta = "logs network traffic to pcap packet capture files"
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate a startpcap.sh traffic logging script.
|
||||
'''
|
||||
cfg = """
|
||||
#!/bin/sh
|
||||
# set tcpdump options here (see 'man tcpdump' for help)
|
||||
# (-s snap length, -C limit pcap file length, -n disable name resolution)
|
||||
DUMPOPTS="-s 12288 -C 10 -n"
|
||||
|
||||
if [ "x$1" = "xstart" ]; then
|
||||
|
||||
"""
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
cfg += '# '
|
||||
redir = "< /dev/null"
|
||||
cfg += "tcpdump ${DUMPOPTS} -w %s.%s.pcap -i %s %s &\n" % \
|
||||
(node.name, ifc.name, ifc.name, redir)
|
||||
cfg += """
|
||||
|
||||
elif [ "x$1" = "xstop" ]; then
|
||||
mkdir -p ${SESSION_DIR}/pcap
|
||||
mv *.pcap ${SESSION_DIR}/pcap
|
||||
fi;
|
||||
"""
|
||||
return cfg
|
||||
|
||||
addservice(PcapService)
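
# Sketch of the generated pcap.sh for a hypothetical node "n1" with one
# non-control interface eth0 (not part of the original import); control
# interfaces get their tcpdump line commented out by the '# ' prefix above:
#
#   DUMPOPTS="-s 12288 -C 10 -n"
#   if [ "x$1" = "xstart" ]; then
#   tcpdump ${DUMPOPTS} -w n1.eth0.pcap -i eth0 < /dev/null &
#   elif [ "x$1" = "xstop" ]; then
#       mkdir -p ${SESSION_DIR}/pcap
#       mv *.pcap ${SESSION_DIR}/pcap
#   fi;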
|
||||
|
||||
class RadvdService(UtilService):
|
||||
_name = "radvd"
|
||||
_configs = ("/etc/radvd/radvd.conf",)
|
||||
_dirs = ("/etc/radvd",)
|
||||
_startup = ("radvd -C /etc/radvd/radvd.conf -m logfile -l /var/log/radvd.log",)
|
||||
_shutdown = ("pkill radvd",)
|
||||
_validate = ("pidof radvd",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate a RADVD router advertisement daemon config file
|
||||
using the network address of each interface.
|
||||
'''
|
||||
cfg = "# auto-generated by RADVD service (utility.py)\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
prefixes = map(cls.subnetentry, ifc.addrlist)
|
||||
if len(prefixes) < 1:
|
||||
continue
|
||||
cfg += """\
|
||||
interface %s
|
||||
{
|
||||
AdvSendAdvert on;
|
||||
MinRtrAdvInterval 3;
|
||||
MaxRtrAdvInterval 10;
|
||||
AdvDefaultPreference low;
|
||||
AdvHomeAgentFlag off;
|
||||
""" % ifc.name
|
||||
for prefix in prefixes:
|
||||
if prefix == "":
|
||||
continue
|
||||
cfg += """\
|
||||
prefix %s
|
||||
{
|
||||
AdvOnLink on;
|
||||
AdvAutonomous on;
|
||||
AdvRouterAddr on;
|
||||
};
|
||||
""" % prefix
|
||||
cfg += "};\n"
|
||||
return cfg
|
||||
|
||||
@staticmethod
|
||||
def subnetentry(x):
|
||||
''' Generate a subnet declaration block given an IPv6 prefix string
|
||||
for inclusion in the RADVD config file.
|
||||
'''
|
||||
if x.find(":") >= 0:
|
||||
net = IPv6Prefix(x)
|
||||
return str(net)
|
||||
else:
|
||||
return ""
|
||||
|
||||
addservice(RadvdService)
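
# Shape of the generated radvd.conf for a hypothetical interface eth0 carrying
# the (example) prefix 2001:db8::/64 -- one interface block per non-control
# interface, one prefix block per IPv6 prefix from subnetentry() (sketch, not
# part of the original import):
#
#   interface eth0
#   {
#     AdvSendAdvert on;
#     MinRtrAdvInterval 3;
#     MaxRtrAdvInterval 10;
#     AdvDefaultPreference low;
#     AdvHomeAgentFlag off;
#     prefix 2001:db8::/64
#     {
#       AdvOnLink on;
#       AdvAutonomous on;
#       AdvRouterAddr on;
#     };
#   };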
|
||||
|
||||
class AtdService(UtilService):
|
||||
''' Atd service for scheduling at jobs
|
||||
'''
|
||||
_name = "atd"
|
||||
_configs = ("startatd.sh",)
|
||||
_dirs = ("/var/spool/cron/atjobs", "/var/spool/cron/atspool")
|
||||
_startup = ("sh startatd.sh", )
|
||||
_shutdown = ("pkill atd", )
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
return """
|
||||
#!/bin/sh
|
||||
echo 00001 > /var/spool/cron/atjobs/.SEQ
|
||||
chown -R daemon /var/spool/cron/*
|
||||
chmod -R 700 /var/spool/cron/*
|
||||
atd
|
||||
"""
|
||||
|
||||
addservice(AtdService)
|
||||
|
||||
class UserDefinedService(UtilService):
|
||||
''' Dummy service allowing customization of anything.
|
||||
'''
|
||||
_name = "UserDefined"
|
||||
_startindex = 50
|
||||
_meta = "Customize this service to do anything upon startup."
|
||||
|
||||
addservice(UserDefinedService)
472
daemon/core/services/xorp.py
Normal file
@ -0,0 +1,472 @@
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
xorp.py: defines routing services provided by the XORP routing suite.
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
from core.service import CoreService, addservice
|
||||
from core.misc.ipaddr import IPv4Prefix
|
||||
from core.constants import *
|
||||
|
||||
class XorpRtrmgr(CoreService):
|
||||
''' XORP router manager service builds a config.boot file based on other
|
||||
enabled XORP services, and launches necessary daemons upon startup.
|
||||
'''
|
||||
_name = "xorp_rtrmgr"
|
||||
_group = "XORP"
|
||||
_depends = ()
|
||||
_dirs = ("/etc/xorp",)
|
||||
_configs = ("/etc/xorp/config.boot",)
|
||||
_startindex = 35
|
||||
_startup = ("xorp_rtrmgr -d -b %s -l /var/log/%s.log -P /var/run/%s.pid" % (_configs[0], _name, _name),)
|
||||
_shutdown = ("killall xorp_rtrmgr", )
|
||||
_validate = ("pidof xorp_rtrmgr", )
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Returns config.boot configuration file text. Other services that
|
||||
depend on this will have generatexorpconfig() hooks that are
|
||||
invoked here. Filename currently ignored.
|
||||
'''
|
||||
cfg = "interfaces {\n"
|
||||
for ifc in node.netifs():
|
||||
cfg += " interface %s {\n" % ifc.name
|
||||
cfg += "\tvif %s {\n" % ifc.name
|
||||
cfg += "".join(map(cls.addrstr, ifc.addrlist))
|
||||
cfg += cls.lladdrstr(ifc)
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n\n"
|
||||
|
||||
for s in services:
|
||||
try:
|
||||
s._depends.index(cls._name)
|
||||
cfg += s.generatexorpconfig(node)
|
||||
except ValueError:
|
||||
pass
|
||||
return cfg
|
||||
|
||||
@staticmethod
|
||||
def addrstr(x):
|
||||
''' helper for mapping IP addresses to XORP config statements
|
||||
'''
|
||||
try:
|
||||
(addr, plen) = x.split("/")
|
||||
except Exception:
|
||||
raise ValueError, "invalid address"
|
||||
cfg = "\t address %s {\n" % addr
|
||||
cfg += "\t\tprefix-length: %s\n" % plen
|
||||
cfg +="\t }\n"
|
||||
return cfg
|
||||
|
||||
@staticmethod
|
||||
def lladdrstr(ifc):
|
||||
''' helper for adding link-local address entries (required by OSPFv3)
|
||||
'''
|
||||
cfg = "\t address %s {\n" % ifc.hwaddr.tolinklocal()
|
||||
cfg += "\t\tprefix-length: 64\n"
|
||||
cfg += "\t }\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpRtrmgr)
|
||||
|
||||
class XorpService(CoreService):
|
||||
''' Parent class for XORP services. Defines properties and methods
|
||||
common to XORP's routing daemons.
|
||||
'''
|
||||
_name = "XorpDaemon"
|
||||
_group = "XORP"
|
||||
_depends = ("xorp_rtrmgr", )
|
||||
_dirs = ()
|
||||
_configs = ()
|
||||
_startindex = 40
|
||||
_startup = ()
|
||||
_shutdown = ()
|
||||
_meta = "The config file for this service can be found in the xorp_rtrmgr service."
|
||||
|
||||
@staticmethod
|
||||
def fea(forwarding):
|
||||
''' Helper to add a forwarding engine entry to the config file.
|
||||
'''
|
||||
cfg = "fea {\n"
|
||||
cfg += " %s {\n" % forwarding
|
||||
cfg += "\tdisable:false\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
@staticmethod
|
||||
def mfea(forwarding, ifcs):
|
||||
''' Helper to add a multicast forwarding engine entry to the config file.
|
||||
'''
|
||||
names = []
|
||||
for ifc in ifcs:
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
names.append(ifc.name)
|
||||
names.append("register_vif")
|
||||
|
||||
cfg = "plumbing {\n"
|
||||
cfg += " %s {\n" % forwarding
|
||||
for name in names:
|
||||
cfg += "\tinterface %s {\n" % name
|
||||
cfg += "\t vif %s {\n" % name
|
||||
cfg += "\t\tdisable: false\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
|
||||
@staticmethod
|
||||
def policyexportconnected():
|
||||
''' Helper to add a policy statement for exporting connected routes.
|
||||
'''
|
||||
cfg = "policy {\n"
|
||||
cfg += " policy-statement export-connected {\n"
|
||||
cfg += "\tterm 100 {\n"
|
||||
cfg += "\t from {\n"
|
||||
cfg += "\t\tprotocol: \"connected\"\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
@staticmethod
|
||||
def routerid(node):
|
||||
''' Helper to return the first IPv4 address of a node as its router ID.
|
||||
'''
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") >= 0:
|
||||
return a.split('/')[0]
|
||||
#raise ValueError, "no IPv4 address found for router ID"
|
||||
return "0.0.0.0"
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
return ""
|
||||
|
||||
class XorpOspfv2(XorpService):
|
||||
''' The OSPFv2 service provides IPv4 routing for wired networks. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified XORP configuration file.
|
||||
'''
|
||||
_name = "XORP_OSPFv2"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.fea("unicast-forwarding4")
|
||||
rtrid = cls.routerid(node)
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " ospf4 {\n"
|
||||
cfg += "\trouter-id: %s\n" % rtrid
|
||||
cfg += "\tarea 0.0.0.0 {\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += "\t interface %s {\n" % ifc.name
|
||||
cfg += "\t\tvif %s {\n" % ifc.name
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") < 0:
|
||||
continue
|
||||
addr = a.split("/")[0]
|
||||
cfg += "\t\t address %s {\n" % addr
|
||||
cfg += "\t\t }\n"
|
||||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpOspfv2)
|
||||
|
||||
class XorpOspfv3(XorpService):
|
||||
''' The OSPFv3 service provides IPv6 routing. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified XORP configuration file.
|
||||
'''
|
||||
_name = "XORP_OSPFv3"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.fea("unicast-forwarding6")
|
||||
rtrid = cls.routerid(node)
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " ospf6 0 { /* Instance ID 0 */\n"
|
||||
cfg += "\trouter-id: %s\n" % rtrid
|
||||
cfg += "\tarea 0.0.0.0 {\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += "\t interface %s {\n" % ifc.name
|
||||
cfg += "\t\tvif %s {\n" % ifc.name
|
||||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpOspfv3)
|
||||
|
||||
class XorpBgp(XorpService):
|
||||
''' IPv4 inter-domain routing. AS numbers and peers must be customized.
|
||||
'''
|
||||
_name = "XORP_BGP"
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = "/* This is a sample config that should be customized with\n"
|
||||
cfg += " appropriate AS numbers and peers */\n"
|
||||
cfg += cls.fea("unicast-forwarding4")
|
||||
cfg += cls.policyexportconnected()
|
||||
rtrid = cls.routerid(node)
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " bgp {\n"
|
||||
cfg += "\tbgp-id: %s\n" % rtrid
|
||||
cfg += "\tlocal-as: 65001 /* change this */\n"
|
||||
cfg += "\texport: \"export-connected\"\n"
|
||||
cfg += "\tpeer 10.0.1.1 { /* change this */\n"
|
||||
cfg += "\t local-ip: 10.0.1.1\n"
|
||||
cfg += "\t as: 65002\n"
|
||||
cfg += "\t next-hop: 10.0.0.2\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpBgp)
|
||||
|
||||
class XorpRip(XorpService):
|
||||
''' RIP IPv4 unicast routing.
|
||||
'''
|
||||
_name = "XORP_RIP"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.fea("unicast-forwarding4")
|
||||
cfg += cls.policyexportconnected()
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " rip {\n"
|
||||
cfg += "\texport: \"export-connected\"\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += "\tinterface %s {\n" % ifc.name
|
||||
cfg += "\t vif %s {\n" % ifc.name
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") < 0:
|
||||
continue
|
||||
addr = a.split("/")[0]
|
||||
cfg += "\t\taddress %s {\n" % addr
|
||||
cfg += "\t\t disable: false\n"
|
||||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpRip)
|
||||
|
||||
class XorpRipng(XorpService):
|
||||
    ''' RIPng IPv6 unicast routing.
|
||||
'''
|
||||
_name = "XORP_RIPNG"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.fea("unicast-forwarding6")
|
||||
cfg += cls.policyexportconnected()
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " ripng {\n"
|
||||
cfg += "\texport: \"export-connected\"\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += "\tinterface %s {\n" % ifc.name
|
||||
cfg += "\t vif %s {\n" % ifc.name
|
||||
# for a in ifc.addrlist:
|
||||
# if a.find(":") < 0:
|
||||
# continue
|
||||
# addr = a.split("/")[0]
|
||||
# cfg += "\t\taddress %s {\n" % addr
|
||||
# cfg += "\t\t disable: false\n"
|
||||
# cfg += "\t\t}\n"
|
||||
cfg += "\t\taddress %s {\n" % ifc.hwaddr.tolinklocal()
|
||||
cfg += "\t\t disable: false\n"
|
||||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpRipng)
|
||||
|
||||
class XorpPimSm4(XorpService):
|
||||
''' PIM Sparse Mode IPv4 multicast routing.
|
||||
'''
|
||||
_name = "XORP_PIMSM4"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.mfea("mfea4", node.netifs())
|
||||
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " igmp {\n"
|
||||
names = []
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
names.append(ifc.name)
|
||||
cfg += "\tinterface %s {\n" % ifc.name
|
||||
cfg += "\t vif %s {\n" % ifc.name
|
||||
cfg += "\t\tdisable: false\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " pimsm4 {\n"
|
||||
|
||||
names.append("register_vif")
|
||||
for name in names:
|
||||
cfg += "\tinterface %s {\n" % name
|
||||
cfg += "\t vif %s {\n" % name
|
||||
cfg += "\t\tdr-priority: 1\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += "\tbootstrap {\n"
|
||||
cfg += "\t cand-bsr {\n"
|
||||
cfg += "\t\tscope-zone 224.0.0.0/4 {\n"
|
||||
cfg += "\t\t cand-bsr-by-vif-name: \"%s\"\n" % names[0]
|
||||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t cand-rp {\n"
|
||||
cfg += "\t\tgroup-prefix 224.0.0.0/4 {\n"
|
||||
cfg += "\t\t cand-rp-by-vif-name: \"%s\"\n" % names[0]
|
||||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " fib2mrib {\n"
|
||||
cfg += "\tdisable: false\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpPimSm4)
|
||||
|
||||
class XorpPimSm6(XorpService):
|
||||
''' PIM Sparse Mode IPv6 multicast routing.
|
||||
'''
|
||||
_name = "XORP_PIMSM6"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.mfea("mfea6", node.netifs())
|
||||
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " mld {\n"
|
||||
names = []
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
names.append(ifc.name)
|
||||
cfg += "\tinterface %s {\n" % ifc.name
|
||||
cfg += "\t vif %s {\n" % ifc.name
|
||||
cfg += "\t\tdisable: false\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " pimsm6 {\n"
|
||||
|
||||
names.append("register_vif")
|
||||
for name in names:
|
||||
cfg += "\tinterface %s {\n" % name
|
||||
cfg += "\t vif %s {\n" % name
|
||||
cfg += "\t\tdr-priority: 1\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += "\tbootstrap {\n"
|
||||
cfg += "\t cand-bsr {\n"
|
||||
cfg += "\t\tscope-zone ff00::/8 {\n"
|
||||
cfg += "\t\t cand-bsr-by-vif-name: \"%s\"\n" % names[0]
|
||||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t cand-rp {\n"
|
||||
cfg += "\t\tgroup-prefix ff00::/8 {\n"
|
||||
cfg += "\t\t cand-rp-by-vif-name: \"%s\"\n" % names[0]
|
||||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " fib2mrib {\n"
|
||||
cfg += "\tdisable: false\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpPimSm6)
|
||||
|
||||
class XorpOlsr(XorpService):
|
||||
''' OLSR IPv4 unicast MANET routing.
|
||||
'''
|
||||
_name = "XORP_OLSR"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.fea("unicast-forwarding4")
|
||||
rtrid = cls.routerid(node)
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " olsr4 {\n"
|
||||
cfg += "\tmain-address: %s\n" % rtrid
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += "\tinterface %s {\n" % ifc.name
|
||||
cfg += "\t vif %s {\n" % ifc.name
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") < 0:
|
||||
continue
|
||||
addr = a.split("/")[0]
|
||||
cfg += "\t\taddress %s {\n" % addr
|
||||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpOlsr)
1029
daemon/core/session.py
Normal file
File diff suppressed because it is too large
0
daemon/core/xen/__init__.py
Normal file
818
daemon/core/xen/xen.py
Normal file
@ -0,0 +1,818 @@
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
'''
|
||||
xen.py: implementation of the XenNode and XenVEth classes that support
|
||||
generating Xen domUs based on an ISO image and persistent configuration area
|
||||
'''
|
||||
|
||||
from core.netns.vnet import *
|
||||
from core.netns.vnode import LxcNode
|
||||
from core.coreobj import PyCoreObj, PyCoreNode, PyCoreNetIf
|
||||
from core.misc.ipaddr import *
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
from core.api import coreapi
|
||||
from core.netns.vif import TunTap
|
||||
from core.emane.nodes import EmaneNode
|
||||
|
||||
try:
|
||||
import parted
|
||||
except ImportError, e:
|
||||
#print "Failed to load parted Python module required by Xen support."
|
||||
#print "Error was:", e
|
||||
raise ImportError
|
||||
|
||||
import base64
|
||||
import crypt
|
||||
import subprocess
|
||||
try:
|
||||
import fsimage
|
||||
except ImportError, e:
|
||||
# fix for fsimage under Ubuntu
|
||||
sys.path.append("/usr/lib/xen-default/lib/python")
|
||||
try:
|
||||
import fsimage
|
||||
except ImportError, e:
|
||||
#print "Failed to load fsimage Python module required by Xen support."
|
||||
#print "Error was:", e
|
||||
raise ImportError
|
||||
|
||||
|
||||
|
||||
import os
|
||||
import time
|
||||
import shutil
|
||||
import string
|
||||
|
||||
# XXX move these out to config file
|
||||
AWK_PATH = "/bin/awk"
|
||||
KPARTX_PATH = "/sbin/kpartx"
|
||||
LVCREATE_PATH = "/sbin/lvcreate"
|
||||
LVREMOVE_PATH = "/sbin/lvremove"
|
||||
LVCHANGE_PATH = "/sbin/lvchange"
|
||||
MKFSEXT4_PATH = "/sbin/mkfs.ext4"
|
||||
MKSWAP_PATH = "/sbin/mkswap"
|
||||
TAR_PATH = "/bin/tar"
|
||||
SED_PATH = "/bin/sed"
|
||||
XM_PATH = "/usr/sbin/xm"
|
||||
UDEVADM_PATH = "/sbin/udevadm"
|
||||
|
||||
class XenVEth(PyCoreNetIf):
|
||||
def __init__(self, node, name, localname, mtu = 1500, net = None,
|
||||
start = True, hwaddr = None):
|
||||
# note that net arg is ignored
|
||||
PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu)
|
||||
self.localname = localname
|
||||
self.up = False
|
||||
self.hwaddr = hwaddr
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def startup(self):
|
||||
cmd = [XM_PATH, 'network-attach', self.node.vmname,
|
||||
'vifname=%s' % self.localname, 'script=vif-core']
|
||||
if self.hwaddr is not None:
|
||||
cmd.append('mac=%s' % self.hwaddr)
|
||||
check_call(cmd)
|
||||
check_call([IP_BIN, "link", "set", self.localname, "up"])
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
if self.localname:
|
||||
if self.hwaddr is not None:
|
||||
pass
|
||||
# this should be doable, but some argument isn't a string
|
||||
#check_call([XM_PATH, 'network-detach', self.node.vmname,
|
||||
# self.hwaddr])
|
||||
self.up = False
|
||||
|
||||
|
||||
class XenNode(PyCoreNode):
|
||||
apitype = coreapi.CORE_NODE_XEN
|
||||
|
||||
FilesToIgnore = frozenset([
|
||||
#'ipforward.sh',
|
||||
'quaggaboot.sh',
|
||||
])
|
||||
|
||||
FilesRedirection = {
|
||||
'ipforward.sh' : '/core-tmp/ipforward.sh',
|
||||
}
|
||||
|
||||
CmdsToIgnore = frozenset([
|
||||
#'sh ipforward.sh',
|
||||
#'sh quaggaboot.sh zebra',
|
||||
#'sh quaggaboot.sh ospfd',
|
||||
#'sh quaggaboot.sh ospf6d',
|
||||
'sh quaggaboot.sh vtysh',
|
||||
'killall zebra',
|
||||
'killall ospfd',
|
||||
'killall ospf6d',
|
||||
'pidof zebra', 'pidof ospfd', 'pidof ospf6d',
|
||||
])
|
||||
|
||||
def RedirCmd_ipforward(self):
|
||||
sysctlFile = open(os.path.join(self.mountdir, self.etcdir,
|
||||
'sysctl.conf'), 'a')
|
||||
p1 = subprocess.Popen([AWK_PATH,
|
||||
'/^\/sbin\/sysctl -w/ {print $NF}',
|
||||
os.path.join(self.nodedir,
|
||||
'core-tmp/ipforward.sh') ],
|
||||
stdout=sysctlFile)
|
||||
p1.wait()
|
||||
sysctlFile.close()
|
||||
|
||||
def RedirCmd_zebra(self):
|
||||
check_call([SED_PATH, '-i', '-e', 's/^zebra=no/zebra=yes/',
|
||||
os.path.join(self.mountdir, self.etcdir, 'quagga/daemons')])
|
||||
def RedirCmd_ospfd(self):
|
||||
check_call([SED_PATH, '-i', '-e', 's/^ospfd=no/ospfd=yes/',
|
||||
os.path.join(self.mountdir, self.etcdir, 'quagga/daemons')])
|
||||
def RedirCmd_ospf6d(self):
|
||||
check_call([SED_PATH, '-i', '-e',
|
||||
's/^ospf6d=no/ospf6d=yes/',
|
||||
os.path.join(self.mountdir, self.etcdir, 'quagga/daemons')])
|
||||
|
||||
CmdsRedirection = {
|
||||
'sh ipforward.sh' : RedirCmd_ipforward,
|
||||
'sh quaggaboot.sh zebra' : RedirCmd_zebra,
|
||||
'sh quaggaboot.sh ospfd' : RedirCmd_ospfd,
|
||||
'sh quaggaboot.sh ospf6d' : RedirCmd_ospf6d,
|
||||
}
|
||||
|
||||
# CoreNode: no __init__, take from LxcNode & SimpleLxcNode
|
||||
def __init__(self, session, objid = None, name = None,
|
||||
nodedir = None, bootsh = "boot.sh", verbose = False,
|
||||
start = True, model = None,
|
||||
vgname = None, ramsize = None, disksize = None,
|
||||
isofile = None):
|
||||
# SimpleLxcNode initialization
|
||||
PyCoreNode.__init__(self, session = session, objid = objid, name = name,
|
||||
verbose = verbose)
|
||||
self.nodedir = nodedir
|
||||
self.model = model
|
||||
# indicates startup() has been invoked and disk has been initialized
|
||||
self.up = False
|
||||
# indicates boot() has been invoked and domU is running
|
||||
self.booted = False
|
||||
self.ifindex = 0
|
||||
self.lock = threading.RLock()
|
||||
self._netif = {}
|
||||
# domU name
|
||||
self.vmname = "c" + str(session.sessionid) + "-" + name
|
||||
# LVM volume group name
|
||||
self.vgname = self.getconfigitem('vg_name', vgname)
|
||||
# LVM logical volume name
|
||||
self.lvname = self.vmname + '-'
|
||||
# LVM logical volume device path name
|
||||
self.lvpath = os.path.join('/dev', self.vgname, self.lvname)
|
||||
self.disksize = self.getconfigitem('disk_size', disksize)
|
||||
self.ramsize = int(self.getconfigitem('ram_size', ramsize))
|
||||
self.isofile = self.getconfigitem('iso_file', isofile)
|
||||
# temporary mount point for paused VM persistent filesystem
|
||||
self.mountdir = None
|
||||
self.etcdir = self.getconfigitem('etc_path')
|
||||
|
||||
# TODO: remove this temporary hack
|
||||
self.FilesRedirection['/usr/local/etc/quagga/Quagga.conf'] = \
|
||||
os.path.join(self.getconfigitem('mount_path'), self.etcdir,
|
||||
'quagga/Quagga.conf')
|
||||
|
||||
# LxcNode initialization
|
||||
# self.makenodedir()
|
||||
if self.nodedir is None:
|
||||
self.nodedir = \
|
||||
os.path.join(session.sessiondir, self.name + ".conf")
|
||||
self.mountdir = self.nodedir + self.getconfigitem('mount_path')
|
||||
if not os.path.isdir(self.mountdir):
|
||||
os.makedirs(self.mountdir)
|
||||
self.tmpnodedir = True
|
||||
else:
|
||||
raise Exception, "Xen PVM node requires a temporary nodedir"
|
||||
self.tmpnodedir = False
|
||||
self.bootsh = bootsh
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def getconfigitem(self, name, default=None):
|
||||
''' Configuration items come from the xen.conf file and/or input from
|
||||
the GUI, and are stored in the session using the XenConfigManager
|
||||
object. self.model is used to identify particular profiles
|
||||
associated with a node type in the GUI.
|
||||
'''
|
||||
return self.session.xen.getconfigitem(name=name, model=self.model,
|
||||
node=self, value=default)
|
||||
|
||||
# from class LxcNode (also SimpleLxcNode)
|
||||
def startup(self):
|
||||
self.warn("XEN PVM startup() called: preparing disk for %s" % self.name)
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if self.up:
|
||||
raise Exception, "already up"
|
||||
self.createlogicalvolume()
|
||||
self.createpartitions()
|
||||
persistdev = self.createfilesystems()
|
||||
check_call([MOUNT_BIN, '-t', 'ext4', persistdev, self.mountdir])
|
||||
self.untarpersistent(tarname=self.getconfigitem('persist_tar_iso'),
|
||||
iso=True)
|
||||
self.setrootpassword(pw = self.getconfigitem('root_password'))
|
||||
self.sethostname(old='UBASE', new=self.name)
|
||||
self.setupssh(keypath=self.getconfigitem('ssh_key_path'))
|
||||
self.createvm()
|
||||
self.up = True
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
# from class LxcNode (also SimpleLxcNode)
|
||||
def boot(self):
|
||||
self.warn("XEN PVM boot() called")
|
||||
|
||||
self.lock.acquire()
|
||||
if not self.up:
|
||||
raise Exception, "Can't boot VM without initialized disk"
|
||||
|
||||
if self.booted:
|
||||
self.lock.release()
|
||||
return
|
||||
|
||||
self.session.services.bootnodeservices(self)
|
||||
tarname = self.getconfigitem('persist_tar')
|
||||
if tarname:
|
||||
self.untarpersistent(tarname=tarname, iso=False)
|
||||
|
||||
try:
|
||||
check_call([UMOUNT_BIN, self.mountdir])
|
||||
self.unmount_all(self.mountdir)
|
||||
check_call([UDEVADM_PATH, 'settle'])
|
||||
check_call([KPARTX_PATH, '-d', self.lvpath])
|
||||
|
||||
#time.sleep(5)
|
||||
#time.sleep(1)
|
||||
|
||||
# unpause VM
|
||||
if self.verbose:
|
||||
self.warn("XEN PVM boot() unpause domU %s" % self.vmname)
|
||||
mutecheck_call([XM_PATH, 'unpause', self.vmname])
|
||||
|
||||
self.booted = True
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def validate(self):
|
||||
self.session.services.validatenodeservices(self)
|
||||
|
||||
# from class LxcNode (also SimpleLxcNode)
|
||||
def shutdown(self):
|
||||
self.warn("XEN PVM shutdown() called")
|
||||
if not self.up:
|
||||
return
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if self.up:
|
||||
# sketch from SimpleLxcNode
|
||||
for netif in self.netifs():
|
||||
netif.shutdown()
|
||||
|
||||
try:
|
||||
# RJE XXX what to do here
|
||||
if self.booted:
|
||||
mutecheck_call([XM_PATH, 'destroy', self.vmname])
|
||||
self.booted = False
|
||||
except OSError:
|
||||
pass
|
||||
except subprocess.CalledProcessError:
|
||||
# ignore this error too, the VM may have exited already
|
||||
pass
|
||||
|
||||
# discard LVM volume
|
||||
lvmRemoveCount = 0
|
||||
while os.path.exists(self.lvpath):
|
||||
try:
|
||||
check_call([UDEVADM_PATH, 'settle'])
|
||||
mutecall([LVCHANGE_PATH, '-an', self.lvpath])
|
||||
lvmRemoveCount += 1
|
||||
mutecall([LVREMOVE_PATH, '-f', self.lvpath])
|
||||
except OSError:
|
||||
pass
|
||||
if (lvmRemoveCount > 1):
|
||||
self.warn("XEN PVM shutdown() required %d lvremove " \
|
||||
"executions." % lvmRemoveCount)
|
||||
|
||||
self._netif.clear()
|
||||
del self.session
|
||||
|
||||
self.up = False
|
||||
|
||||
finally:
|
||||
self.rmnodedir()
|
||||
self.lock.release()
|
||||
|
||||
def createlogicalvolume(self):
|
||||
''' Create a logical volume for this Xen domU. Called from startup().
|
||||
'''
|
||||
if os.path.exists(self.lvpath):
|
||||
raise Exception, "LVM volume already exists"
|
||||
mutecheck_call([LVCREATE_PATH, '--size', self.disksize,
|
||||
'--name', self.lvname, self.vgname])
|
||||
|
||||
def createpartitions(self):
|
||||
''' Partition the LVM volume into persistent and swap partitions
|
||||
using the parted module.
|
||||
'''
|
||||
        dev = parted.Device(path=self.lvpath)
        dev.removeFromCache()
        disk = parted.freshDisk(dev, 'msdos')
        constraint = parted.Constraint(device=dev)
        persist_size = int(0.75 * constraint.maxSize)
        self.createpartition(device=dev, disk=disk, start=1,
                             end=(persist_size - 1), type="ext4")
        self.createpartition(device=dev, disk=disk, start=persist_size,
                             end=(constraint.maxSize - 1), type="linux-swap(v1)")
        disk.commit()
|
||||
|
||||
def createpartition(self, device, disk, start, end, type):
|
||||
''' Create a single partition of the specified type and size and add
|
||||
it to the disk object, using the parted module.
|
||||
'''
|
||||
geo = parted.Geometry(device=device, start=start, end=end)
|
||||
fs = parted.FileSystem(type=type, geometry=geo)
|
||||
part = parted.Partition(disk=disk, fs=fs, type=parted.PARTITION_NORMAL,
|
||||
geometry=geo)
|
||||
constraint = parted.Constraint(exactGeom=geo)
|
||||
disk.addPartition(partition=part, constraint=constraint)
|
||||
|
||||
def createfilesystems(self):
|
||||
''' Make an ext4 filesystem and swap space. Return the device name for
|
||||
the persistent partition so we can mount it.
|
||||
'''
|
||||
output = subprocess.Popen([KPARTX_PATH, '-l', self.lvpath],
|
||||
stdout=subprocess.PIPE).communicate()[0]
|
||||
lines = output.splitlines()
|
||||
persistdev = '/dev/mapper/' + lines[0].strip().split(' ')[0].strip()
|
||||
swapdev = '/dev/mapper/' + lines[1].strip().split(' ')[0].strip()
|
||||
check_call([KPARTX_PATH, '-a', self.lvpath])
|
||||
mutecheck_call([MKFSEXT4_PATH, '-L', 'persist', persistdev])
|
||||
mutecheck_call([MKSWAP_PATH, '-f', '-L', 'swap', swapdev])
|
||||
return persistdev
|
||||
|
||||
def untarpersistent(self, tarname, iso):
|
||||
''' Unpack a persistent template tar file to the mounted mount dir.
|
||||
Uses fsimage library to read from an ISO file.
|
||||
'''
|
||||
tarname = tarname.replace('%h', self.name) # filename may use hostname
|
||||
if iso:
|
||||
try:
|
||||
fs = fsimage.open(self.isofile, 0)
|
||||
except IOError, e:
|
||||
self.warn("Failed to open ISO file: %s (%s)" % (self.isofile,e))
|
||||
return
|
||||
            try:
                tardata = fs.open_file(tarname).read()
            except IOError, e:
                self.warn("Failed to open tar file: %s (%s)" % (tarname, e))
                return
            finally:
                del fs
|
||||
else:
|
||||
try:
|
||||
f = open(tarname)
|
||||
tardata = f.read()
|
||||
f.close()
|
||||
except IOError, e:
|
||||
self.warn("Failed to open tar file: %s (%s)" % (tarname, e))
|
||||
return
|
||||
p = subprocess.Popen([TAR_PATH, '-C', self.mountdir, '--numeric-owner',
|
||||
'-xf', '-'], stdin=subprocess.PIPE)
|
||||
p.communicate(input=tardata)
|
||||
p.wait()
|
||||
|
||||
def setrootpassword(self, pw):
|
||||
''' Set the root password by updating the shadow password file that
|
||||
is on the filesystem mounted in the temporary area.
|
||||
'''
|
||||
saltedpw = crypt.crypt(pw, '$6$'+base64.b64encode(os.urandom(12)))
|
||||
check_call([SED_PATH, '-i', '-e',
|
||||
'/^root:/s_^root:\([^:]*\):_root:' + saltedpw + ':_',
|
||||
os.path.join(self.mountdir, self.etcdir, 'shadow')])
|
||||
|
||||
def sethostname(self, old, new):
|
||||
''' Set the hostname by updating the hostname and hosts files that
|
||||
reside on the filesystem mounted in the temporary area.
|
||||
'''
|
||||
check_call([SED_PATH, '-i', '-e', 's/%s/%s/' % (old, new),
|
||||
os.path.join(self.mountdir, self.etcdir, 'hostname')])
|
||||
check_call([SED_PATH, '-i', '-e', 's/%s/%s/' % (old, new),
|
||||
os.path.join(self.mountdir, self.etcdir, 'hosts')])
|
||||
|
||||
def setupssh(self, keypath):
|
||||
''' Configure SSH access by installing host keys and a system-wide
|
||||
authorized_keys file.
|
||||
'''
|
||||
sshdcfg = os.path.join(self.mountdir, self.etcdir, 'ssh/sshd_config')
|
||||
check_call([SED_PATH, '-i', '-e',
|
||||
's/PermitRootLogin no/PermitRootLogin yes/', sshdcfg])
|
||||
sshdir = os.path.join(self.getconfigitem('mount_path'), self.etcdir,
|
||||
'ssh')
|
||||
sshdir = sshdir.replace('/','\\/') # backslash slashes for use in sed
|
||||
check_call([SED_PATH, '-i', '-e',
|
||||
's/#AuthorizedKeysFile %h\/.ssh\/authorized_keys/' + \
|
||||
'AuthorizedKeysFile ' + sshdir + '\/authorized_keys/',
|
||||
sshdcfg])
|
||||
for f in ('ssh_host_rsa_key','ssh_host_rsa_key.pub','authorized_keys'):
|
||||
src = os.path.join(keypath, f)
|
||||
dst = os.path.join(self.mountdir, self.etcdir, 'ssh', f)
|
||||
shutil.copy(src, dst)
|
||||
if f[-3:] != "pub":
|
||||
os.chmod(dst, 0600)
|
||||
|
||||
def createvm(self):
|
||||
        ''' Instantiate a *paused* domU VM. Creating it now lets us add network
            interfaces, and keeping it paused leaves the filesystem open for
            configuration.
        '''
|
||||
args = [XM_PATH, 'create', os.devnull, '--paused']
|
||||
args.extend(['name=' + self.vmname, 'memory=' + str(self.ramsize)])
|
||||
args.append('disk=tap:aio:' + self.isofile + ',hda,r')
|
||||
args.append('disk=phy:' + self.lvpath + ',hdb,w')
|
||||
args.append('bootloader=pygrub')
|
||||
bootargs = '--kernel=/isolinux/vmlinuz --ramdisk=/isolinux/initrd'
|
||||
args.append('bootargs=' + bootargs)
|
||||
for action in ('poweroff', 'reboot', 'suspend', 'crash', 'halt'):
|
||||
args.append('on_%s=destroy' % action)
|
||||
args.append('extra=' + self.getconfigitem('xm_create_extra'))
|
||||
mutecheck_call(args)
|
||||
|
||||
# from class LxcNode
|
||||
def privatedir(self, path):
|
||||
#self.warn("XEN PVM privatedir() called")
|
||||
# Do nothing, Xen PVM nodes are fully private
|
||||
pass
|
||||
|
||||
# from class LxcNode
|
||||
def opennodefile(self, filename, mode = "w"):
|
||||
self.warn("XEN PVM opennodefile() called")
|
||||
raise Exception, "Can't open VM file with opennodefile()"
|
||||
|
||||
# from class LxcNode
|
||||
# open a file on a paused Xen node
|
||||
def openpausednodefile(self, filename, mode = "w"):
|
||||
dirname, basename = os.path.split(filename)
|
||||
if not basename:
|
||||
raise ValueError, "no basename for filename: " + filename
|
||||
if dirname and dirname[0] == "/":
|
||||
dirname = dirname[1:]
|
||||
#dirname = dirname.replace("/", ".")
|
||||
dirname = os.path.join(self.nodedir, dirname)
|
||||
if not os.path.isdir(dirname):
|
||||
os.makedirs(dirname, mode = 0755)
|
||||
hostfilename = os.path.join(dirname, basename)
|
||||
return open(hostfilename, mode)
|
||||
|
||||
# from class LxcNode
|
||||
def nodefile(self, filename, contents, mode = 0644):
|
||||
if filename in self.FilesToIgnore:
|
||||
#self.warn("XEN PVM nodefile(filename=%s) ignored" % [filename])
|
||||
return
|
||||
|
||||
if filename in self.FilesRedirection:
|
||||
redirFilename = self.FilesRedirection[filename]
|
||||
self.warn("XEN PVM nodefile(filename=%s) redirected to %s" % (filename, redirFilename))
|
||||
filename = redirFilename
|
||||
|
||||
self.warn("XEN PVM nodefile(filename=%s) called" % [filename])
|
||||
self.lock.acquire()
|
||||
if not self.up:
|
||||
self.lock.release()
|
||||
raise Exception, "Can't access VM file as VM disk isn't ready"
|
||||
return
|
||||
|
||||
if self.booted:
|
||||
self.lock.release()
|
||||
raise Exception, "Can't access VM file as VM is already running"
|
||||
return
|
||||
|
||||
try:
|
||||
f = self.openpausednodefile(filename, "w")
|
||||
f.write(contents)
|
||||
os.chmod(f.name, mode)
|
||||
f.close()
|
||||
self.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode))
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
# from class SimpleLxcNode
|
||||
def alive(self):
|
||||
# is VM running?
|
||||
return False # XXX
|
||||
|
||||
def cmd(self, args, wait = True):
|
||||
cmdAsString = string.join(args, ' ')
|
||||
if cmdAsString in self.CmdsToIgnore:
|
||||
#self.warn("XEN PVM cmd(args=[%s]) called and ignored" % cmdAsString)
|
||||
return 0
|
||||
if cmdAsString in self.CmdsRedirection:
|
||||
self.CmdsRedirection[cmdAsString](self)
|
||||
return 0
|
||||
|
||||
self.warn("XEN PVM cmd(args=[%s]) called, but not yet implemented" % cmdAsString)
|
||||
return 0
|
||||
|
||||
def cmdresult(self, args):
|
||||
cmdAsString = string.join(args, ' ')
|
||||
if cmdAsString in self.CmdsToIgnore:
|
||||
#self.warn("XEN PVM cmd(args=[%s]) called and ignored" % cmdAsString)
|
||||
return (0, "")
|
||||
self.warn("XEN PVM cmdresult(args=[%s]) called, but not yet implemented" % cmdAsString)
|
||||
return (0, "")
|
||||
|
||||
def popen(self, args):
|
||||
cmdAsString = string.join(args, ' ')
|
||||
self.warn("XEN PVM popen(args=[%s]) called, but not yet implemented" % cmdAsString)
|
||||
return
|
||||
|
||||
def icmd(self, args):
|
||||
cmdAsString = string.join(args, ' ')
|
||||
self.warn("XEN PVM icmd(args=[%s]) called, but not yet implemented" % cmdAsString)
|
||||
return
|
||||
|
||||
def term(self, sh = "/bin/sh"):
|
||||
self.warn("XEN PVM term() called, but not yet implemented")
|
||||
return
|
||||
|
||||
def termcmdstring(self, sh = "/bin/sh"):
|
||||
''' We may add 'sudo' to the command string because the GUI runs as a
|
||||
normal user. Use SSH if control interface is available, otherwise
|
||||
use Xen console with a keymapping for easy login.
|
||||
'''
|
||||
controlifc = None
|
||||
for ifc in self.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
controlifc = ifc
|
||||
break
|
||||
cmd = "xterm "
|
||||
# use SSH if control interface is available
|
||||
if controlifc:
|
||||
controlip = controlifc.addrlist[0].split('/')[0]
|
||||
cmd += "-e ssh root@%s" % controlip
|
||||
return cmd
|
||||
# otherwise use 'xm console'
|
||||
#pw = self.getconfigitem('root_password')
|
||||
#cmd += "-xrm 'XTerm*VT100.translations: #override <Key>F1: "
|
||||
#cmd += "string(\"root\\n\") \\n <Key>F2: string(\"%s\\n\")' " % pw
|
||||
cmd += "-e sudo %s console %s" % (XM_PATH, self.vmname)
|
||||
return cmd
|
||||
|
||||
def shcmd(self, cmdstr, sh = "/bin/sh"):
|
||||
self.warn("XEN PVM shcmd(args=[%s]) called, but not yet implemented" % cmdstr)
|
||||
return
|
||||
|
||||
# from class SimpleLxcNode
|
||||
def info(self, msg):
|
||||
if self.verbose:
|
||||
print "%s: %s" % (self.name, msg)
|
||||
sys.stdout.flush()
|
||||
|
||||
# from class SimpleLxcNode
|
||||
def warn(self, msg):
|
||||
print >> sys.stderr, "%s: %s" % (self.name, msg)
|
||||
sys.stderr.flush()
|
||||
|
||||
def mount(self, source, target):
|
||||
self.warn("XEN PVM Nodes can't bind-mount filesystems")
|
||||
|
||||
def umount(self, target):
|
||||
self.warn("XEN PVM Nodes can't bind-mount filesystems")
|
||||
|
||||
def newifindex(self):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
while self.ifindex in self._netif:
|
||||
self.ifindex += 1
|
||||
ifindex = self.ifindex
|
||||
self.ifindex += 1
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def getifindex(self, netif):
|
||||
for ifindex in self._netif:
|
||||
if self._netif[ifindex] is netif:
|
||||
return ifindex
|
||||
return -1
|
||||
|
||||
def addnetif(self, netif, ifindex):
|
||||
self.warn("XEN PVM addnetif() called")
|
||||
PyCoreNode.addnetif(self, netif, ifindex)
|
||||
|
||||
def delnetif(self, ifindex):
|
||||
self.warn("XEN PVM delnetif() called")
|
||||
PyCoreNode.delnetif(self, ifindex)
|
||||
|
||||
def newveth(self, ifindex = None, ifname = None, net = None, hwaddr = None):
|
||||
self.warn("XEN PVM newveth(ifindex=%s, ifname=%s) called" %
|
||||
(ifindex, ifname))
|
||||
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if ifindex is None:
|
||||
ifindex = self.newifindex()
|
||||
if ifname is None:
|
||||
ifname = "eth%d" % ifindex
|
||||
sessionid = self.session.shortsessionid()
|
||||
name = "n%s.%s.%s" % (self.objid, ifindex, sessionid)
|
||||
localname = "n%s.%s.%s" % (self.objid, ifname, sessionid)
|
||||
ifclass = XenVEth
|
||||
veth = ifclass(node = self, name = name, localname = localname,
|
||||
mtu = 1500, net = net, hwaddr = hwaddr)
|
||||
|
||||
veth.name = ifname
|
||||
try:
|
||||
self.addnetif(veth, ifindex)
|
||||
except:
|
||||
veth.shutdown()
|
||||
del veth
|
||||
raise
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def newtuntap(self, ifindex = None, ifname = None, net = None):
|
||||
self.warn("XEN PVM newtuntap() called but not implemented")
|
||||
|
||||
def sethwaddr(self, ifindex, addr):
|
||||
self._netif[ifindex].sethwaddr(addr)
|
||||
if self.up:
|
||||
pass
|
||||
#self.cmd([IP_BIN, "link", "set", "dev", self.ifname(ifindex),
|
||||
# "address", str(addr)])
|
||||
|
||||
def addaddr(self, ifindex, addr):
|
||||
if self.up:
|
||||
pass
|
||||
# self.cmd([IP_BIN, "addr", "add", str(addr),
|
||||
# "dev", self.ifname(ifindex)])
|
||||
self._netif[ifindex].addaddr(addr)
|
||||
|
||||
def deladdr(self, ifindex, addr):
|
||||
try:
|
||||
self._netif[ifindex].deladdr(addr)
|
||||
except ValueError:
|
||||
self.warn("trying to delete unknown address: %s" % addr)
|
||||
if self.up:
|
||||
pass
|
||||
# self.cmd([IP_BIN, "addr", "del", str(addr),
|
||||
# "dev", self.ifname(ifindex)])
|
||||
|
||||
valid_deladdrtype = ("inet", "inet6", "inet6link")
|
||||
def delalladdr(self, ifindex, addrtypes = valid_deladdrtype):
|
||||
addr = self.getaddr(self.ifname(ifindex), rescan = True)
|
||||
for t in addrtypes:
|
||||
if t not in self.valid_deladdrtype:
|
||||
raise ValueError, "addr type must be in: " + \
|
||||
" ".join(self.valid_deladdrtype)
|
||||
for a in addr[t]:
|
||||
self.deladdr(ifindex, a)
|
||||
# update cached information
|
||||
self.getaddr(self.ifname(ifindex), rescan = True)
|
||||
|
||||
# Xen PVM relies on boot process to bring up links
|
||||
#def ifup(self, ifindex):
|
||||
# if self.up:
|
||||
# self.cmd([IP_BIN, "link", "set", self.ifname(ifindex), "up"])
|
||||
|
||||
def newnetif(self, net = None, addrlist = [], hwaddr = None,
|
||||
ifindex = None, ifname = None):
|
||||
self.warn("XEN PVM newnetif(ifindex=%s, ifname=%s) called" %
|
||||
(ifindex, ifname))
|
||||
|
||||
self.lock.acquire()
|
||||
|
||||
if not self.up:
|
||||
self.lock.release()
|
||||
raise Exception, "Can't access add veth as VM disk isn't ready"
|
||||
return
|
||||
|
||||
if self.booted:
|
||||
self.lock.release()
|
||||
raise Exception, "Can't access add veth as VM is already running"
|
||||
return
|
||||
|
||||
try:
|
||||
if isinstance(net, EmaneNode):
|
||||
raise Exception, "Xen PVM doesn't yet support Emane nets"
|
||||
|
||||
# ifindex = self.newtuntap(ifindex = ifindex, ifname = ifname,
|
||||
# net = net)
|
||||
# # TUN/TAP is not ready for addressing yet; the device may
|
||||
# # take some time to appear, and installing it into a
|
||||
# # namespace after it has been bound removes addressing;
|
||||
# # save addresses with the interface now
|
||||
# self.attachnet(ifindex, net)
|
||||
# netif = self.netif(ifindex)
|
||||
# netif.sethwaddr(hwaddr)
|
||||
# for addr in maketuple(addrlist):
|
||||
# netif.addaddr(addr)
|
||||
# return ifindex
|
||||
else:
|
||||
ifindex = self.newveth(ifindex = ifindex, ifname = ifname,
|
||||
net = net, hwaddr = hwaddr)
|
||||
if net is not None:
|
||||
self.attachnet(ifindex, net)
|
||||
|
||||
rulefile = os.path.join(self.getconfigitem('mount_path'),
|
||||
self.etcdir,
|
||||
'udev/rules.d/70-persistent-net.rules')
|
||||
f = self.openpausednodefile(rulefile, "a")
|
||||
f.write('\n# Xen PVM virtual interface #%s %s with MAC address %s\n' % (ifindex, self.ifname(ifindex), hwaddr))
|
||||
# Using MAC address as we're now loading PVM net driver "early"
|
||||
# OLD: Would like to use MAC address, but udev isn't working with paravirtualized NICs. Perhaps the "set hw address" isn't triggering a rescan.
|
||||
f.write('SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="%s", KERNEL=="eth*", NAME="%s"\n' % (hwaddr, self.ifname(ifindex)))
|
||||
#f.write('SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", DEVPATH=="/devices/vif-%s/?*", KERNEL=="eth*", NAME="%s"\n' % (ifindex, self.ifname(ifindex)))
|
||||
f.close()
|
||||
|
||||
if hwaddr:
|
||||
self.sethwaddr(ifindex, hwaddr)
|
||||
for addr in maketuple(addrlist):
|
||||
self.addaddr(ifindex, addr)
|
||||
#self.ifup(ifindex)
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def connectnode(self, ifname, othernode, otherifname):
|
||||
self.warn("XEN PVM connectnode() called")
|
||||
|
||||
# tmplen = 8
|
||||
# tmp1 = "tmp." + "".join([random.choice(string.ascii_lowercase)
|
||||
# for x in xrange(tmplen)])
|
||||
# tmp2 = "tmp." + "".join([random.choice(string.ascii_lowercase)
|
||||
# for x in xrange(tmplen)])
|
||||
# check_call([IP_BIN, "link", "add", "name", tmp1,
|
||||
# "type", "veth", "peer", "name", tmp2])
|
||||
#
|
||||
# check_call([IP_BIN, "link", "set", tmp1, "netns", str(self.pid)])
|
||||
# self.cmd([IP_BIN, "link", "set", tmp1, "name", ifname])
|
||||
# self.addnetif(PyCoreNetIf(self, ifname), self.newifindex())
|
||||
#
|
||||
# check_call([IP_BIN, "link", "set", tmp2, "netns", str(othernode.pid)])
|
||||
# othernode.cmd([IP_BIN, "link", "set", tmp2, "name", otherifname])
|
||||
# othernode.addnetif(PyCoreNetIf(othernode, otherifname),
|
||||
# othernode.newifindex())
|
||||
|
||||
def addfile(self, srcname, filename):
|
||||
self.lock.acquire()
|
||||
if not self.up:
|
||||
self.lock.release()
|
||||
raise Exception, "Can't access VM file as VM disk isn't ready"
|
||||
return
|
||||
|
||||
if self.booted:
|
||||
self.lock.release()
|
||||
raise Exception, "Can't access VM file as VM is already running"
|
||||
return
|
||||
|
||||
if filename in self.FilesToIgnore:
|
||||
#self.warn("XEN PVM addfile(filename=%s) ignored" % [filename])
|
||||
return
|
||||
|
||||
if filename in self.FilesRedirection:
|
||||
redirFilename = self.FilesRedirection[filename]
|
||||
self.warn("XEN PVM addfile(filename=%s) redirected to %s" % (filename, redirFilename))
|
||||
filename = redirFilename
|
||||
|
||||
        try:
            fin = open(srcname, "r")
            contents = fin.read()
            fin.close()

            # mode was previously referenced here without being defined;
            # use the same default as nodefile()
            mode = 0644
            fout = self.openpausednodefile(filename, "w")
            fout.write(contents)
            os.chmod(fout.name, mode)
            fout.close()
            self.info("created nodefile: '%s'; mode: 0%o" % (fout.name, mode))
        finally:
            self.lock.release()
|
||||
|
||||
self.warn("XEN PVM addfile(filename=%s) called" % [filename])
|
||||
|
||||
#shcmd = "mkdir -p $(dirname '%s') && mv '%s' '%s' && sync" % \
|
||||
# (filename, srcname, filename)
|
||||
#self.shcmd(shcmd)
|
||||
|
||||
def unmount_all(self, path):
|
||||
''' Namespaces inherit the host mounts, so we need to ensure that all
|
||||
namespaces have unmounted our temporary mount area so that the
|
||||
kpartx command will succeed.
|
||||
'''
|
||||
# Session.bootnodes() already has self.session._objslock
|
||||
for o in self.session.objs():
|
||||
if not isinstance(o, LxcNode):
|
||||
continue
|
||||
o.umount(path)
|
||||
265
daemon/core/xen/xenconfig.py
Normal file
@ -0,0 +1,265 @@
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
xenconfig.py: Implementation of the XenConfigManager class for managing
|
||||
configurable items for XenNodes.
|
||||
|
||||
Configuration for a XenNode is available at these three levels:
|
||||
Global config: XenConfigManager.configs[0] = (type='xen', values)
|
||||
Nodes of this machine type have this config. These are the default values.
|
||||
XenConfigManager.default_config comes from defaults + xen.conf
|
||||
Node type config: XenConfigManager.configs[0] = (type='mytype', values)
|
||||
All nodes of this type have this config.
|
||||
Node-specific config: XenConfigManager.configs[nodenumber] = (type, values)
|
||||
The node having this specific number has this config.
|
||||
'''
|
||||
|
||||
import sys, os, threading, subprocess, time, string
|
||||
import ConfigParser
|
||||
from xml.dom.minidom import parseString, Document
|
||||
from core.constants import *
|
||||
from core.api import coreapi
|
||||
from core.conf import ConfigurableManager, Configurable
|
||||
|
||||
|
||||
class XenConfigManager(ConfigurableManager):
|
||||
''' Xen controller object. Lives in a Session instance and is used for
|
||||
building Xen profiles.
|
||||
'''
|
||||
_name = "xen"
|
||||
_type = coreapi.CORE_TLV_REG_EMULSRV
|
||||
|
||||
def __init__(self, session):
|
||||
ConfigurableManager.__init__(self, session)
|
||||
self.verbose = self.session.getcfgitembool('verbose', False)
|
||||
self.default_config = XenDefaultConfig(session, objid=None)
|
||||
self.loadconfigfile()
|
||||
|
||||
def setconfig(self, nodenum, conftype, values):
|
||||
''' add configuration values for a node to a dictionary; values are
|
||||
usually received from a Configuration Message, and may refer to a
|
||||
node for which no object exists yet
|
||||
'''
|
||||
if nodenum is None:
|
||||
nodenum = 0 # used for storing the global default config
|
||||
return ConfigurableManager.setconfig(self, nodenum, conftype, values)
|
||||
|
||||
def getconfig(self, nodenum, conftype, defaultvalues):
|
||||
''' get configuration values for a node; if the values don't exist in
|
||||
our dictionary then return the default values supplied; if conftype
|
||||
is None then we return a match on any conftype.
|
||||
'''
|
||||
if nodenum is None:
|
||||
nodenum = 0 # used for storing the global default config
|
||||
return ConfigurableManager.getconfig(self, nodenum, conftype,
|
||||
defaultvalues)
|
||||
|
||||
def clearconfig(self, nodenum):
|
||||
''' remove configuration values for a node
|
||||
'''
|
||||
ConfigurableManager.clearconfig(self, nodenum)
|
||||
if 0 in self.configs:
|
||||
self.configs.pop(0)
|
||||
|
||||
def configure(self, session, msg):
|
||||
''' Handle configuration messages for global Xen config.
|
||||
'''
|
||||
return self.default_config.configure(self, msg)
|
||||
|
||||
    def loadconfigfile(self, filename=None):
        ''' Load defaults from the /etc/core/xen.conf file into dict object.
        '''
        self.configfile = {}  # ensure the attribute exists even when the file
                              # cannot be read (getconfigitem() consults it)
        if filename is None:
            filename = os.path.join(CORE_CONF_DIR, 'xen.conf')
        cfg = ConfigParser.SafeConfigParser()
        if filename not in cfg.read(filename):
            self.session.warn("unable to read Xen config file: %s" % filename)
            return
        section = "xen"
        if not cfg.has_section(section):
            self.session.warn("%s is missing a xen section!" % filename)
            return
        self.configfile = dict(cfg.items(section))
        # populate default config items from config file entries
        vals = list(self.default_config.getdefaultvalues())
        names = self.default_config.getnames()
        for i in range(len(names)):
            if names[i] in self.configfile:
                vals[i] = self.configfile[names[i]]
        # this sets XenConfigManager.configs[0] = (type='xen', vals)
        self.setconfig(None, self.default_config._name, vals)

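# Editor's note -- an illustrative sketch, not part of the imported file: the
# heart of loadconfigfile() is ConfigParser turning a [xen] section into a
# plain dict. Shown stand-alone with an in-memory file (the option names match
# the defaults defined at the bottom of this module, the values are made up):
import ConfigParser
import StringIO
sample = "[xen]\nram_size = 512\ndisk_size = 1G\n"
cfg = ConfigParser.SafeConfigParser()
cfg.readfp(StringIO.StringIO(sample))
configfile = dict(cfg.items("xen"))
assert configfile == {'ram_size': '512', 'disk_size': '1G'}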
    def getconfigitem(self, name, model=None, node=None, value=None):
        ''' Get a config item of the given name, first looking for node-specific
            configuration, then model specific, and finally global defaults.
            If a value is supplied, it will override any stored config.
        '''
        if value is not None:
            return value
        n = None
        if node:
            n = node.objid
        (t, v) = self.getconfig(nodenum=n, conftype=model, defaultvalues=None)
        if n is not None and v is None:
            # get item from default config for the node type
            (t, v) = self.getconfig(nodenum=None, conftype=model,
                                    defaultvalues=None)
        if v is None:
            # get item from default config for the machine type
            (t, v) = self.getconfig(nodenum=None,
                                    conftype=self.default_config._name,
                                    defaultvalues=None)

        confignames = self.default_config.getnames()
        if v and name in confignames:
            i = confignames.index(name)
            return v[i]
        else:
            # name may only exist in config file
            if name in self.configfile:
                return self.configfile[name]
            else:
                #self.warn("missing config item '%s'" % name)
                return None

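# Editor's note -- an illustrative sketch, not part of the imported file: the
# lookup order implemented by getconfigitem() above, reduced to plain dicts
# (the keys and values here are made up):
node_cfg = {'ram_size': '1024'}       # node-specific values
model_cfg = {'disk_size': '1G'}       # node-type profile
default_cfg = {'ram_size': '256', 'disk_size': '256M', 'iso_file': ''}

def lookup(name):
    for level in (node_cfg, model_cfg, default_cfg):
        if name in level:
            return level[name]
    return None

assert lookup('ram_size') == '1024'   # node-specific value wins
assert lookup('disk_size') == '1G'    # falls back to the node-type profile
assert lookup('iso_file') == ''       # falls back to the machine-type default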

class XenConfig(Configurable):
    ''' Manage Xen configuration profiles.
    '''

    @classmethod
    def configure(cls, xen, msg):
        ''' Handle configuration messages for setting up a model.
            Similar to Configurable.configure(), but considers opaque data
            for indicating node types.
        '''
        reply = None
        nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE)
        objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ)
        conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE)
        opaque = msg.gettlv(coreapi.CORE_TLV_CONF_OPAQUE)

        nodetype = objname
        if opaque is not None:
            opaque_items = opaque.split(':')
            if len(opaque_items) != 2:
                xen.warn("xen config: invalid opaque data in conf message")
                return None
            nodetype = opaque_items[1]

        if xen.verbose:
            xen.info("received configure message for %s" % nodetype)
        if conftype == coreapi.CONF_TYPE_FLAGS_REQUEST:
            if xen.verbose:
                xen.info("replying to configure request for %s " % nodetype)
            # when object name is "all", the reply to this request may be None
            # if this node has not been configured for this model; otherwise we
            # reply with the defaults for this model
            if objname == "all":
                typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE
            else:
                typeflags = coreapi.CONF_TYPE_FLAGS_NONE
            values = xen.getconfig(nodenum, nodetype, defaultvalues=None)[1]
            if values is None:
                # get defaults from default "xen" config which includes
                # settings from both cls._confdefaultvalues and xen.conf
                defaults = cls.getdefaultvalues()
                values = xen.getconfig(nodenum, cls._name, defaults)[1]
            if values is None:
                return None
            # reply with config options
            if nodenum is None:
                nodenum = 0
            reply = cls.toconfmsg(0, nodenum, typeflags, nodetype, values)
        elif conftype == coreapi.CONF_TYPE_FLAGS_RESET:
            if objname == "all":
                xen.clearconfig(nodenum)
        #elif conftype == coreapi.CONF_TYPE_FLAGS_UPDATE:
        else:
            # store the configuration values for later use, when the XenNode
            # object has been created
            if objname is None:
                xen.info("no configuration object for node %s" % nodenum)
                return None
            values_str = msg.gettlv(coreapi.CORE_TLV_CONF_VALUES)
            if values_str is None:
                # use default or preconfigured values
                defaults = cls.getdefaultvalues()
                values = xen.getconfig(nodenum, cls._name, defaults)[1]
            else:
                # use new values supplied from the conf message
                values = values_str.split('|')
            xen.setconfig(nodenum, nodetype, values)
        return reply

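# Editor's note -- an illustrative sketch, not part of the imported file: the
# two string conventions configure() relies on, shown stand-alone. The opaque
# TLV carries "model:nodetype", and configuration value lists travel as
# '|'-separated strings (the sample values are made up):
opaque = "xen:mytype"
opaque_items = opaque.split(':')
assert len(opaque_items) == 2 and opaque_items[1] == "mytype"

values_str = "256|256M||/mnt/xen||||password"
values = values_str.split('|')
assert len(values) == 8            # one entry per item in the config matrix
assert values[3] == "/mnt/xen"     # empty fields stay as empty strings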
    @classmethod
    def toconfmsg(cls, flags, nodenum, typeflags, nodetype, values):
        ''' Convert this class to a Config API message. Some TLVs are defined
            by the class, but node number, conf type flags, and values must
            be passed in.
        '''
        values_str = string.join(values, '|')
        tlvdata = ""
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE, nodenum)
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ,
                                            cls._name)
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE,
                                            typeflags)
        datatypes = tuple(map(lambda x: x[1], cls._confmatrix))
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_DATA_TYPES,
                                            datatypes)
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES,
                                            values_str)
        captions = reduce(lambda a, b: a + '|' + b,
                          map(lambda x: x[4], cls._confmatrix))
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_CAPTIONS,
                                            captions)
        possiblevals = reduce(lambda a, b: a + '|' + b,
                              map(lambda x: x[3], cls._confmatrix))
        tlvdata += coreapi.CoreConfTlv.pack(
            coreapi.CORE_TLV_CONF_POSSIBLE_VALUES, possiblevals)
        if cls._bitmap is not None:
            tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_BITMAP,
                                                cls._bitmap)
        if cls._confgroups is not None:
            tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_GROUPS,
                                                cls._confgroups)
        opaque = "%s:%s" % (cls._name, nodetype)
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OPAQUE,
                                            opaque)
        msg = coreapi.CoreConfMessage.pack(flags, tlvdata)
        return msg
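# Editor's note -- an illustrative sketch, not part of the imported file: how
# toconfmsg() derives its per-TLV strings from a _confmatrix-style table, using
# a two-row stand-in for the real matrix (0 stands in for the data-type
# constant):
confmatrix = [
    ('ram_size',  0, '256',  '', 'ram size (MB)'),
    ('disk_size', 0, '256M', '', 'disk size (use K/M/G suffix)'),
]
datatypes = tuple(map(lambda x: x[1], confmatrix))
captions = reduce(lambda a, b: a + '|' + b, map(lambda x: x[4], confmatrix))
assert datatypes == (0, 0)
assert captions == 'ram size (MB)|disk size (use K/M/G suffix)'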

class XenDefaultConfig(XenConfig):
    ''' Global default Xen configuration options.
    '''
    _name = "xen"
    # Configuration items:
    #   ('name', 'type', 'default', 'possible-value-list', 'caption')
    _confmatrix = [
        ('ram_size', coreapi.CONF_DATA_TYPE_STRING, '256', '',
         'ram size (MB)'),
        ('disk_size', coreapi.CONF_DATA_TYPE_STRING, '256M', '',
         'disk size (use K/M/G suffix)'),
        ('iso_file', coreapi.CONF_DATA_TYPE_STRING, '', '',
         'iso file'),
        ('mount_path', coreapi.CONF_DATA_TYPE_STRING, '', '',
         'mount path'),
        ('etc_path', coreapi.CONF_DATA_TYPE_STRING, '', '',
         'etc path'),
        ('persist_tar_iso', coreapi.CONF_DATA_TYPE_STRING, '', '',
         'iso persist tar file'),
        ('persist_tar', coreapi.CONF_DATA_TYPE_STRING, '', '',
         'persist tar file'),
        ('root_password', coreapi.CONF_DATA_TYPE_STRING, 'password', '',
         'root password'),
    ]

    _confgroups = "domU properties:1-%d" % len(_confmatrix)
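# Editor's note -- an illustrative sketch, not part of the imported file: with
# the matrix above, defaults sit in column 2 of each tuple, so the eight
# default values are presumably gathered roughly like this (getdefaultvalues()
# itself lives in core.conf and is not shown in this commit):
confmatrix = [
    ('ram_size', 'str', '256', '', 'ram size (MB)'),
    ('root_password', 'str', 'password', '', 'root password'),
]
defaults = tuple(map(lambda x: x[2], confmatrix))
assert defaults == ('256', 'password')
# The _confgroups string "domU properties:1-8" groups items 1 through 8 of the
# matrix under a single "domU properties" heading, presumably for display
# grouping in the configuration dialog.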