Fixed an issue with sending the wrong type flag, found while cleaning up the configuration request code in services
parent 8e9dc21c3c
commit 97f3c3a070

7 changed files with 73 additions and 77 deletions
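In short, the old service configuration code set type_flag to ConfigFlags.UPDATE.value before branching, so the service-list reply went out with the wrong flag; the hunks below set the flag inside each branch and pack broadcast config messages with config_data.message_type instead of a hard-coded 0. A minimal sketch of the corrected flag selection, using stand-in values rather than core's real ConfigFlags enum:

# Stand-in flag values for illustration; core defines these on its ConfigFlags enum.
CONFIG_FLAG_NONE = 0
CONFIG_FLAG_UPDATE = 1


def select_type_flag(opaque):
    """Mirror the corrected branching: the flag depends on the request."""
    if opaque is None:
        # no opaque data: the reply lists the available services
        return CONFIG_FLAG_NONE
    # opaque identifies a specific service: the reply carries its configuration
    return CONFIG_FLAG_UPDATE


assert select_type_flag(None) == CONFIG_FLAG_NONE
assert select_type_flag("zebra") == CONFIG_FLAG_UPDATE  # any opaque service identifier
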
@@ -15,7 +15,7 @@ def convert_node(node_data):
     Callback to handle an node broadcast out from a session.

     :param core.data.NodeData node_data: node data to handle
-    :return: nothing
+    :return: packed node message
     """
     logger.debug("converting node data to message: %s", node_data)

@@ -426,32 +426,35 @@ class CoreBroker(ConfigurableManager):
         for n in self.network_nodes:
             self.addnettunnel(n)

-    def addnettunnel(self, node):
+    def addnettunnel(self, node_id):
         """
         Add network tunnel between node and broker.

-        :param node: node to add network tunnel to
-        :return: list of grep taps
+        :param int node_id: node id of network to add tunnel to
+        :return: list of gre taps
         :rtype: list
         """
         try:
-            net = self.session.get_object(node)
+            net = self.session.get_object(node_id)
         except KeyError:
-            raise KeyError("network node %s not found" % node)
+            raise KeyError("network node %s not found" % node_id)

         # add other nets here that do not require tunnels
         if nodeutils.is_node(net, NodeTypes.EMANE_NET):
+            logger.warn("emane network does not require a tunnel")
             return None

         server_interface = getattr(net, "serverintf", None)
         if nodeutils.is_node(net, NodeTypes.CONTROL_NET) and server_interface is not None:
+            logger.warn("control networks with server interfaces do not need a tunnel")
             return None

-        servers = self.getserversbynode(node)
+        servers = self.getserversbynode(node_id)
         if len(servers) < 2:
+            logger.warn("not enough servers to create a tunnel: %s", servers)
             return None
-        hosts = []

+        hosts = []
         for server in servers:
             if server.host is None:
                 continue

@@ -469,10 +472,10 @@ class CoreBroker(ConfigurableManager):
             else:
                 # we are the session master
                 myip = host
-            key = self.tunnelkey(node, IpAddress.to_int(myip))
+            key = self.tunnelkey(node_id, IpAddress.to_int(myip))
             if key in self.tunnels.keys():
                 continue
-            logger.info("Adding tunnel for net %s to %s with key %s" % (node, host, key))
+            logger.info("Adding tunnel for net %s to %s with key %s" % (node_id, host, key))
             gt = GreTap(node=None, name=None, session=self.session, remoteip=host, key=key)
             self.tunnels[key] = gt
             r.append(gt)

@@ -212,7 +212,7 @@ class CoreRequestHandler(SocketServer.BaseRequestHandler):
             (ConfigTlvs.NETWORK_ID, config_data.network_id),
             (ConfigTlvs.OPAQUE, config_data.opaque),
         ])
-        message = coreapi.CoreConfMessage.pack(0, tlv_data)
+        message = coreapi.CoreConfMessage.pack(config_data.message_type, tlv_data)

         try:
             self.sendall(message)

@@ -467,6 +467,7 @@ class CoreRequestHandler(SocketServer.BaseRequestHandler):
         :param message: message for replies
         :return: nothing
         """
+        logger.info("replies to dispatch: %s", replies)
         for reply in replies:
             message_type, message_flags, message_length = coreapi.CoreMessage.unpack_header(reply)
             try:

@@ -613,9 +614,7 @@ class CoreRequestHandler(SocketServer.BaseRequestHandler):

         # add services to a node, either from its services TLV or
         # through the configured defaults for this node type
-        if node_type == NodeTypes.DEFAULT.value or \
-                node_type == NodeTypes.PHYSICAL.value or \
-                node_type == NodeTypes.XEN.value:
+        if node_type in [NodeTypes.DEFAULT.value, NodeTypes.PHYSICAL.value, NodeTypes.XEN.value]:
             if model is None:
                 # TODO: default model from conf file?
                 model = "router"

@@ -1215,8 +1214,10 @@ class CoreRequestHandler(SocketServer.BaseRequestHandler):
         # dispatch to any registered callback for this object type
         replies = self.session.config_object(config_data)

-        # config requests usually have a reply with default data
-        return replies
+        for reply in replies:
+            self.handle_broadcast_config(reply)
+
+        return []

     def handle_file_message(self, message):
         """

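With this change the handler no longer hands raw replies back to its caller: each ConfigData reply from session.config_object() is broadcast through handle_broadcast_config, which the earlier hunk changed to pack the reply's own message_type, and an empty list is returned. A rough stand-in for that flow, not core's actual classes:

class StubHandler(object):
    """Toy model of the reply flow only; core's handler does much more."""

    def __init__(self):
        self.broadcast = []

    def handle_broadcast_config(self, config_data):
        # core packs a CoreConfMessage from config_data here and sends it;
        # the stub just records what would have been sent
        self.broadcast.append(config_data)

    def handle_config_message(self, replies):
        # "replies" stands in for what session.config_object(config_data) returns
        for reply in replies:
            self.handle_broadcast_config(reply)
        return []


handler = StubHandler()
assert handler.handle_config_message(["reply-a", "reply-b"]) == []
assert handler.broadcast == ["reply-a", "reply-b"]
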
@@ -1280,7 +1281,7 @@ class CoreRequestHandler(SocketServer.BaseRequestHandler):

     def handle_interface_message(self, message):
         """
-        Interface Message handler
+        Interface Message handler.

         :param message: interface message to handle
         :return: reply messages

@@ -39,7 +39,7 @@ class PhysicalNode(PyCoreNode):
             # self.privatedir("/var/run")
             # self.privatedir("/var/log")
         except OSError:
-            logger.exception("PhysicalNode.startup()")
+            logger.exception("startup error")
         finally:
             self.lock.release()

@@ -154,30 +154,25 @@ class PhysicalNode(PyCoreNode):
         """
         Apply tc queing disciplines using LxBrNet.linkconfig()
         """
-        netcls = LxBrNet

         # borrow the tc qdisc commands from LxBrNet.linkconfig()
-        tmp = netcls(session=self.session, start=False)
-        tmp.up = True
-        tmp.linkconfig(netif, bw=bw, delay=delay, loss=loss,
-                       duplicate=duplicate, jitter=jitter, netif2=netif2)
-        del tmp
+        linux_bridge = LxBrNet(session=self.session, start=False)
+        linux_bridge.up = True
+        linux_bridge.linkconfig(netif, bw=bw, delay=delay, loss=loss, duplicate=duplicate,
+                                jitter=jitter, netif2=netif2)
+        del linux_bridge

     def newifindex(self):
-        self.lock.acquire()
-        try:
+        with self.lock:
             while self.ifindex in self._netif:
                 self.ifindex += 1
             ifindex = self.ifindex
             self.ifindex += 1
             return ifindex
-        finally:
-            self.lock.release()

-    def newnetif(self, net=None, addrlist=[], hwaddr=None,
-                 ifindex=None, ifname=None):
+    def newnetif(self, net=None, addrlist=[], hwaddr=None, ifindex=None, ifname=None):
         if self.up and net is None:
             raise NotImplementedError

         if ifindex is None:
             ifindex = self.newifindex()

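The newifindex rewrite above trades the manual acquire/try/finally pattern for a with block; both release the lock even if the body raises, assuming the node's lock supports the context-manager protocol like a standard threading lock. A generic illustration:

import threading

lock = threading.Lock()

# manual pattern, as in the old code
lock.acquire()
try:
    index = 1
finally:
    lock.release()

# context-manager pattern, as in the new code
with lock:
    index += 1
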
@@ -186,7 +181,7 @@ class PhysicalNode(PyCoreNode):
             # tunnel to net not built yet, so build it now and adopt it
             gt = self.session.broker.addnettunnel(net.objid)
             if gt is None or len(gt) != 1:
-                logger.warn("Error building tunnel from PhysicalNode.newnetif()")
+                raise ValueError("error building tunnel from adding a new network interface: %s" % gt)
             gt = gt[0]
             net.detach(gt)
             self.adoptnetif(gt, ifindex, hwaddr, addrlist)

@@ -195,16 +190,15 @@ class PhysicalNode(PyCoreNode):
             # this is reached when configuring services (self.up=False)
             if ifname is None:
                 ifname = "gt%d" % ifindex
-            netif = GreTap(node=self, name=ifname, session=self.session,
-                           start=False)
+            netif = GreTap(node=self, name=ifname, session=self.session, start=False)
             self.adoptnetif(netif, ifindex, hwaddr, addrlist)
             return ifindex

     def privatedir(self, path):
         if path[0] != "/":
             raise ValueError, "path not fully qualified: " + path
-        hostpath = os.path.join(self.nodedir,
-                                os.path.normpath(path).strip('/').replace('/', '.'))
+        hostpath = os.path.join(self.nodedir, os.path.normpath(path).strip('/').replace('/', '.'))
         try:
             os.mkdir(hostpath)
         except OSError:

@@ -489,10 +489,9 @@ class CoreServices(ConfigurableManager):
         session_id = config_data.session
         opaque = config_data.opaque

-        type_flag = ConfigFlags.UPDATE.value
-
         # send back a list of available services
         if opaque is None:
+            type_flag = ConfigFlags.NONE.value
             data_types = tuple(repeat(ConfigDataTypes.BOOL.value, len(ServiceManager.services)))
             values = "|".join(repeat('0', len(ServiceManager.services)))
             names = map(lambda x: x._name, ServiceManager.services)

@@ -526,40 +525,15 @@ class CoreServices(ConfigurableManager):
             svc = services[0]
             # send back:
             # dirs, configs, startindex, startup, shutdown, metadata, config
+            type_flag = ConfigFlags.UPDATE.value
             data_types = tuple(repeat(ConfigDataTypes.STRING.value, len(svc.keys)))
             values = svc.tovaluelist(n, services)
             captions = None
             possible_values = None
             groups = None

-        # create response message
-        # tlv_data = ""
-        # if node_id is not None:
-        # tlv_data += coreapi.CoreConfigTlv.pack(ConfigTlvs.NODE.value, node_id)
-
-        # tlv_data += coreapi.CoreConfigTlv.pack(ConfigTlvs.OBJECT.value, self.name)
-        # tlv_data += coreapi.CoreConfigTlv.pack(ConfigTlvs.TYPE.value, type_flag)
-        # tlv_data += coreapi.CoreConfigTlv.pack(ConfigTlvs.TYPES.value, data_types)
-        # tlv_data += coreapi.CoreConfigTlv.pack(ConfigTlvs.VALUES, values)
-
-        # if captions:
-        # tlv_data += coreapi.CoreConfigTlv.pack(ConfigTlvs.CAPTIONS.value, captions)
-        #
-        # if possible_values:
-        # tlv_data += coreapi.CoreConfigTlv.pack(ConfigTlvs.POSSIBLE_VALUES.value, possible_values)
-        #
-        # if groups:
-        # tlv_data += coreapi.CoreConfigTlv.pack(ConfigTlvs.GROUPS.value, groups)
-        #
-        # if session_id is not None:
-        # tlv_data += coreapi.CoreConfigTlv.pack(coreapi.ConfigTlvs.SESSION.value, session_id)
-        #
-        # if opaque:
-        # tlv_data += coreapi.CoreConfigTlv.pack(ConfigTlvs.OPAQUE.value, opaque)
-
-        # return coreapi.CoreConfMessage.pack(0, tlv_data)
-
         return ConfigData(
+            message_type=0,
             node=node_id,
             object=self.name,
             type=type_flag,

@@ -48,18 +48,10 @@ class Core(object):
         network.link(from_interface, to_interface)

     def configure_link(self, network, interface_one, interface_two, values, unidirectional=False):
-        network.linkconfig(
-            netif=interface_one,
-            netif2=interface_two,
-            **values
-        )
+        network.linkconfig(netif=interface_one, netif2=interface_two, **values)

         if not unidirectional:
-            network.linkconfig(
-                netif=interface_two,
-                netif2=interface_one,
-                **values
-            )
+            network.linkconfig(netif=interface_two, netif2=interface_one, **values)

     def ping(self, from_name, to_name):
         from_node = self.nodes[from_name]

@@ -6,9 +6,41 @@ import time

 from core.mobility import BasicRangeModel
 from core.netns import nodes
+from core.phys.pnodes import PhysicalNode


 class TestCore:
+    def test_physical(self, core):
+        """
+        Test physical node network.
+
+        :param conftest.Core core: core fixture to test with
+        """
+
+        # create switch node
+        switch_node = core.session.add_object(cls=nodes.SwitchNode)
+
+        # create a physical node
+        physical_node = core.session.add_object(cls=PhysicalNode, name="p1")
+        core.nodes[physical_node.name] = physical_node
+
+        # create regular node
+        core.create_node("n1")
+
+        # add interface
+        core.add_interface(switch_node, "n1")
+        core.add_interface(switch_node, "p1")
+
+        # instantiate session
+        core.session.instantiate()
+
+        # assert node directories created
+        core.assert_nodes()
+
+        # ping n2 from n1 and assert success
+        status = core.ping("n1", "p1")
+        assert not status
+
     def test_ptp(self, core):
         """
         Test ptp node network.

@@ -16,7 +48,7 @@ class TestCore:
         :param conftest.Core core: core fixture to test with
         """

-        # create switch
+        # create ptp
         ptp_node = core.session.add_object(cls=nodes.PtpNet)

         # create nodes

@@ -44,7 +76,7 @@ class TestCore:
         :param conftest.Core core: core fixture to test with
         """

-        # create switch
+        # create hub
         hub_node = core.session.add_object(cls=nodes.HubNode)

         # create nodes