########## for uploading onto pypi
# this assumes you have an entry 'pypi' in your .pypirc
# see pypi documentation on how to create .pypirc
+LOCAL_ID=$(shell id)
+ifneq "$(LOCAL_ID)" "$(filter $(LOCAL_ID),parmen)"
+BUILD_ID=thierry
+else
+BUILD_ID=mario
+endif
+
PYPI_TARGET=pypi
-PYPI_TARBALL_HOST=mario@build.onelab.eu
+PYPI_TARBALL_HOST=$(BUILD_ID)@build.onelab.eu
PYPI_TARBALL_TOPDIR=/build/nepi
VERSION=$(shell cat VERSION)
#./setup.py sdist upload -r testpypi
./setup.py sdist
twine upload dist/* -r testpypi
+
+#################### convenience, for debugging only
+# make +foo : prints the value of $(foo)
+# make ++foo : idem but verbose, i.e. foo=$(foo)
+++%: varname=$(subst +,,$@)
+++%:
+ @echo "$(varname)=$($(varname))"
++%: varname=$(subst +,,$@)
++%:
+ @echo "$($(varname))"
master_doc = 'index'
# General information about the project.
-project = u'NEPI'
-copyright = u'2014, Alina Quereilhac, Lucia Guevgeozian Odizzio, Julien Tribino'
+project = 'NEPI'
+copyright = '2014, Alina Quereilhac, Lucia Guevgeozian Odizzio, Julien Tribino'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
- ('index', 'NEPI.tex', u'NEPI Documentation',
- u'Alina Quereilhac, Lucia Guevgeozian Odizzio, Julien Tribino', 'manual'),
+ ('index', 'NEPI.tex', 'NEPI Documentation',
+ 'Alina Quereilhac, Lucia Guevgeozian Odizzio, Julien Tribino', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
- ('index', 'NEPI', u'NEPI Documentation',
- [u'Alina Quereilhac, Lucia Guevgeozian Odizzio, Julien Tribino'], 1)
+ ('index', 'NEPI', 'NEPI Documentation',
+ ['Alina Quereilhac, Lucia Guevgeozian Odizzio, Julien Tribino'], 1)
]
# If true, show URL addresses after external links.
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
- ('index', 'NEPI', u'NEPI Documentation',
- u'Alina Quereilhac, Lucia Guevgeozian Odizzio, Julien Tribino',
+ ('index', 'NEPI', 'NEPI Documentation',
+ 'Alina Quereilhac, Lucia Guevgeozian Odizzio, Julien Tribino',
'NEPI', 'The network experimentation programming interface',
'Miscellaneous'),
]
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
from nepi.execution.runner import ExperimentRunner
from nepi.util.netgraph import NetGraph, TopologyType
### Compute metric: Avg number of Interests seen per content name
### normalized by the number of nodes in the shortest path
- content_name_count = len(content_names.values())
+ content_name_count = len(content_names)
nodes_in_shortest_path = len(shortest_path) - 1
metric = interest_count / (float(content_name_count) * float(nodes_in_shortest_path))
# TODO: DUMP RESULTS TO FILE
# TODO: DUMP GRAPH DELAYS!
- f = open("/tmp/metric", "a+")
- f.write("%.2f\n" % metric)
- f.close()
- print " METRIC", metric
+ with open("/tmp/metric", "a+") as f:
+ f.write("%.2f\n" % metric)
+ print(" METRIC", metric)
return metric
add_node_callback = add_dce_node,
add_edge_callback = add_dce_edge)
- print "Results stored at", ec.exp_dir
+ print("Results stored at", ec.exp_dir)
#### Retrieve the consumer to wait for ot to finish
ccncat = ec.filter_resources("linux::ns3::dce::CCNCat")
-#!/usr/bin/env python\r
-#\r
-# NEPI, a framework to manage network experiments\r
-# Copyright (C) 2013 INRIA\r
-#\r
-# This program is free software: you can redistribute it and/or modify\r
-# it under the terms of the GNU General Public License version 2 as\r
-# published by the Free Software Foundation;\r
-#\r
-# This program is distributed in the hope that it will be useful,\r
-# but WITHOUT ANY WARRANTY; without even the implied warranty of\r
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
-# GNU General Public License for more details.\r
-#\r
-# You should have received a copy of the GNU General Public License\r
-# along with this program. If not, see <http://www.gnu.org/licenses/>.\r
-#\r
-# Author: Alina Quereilhac <alina.quereilhac@inria.fr>\r
-\r
-\r
-from nepi.execution.ec import ExperimentController \r
-from nepi.execution.runner import ExperimentRunner\r
-from nepi.util.netgraph import TopologyType\r
-import nepi.data.processing.ccn.parser as ccn_parser\r
-\r
-import networkx\r
-import socket\r
-import os\r
-import numpy\r
-from scipy import stats\r
-from matplotlib import pyplot\r
-import math\r
-import random\r
-\r
-def avg_interest_rtt(ec, run):\r
- logs_dir = ec.run_dir\r
- \r
- # Parse downloaded CCND logs\r
- (graph,\r
- content_names,\r
- interest_expiry_count,\r
- interest_dupnonce_count,\r
- interest_count,\r
- content_count) = ccn_parser.process_content_history_logs(\r
- logs_dir, ec.netgraph.topology)\r
-\r
- # statistics on RTT\r
- rtts = [content_names[content_name]["rtt"] \\r
- for content_name in content_names.keys()]\r
-\r
- # sample mean and standard deviation\r
- sample = numpy.array(rtts)\r
- n, min_max, mean, var, skew, kurt = stats.describe(sample)\r
- std = math.sqrt(var)\r
- ci = stats.t.interval(0.95, n-1, loc = mean, \r
- scale = std/math.sqrt(n))\r
-\r
- global metrics\r
- metrics.append((mean, ci[0], ci[1]))\r
- \r
- return mean\r
-\r
-def normal_law(ec, run, sample):\r
- x = numpy.array(sample)\r
- n = len(sample)\r
- std = x.std()\r
- se = std / math.sqrt(n)\r
- m = x.mean()\r
- se95 = se * 2\r
- \r
- return m * 0.05 >= se95\r
-\r
-def post_process(ec, runs):\r
- global metrics\r
- \r
- # plot convergence graph\r
- y = numpy.array([float(m[0]) for m in metrics])\r
- low = numpy.array([float(m[1]) for m in metrics])\r
- high = numpy.array([float(m[2]) for m in metrics])\r
- error = [y - low, high - y]\r
- x = range(1,runs + 1)\r
-\r
- # plot average RTT and confidence interval for each iteration\r
- pyplot.errorbar(x, y, yerr = error, fmt='o')\r
- pyplot.plot(x, y, 'r-')\r
- pyplot.xlim([0.5, runs + 0.5])\r
- pyplot.xticks(numpy.arange(1, len(y)+1, 1))\r
- pyplot.xlabel('Iteration')\r
- pyplot.ylabel('Average RTT')\r
- pyplot.grid()\r
- pyplot.savefig("plot.png")\r
- pyplot.show()\r
-\r
-content_name = "ccnx:/test/bunny.ts"\r
-\r
-STOP_TIME = "5000s"\r
-\r
-repofile = os.path.join(\r
- os.path.dirname(os.path.realpath(__file__)), \r
- "repoFile1.0.8.2")\r
-\r
-def get_simulator(ec):\r
- simulator = ec.filter_resources("linux::ns3::Simulation")\r
-\r
- if not simulator:\r
- node = ec.register_resource("linux::Node")\r
- ec.set(node, "hostname", "localhost")\r
-\r
- simu = ec.register_resource("linux::ns3::Simulation")\r
- ec.register_connection(simu, node)\r
- return simu\r
-\r
- return simulator[0]\r
-\r
-def add_collector(ec, trace_name, subdir, newname = None):\r
- collector = ec.register_resource("Collector")\r
- ec.set(collector, "traceName", trace_name)\r
- ec.set(collector, "subDir", subdir)\r
- if newname:\r
- ec.set(collector, "rename", newname)\r
-\r
- return collector\r
-\r
-def add_dce_host(ec, nid):\r
- simu = get_simulator(ec)\r
- \r
- host = ec.register_resource("ns3::Node")\r
- ec.set(host, "enableStack", True)\r
- ec.register_connection(host, simu)\r
-\r
- # Annotate the graph\r
- ec.netgraph.annotate_node(nid, "host", host)\r
- \r
-def add_dce_ccnd(ec, nid):\r
- # Retrieve annotation from netgraph\r
- host = ec.netgraph.node_annotation(nid, "host")\r
- \r
- # Add dce ccnd to the dce node\r
- ccnd = ec.register_resource("linux::ns3::dce::CCND")\r
- ec.set (ccnd, "stackSize", 1<<20)\r
- ec.set (ccnd, "debug", 7)\r
- ec.set (ccnd, "capacity", 50000)\r
- ec.set (ccnd, "StartTime", "1s")\r
- ec.set (ccnd, "StopTime", STOP_TIME)\r
- ec.register_connection(ccnd, host)\r
-\r
- # Collector to retrieve ccnd log\r
- collector = add_collector(ec, "stderr", str(nid), "log")\r
- ec.register_connection(collector, ccnd)\r
-\r
- # Annotate the graph\r
- ec.netgraph.annotate_node(nid, "ccnd", ccnd)\r
-\r
-def add_dce_ccnr(ec, nid):\r
- # Retrieve annotation from netgraph\r
- host = ec.netgraph.node_annotation(nid, "host")\r
- \r
- # Add a CCN content repository to the dce node\r
- ccnr = ec.register_resource("linux::ns3::dce::CCNR")\r
- ec.set (ccnr, "repoFile1", repofile) \r
- ec.set (ccnr, "stackSize", 1<<20)\r
- ec.set (ccnr, "StartTime", "2s")\r
- ec.set (ccnr, "StopTime", STOP_TIME)\r
- ec.register_connection(ccnr, host)\r
-\r
-def add_dce_ccncat(ec, nid):\r
- # Retrieve annotation from netgraph\r
- host = ec.netgraph.node_annotation(nid, "host")\r
- \r
- # Add a ccncat application to the dce host\r
- ccncat = ec.register_resource("linux::ns3::dce::CCNCat")\r
- ec.set (ccncat, "contentName", content_name)\r
- ec.set (ccncat, "stackSize", 1<<20)\r
- ec.set (ccncat, "StartTime", "8s")\r
- ec.set (ccncat, "StopTime", STOP_TIME)\r
- ec.register_connection(ccncat, host)\r
-\r
-def add_dce_fib_entry(ec, nid1, nid2):\r
- # Retrieve annotations from netgraph\r
- host1 = ec.netgraph.node_annotation(nid1, "host")\r
- net = ec.netgraph.edge_net_annotation(nid1, nid2)\r
- ip2 = net[nid2]\r
-\r
- # Add FIB entry between peer hosts\r
- ccndc = ec.register_resource("linux::ns3::dce::FIBEntry")\r
- ec.set (ccndc, "protocol", "udp") \r
- ec.set (ccndc, "uri", "ccnx:/") \r
- ec.set (ccndc, "host", ip2)\r
- ec.set (ccndc, "stackSize", 1<<20)\r
- ec.set (ccndc, "StartTime", "2s")\r
- ec.set (ccndc, "StopTime", STOP_TIME)\r
- ec.register_connection(ccndc, host1)\r
-\r
-def add_dce_net_iface(ec, nid1, nid2):\r
- # Retrieve annotations from netgraph\r
- host = ec.netgraph.node_annotation(nid1, "host")\r
- net = ec.netgraph.edge_net_annotation(nid1, nid2)\r
- ip1 = net[nid1]\r
- prefix = net["prefix"]\r
-\r
- dev = ec.register_resource("ns3::PointToPointNetDevice")\r
- ec.set(dev,"DataRate", "5Mbps")\r
- ec.set(dev, "ip", ip1)\r
- ec.set(dev, "prefix", prefix)\r
- ec.register_connection(host, dev)\r
-\r
- queue = ec.register_resource("ns3::DropTailQueue")\r
- ec.register_connection(dev, queue)\r
-\r
- return dev\r
-\r
-def add_edge(ec, nid1, nid2):\r
- ### Add network interfaces to hosts\r
- p2p1 = add_dce_net_iface(ec, nid1, nid2)\r
- p2p2 = add_dce_net_iface(ec, nid2, nid1)\r
-\r
- # Create point to point link between interfaces\r
- chan = ec.register_resource("ns3::PointToPointChannel")\r
- ec.set(chan, "Delay", "0ms")\r
-\r
- ec.register_connection(chan, p2p1)\r
- ec.register_connection(chan, p2p2)\r
-\r
- #### Add routing between CCN nodes\r
- add_dce_fib_entry(ec, nid1, nid2)\r
- add_dce_fib_entry(ec, nid2, nid1)\r
-\r
-def add_node(ec, nid):\r
- ### Add CCN nodes (ec.netgraph holds the topology graph)\r
- add_dce_host(ec, nid)\r
- add_dce_ccnd(ec, nid)\r
- \r
- if nid == ec.netgraph.targets()[0]:\r
- add_dce_ccnr(ec, nid)\r
-\r
- if nid == ec.netgraph.sources()[0]:\r
- add_dce_ccncat(ec, nid)\r
-\r
-def wait_guids(ec):\r
- return ec.filter_resources("linux::ns3::dce::CCNCat")\r
-\r
-if __name__ == '__main__':\r
-\r
- metrics = []\r
-\r
- # topology translation to NEPI model\r
- ec = ExperimentController("dce_4n_linear",\r
- topo_type = TopologyType.LINEAR, \r
- node_count = 4,\r
- assign_st = True,\r
- assign_ips = True,\r
- add_node_callback = add_node,\r
- add_edge_callback = add_edge)\r
-\r
- #### Run experiment until metric convergence\r
- rnr = ExperimentRunner()\r
- runs = rnr.run(ec,\r
- min_runs = 10,\r
- max_runs = 100, \r
- compute_metric_callback = avg_interest_rtt,\r
- evaluate_convergence_callback = normal_law,\r
- wait_guids = wait_guids(ec))\r
- \r
- ### post processing\r
- post_process(ec, runs)\r
-\r
-\r
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation;
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.runner import ExperimentRunner
+from nepi.util.netgraph import TopologyType
+import nepi.data.processing.ccn.parser as ccn_parser
+
+import networkx
+import socket
+import os
+import numpy
+from scipy import stats
+from matplotlib import pyplot
+import math
+import random
+
+def avg_interest_rtt(ec, run):
+    """Compute the sample mean of the RTTs seen in this run's CCND logs.
+
+    Parses the downloaded logs with ccn_parser, computes the mean RTT over
+    all content names together with a 95% Student-t confidence interval,
+    appends (mean, ci_low, ci_high) to the global `metrics` list and
+    returns the mean (used as compute_metric_callback by ExperimentRunner).
+    """
+    logs_dir = ec.run_dir
+
+    # Parse downloaded CCND logs
+    (graph,
+    content_names,
+    interest_expiry_count,
+    interest_dupnonce_count,
+    interest_count,
+    content_count) = ccn_parser.process_content_history_logs(
+        logs_dir, ec.netgraph.topology)
+
+    # statistics on RTT
+    rtts = [content_names[content_name]["rtt"] \
+        for content_name in content_names]
+
+    # sample mean and standard deviation
+    sample = numpy.array(rtts)
+    n, min_max, mean, var, skew, kurt = stats.describe(sample)
+    std = math.sqrt(var)
+    # 95% confidence interval around the mean (Student-t, n-1 dof)
+    ci = stats.t.interval(0.95, n-1, loc = mean, 
+        scale = std/math.sqrt(n))
+
+    # record (mean, ci_low, ci_high) for post_process plotting
+    global metrics
+    metrics.append((mean, ci[0], ci[1]))
+
+    return mean
+
+def normal_law(ec, run, sample):
+    """Convergence test used as evaluate_convergence_callback.
+
+    Returns True when twice the standard error of the mean (~95%
+    confidence half-width) is no larger than 5% of the sample mean.
+    """
+    x = numpy.array(sample)
+    n = len(sample)
+    std = x.std()
+    se = std / math.sqrt(n)  # standard error of the mean
+    m = x.mean()
+    se95 = se * 2            # ~95% confidence half-width
+    
+    return m * 0.05 >= se95
+
+def post_process(ec, runs):
+    """Plot the per-iteration average RTT with its confidence interval.
+
+    Reads the global `metrics` list of (mean, ci_low, ci_high) tuples,
+    saves the figure to plot.png and displays it.
+    """
+    global metrics
+    
+    # plot convergence graph
+    y = numpy.array([float(m[0]) for m in metrics])
+    low = numpy.array([float(m[1]) for m in metrics])
+    high = numpy.array([float(m[2]) for m in metrics])
+    # asymmetric error bars: distance from the mean to each CI bound
+    error = [y - low, high - y]
+    x = range(1,runs + 1)
+
+    # plot average RTT and confidence interval for each iteration
+    pyplot.errorbar(x, y, yerr = error, fmt='o')
+    pyplot.plot(x, y, 'r-')
+    pyplot.xlim([0.5, runs + 0.5])
+    pyplot.xticks(numpy.arange(1, len(y)+1, 1))
+    pyplot.xlabel('Iteration')
+    pyplot.ylabel('Average RTT')
+    pyplot.grid()
+    pyplot.savefig("plot.png")
+    pyplot.show()
+
+# CCN name of the content published by the repository and fetched by ccncat
+content_name = "ccnx:/test/bunny.ts"
+
+# StopTime applied to every DCE application registered below
+STOP_TIME = "5000s"
+
+# CCN repository file expected to sit next to this script
+repofile = os.path.join(
+    os.path.dirname(os.path.realpath(__file__)), 
+    "repoFile1.0.8.2")
+
+def get_simulator(ec):
+    """Return the experiment's single linux::ns3::Simulation resource.
+
+    On first call, registers the simulation on a localhost linux::Node;
+    subsequent calls return the already-registered instance.
+    """
+    simulator = ec.filter_resources("linux::ns3::Simulation")
+
+    if not simulator:
+        node = ec.register_resource("linux::Node")
+        ec.set(node, "hostname", "localhost")
+
+        simu = ec.register_resource("linux::ns3::Simulation")
+        ec.register_connection(simu, node)
+        return simu
+
+    return simulator[0]
+
+def add_collector(ec, trace_name, subdir, newname = None):
+    """Register a Collector that stores trace `trace_name` under `subdir`.
+
+    If `newname` is given the collected file is renamed to it.
+    Returns the collector's guid.
+    """
+    collector = ec.register_resource("Collector")
+    ec.set(collector, "traceName", trace_name)
+    ec.set(collector, "subDir", subdir)
+    if newname:
+        ec.set(collector, "rename", newname)
+
+    return collector
+
+def add_dce_host(ec, nid):
+    """Register an ns-3 node (IP stack enabled) in the simulation and
+    annotate topology node `nid` with it under the "host" key."""
+    simu = get_simulator(ec)
+    
+    host = ec.register_resource("ns3::Node")
+    ec.set(host, "enableStack", True)
+    ec.register_connection(host, simu)
+
+    # Annotate the graph
+    ec.netgraph.annotate_node(nid, "host", host)
+
+def add_dce_ccnd(ec, nid):
+    """Run a CCN daemon (ccnd) on the simulated host of node `nid`,
+    collect its stderr log, and annotate the graph with the guid."""
+    # Retrieve annotation from netgraph
+    host = ec.netgraph.node_annotation(nid, "host")
+    
+    # Add dce ccnd to the dce node
+    ccnd = ec.register_resource("linux::ns3::dce::CCND")
+    ec.set (ccnd, "stackSize", 1<<20)
+    ec.set (ccnd, "debug", 7)
+    ec.set (ccnd, "capacity", 50000)
+    ec.set (ccnd, "StartTime", "1s")
+    ec.set (ccnd, "StopTime", STOP_TIME)
+    ec.register_connection(ccnd, host)
+
+    # Collector to retrieve ccnd log (renamed to "log" per-node)
+    collector = add_collector(ec, "stderr", str(nid), "log")
+    ec.register_connection(collector, ccnd)
+
+    # Annotate the graph
+    ec.netgraph.annotate_node(nid, "ccnd", ccnd)
+
+def add_dce_ccnr(ec, nid):
+    """Run a CCN repository (ccnr) serving `repofile` on the simulated
+    host of node `nid` (the content producer)."""
+    # Retrieve annotation from netgraph
+    host = ec.netgraph.node_annotation(nid, "host")
+    
+    # Add a CCN content repository to the dce node
+    ccnr = ec.register_resource("linux::ns3::dce::CCNR")
+    ec.set (ccnr, "repoFile1", repofile)
+    ec.set (ccnr, "stackSize", 1<<20)
+    ec.set (ccnr, "StartTime", "2s")
+    ec.set (ccnr, "StopTime", STOP_TIME)
+    ec.register_connection(ccnr, host)
+
+def add_dce_ccncat(ec, nid):
+    """Run ccncat requesting `content_name` on the simulated host of
+    node `nid` (the content consumer)."""
+    # Retrieve annotation from netgraph
+    host = ec.netgraph.node_annotation(nid, "host")
+    
+    # Add a ccncat application to the dce host
+    ccncat = ec.register_resource("linux::ns3::dce::CCNCat")
+    ec.set (ccncat, "contentName", content_name)
+    ec.set (ccncat, "stackSize", 1<<20)
+    ec.set (ccncat, "StartTime", "8s")
+    ec.set (ccncat, "StopTime", STOP_TIME)
+    ec.register_connection(ccncat, host)
+
+def add_dce_fib_entry(ec, nid1, nid2):
+    """Register a UDP FIB entry on nid1's host routing ccnx:/ towards
+    nid2's IP address on their shared link."""
+    # Retrieve annotations from netgraph
+    host1 = ec.netgraph.node_annotation(nid1, "host")
+    net = ec.netgraph.edge_net_annotation(nid1, nid2)
+    ip2 = net[nid2]
+
+    # Add FIB entry between peer hosts
+    ccndc = ec.register_resource("linux::ns3::dce::FIBEntry")
+    ec.set (ccndc, "protocol", "udp")
+    ec.set (ccndc, "uri", "ccnx:/")
+    ec.set (ccndc, "host", ip2)
+    ec.set (ccndc, "stackSize", 1<<20)
+    ec.set (ccndc, "StartTime", "2s")
+    ec.set (ccndc, "StopTime", STOP_TIME)
+    ec.register_connection(ccndc, host1)
+
+def add_dce_net_iface(ec, nid1, nid2):
+    """Register a point-to-point net device (with a drop-tail queue) on
+    nid1's host for its link towards nid2; returns the device guid."""
+    # Retrieve annotations from netgraph
+    host = ec.netgraph.node_annotation(nid1, "host")
+    net = ec.netgraph.edge_net_annotation(nid1, nid2)
+    ip1 = net[nid1]
+    prefix = net["prefix"]
+
+    dev = ec.register_resource("ns3::PointToPointNetDevice")
+    ec.set(dev,"DataRate", "5Mbps")
+    ec.set(dev, "ip", ip1)
+    ec.set(dev, "prefix", prefix)
+    ec.register_connection(host, dev)
+
+    queue = ec.register_resource("ns3::DropTailQueue")
+    ec.register_connection(dev, queue)
+
+    return dev
+
+def add_edge(ec, nid1, nid2):
+    """Topology edge callback: create a p2p link between nid1 and nid2
+    and install FIB entries in both directions."""
+    ### Add network interfaces to hosts
+    p2p1 = add_dce_net_iface(ec, nid1, nid2)
+    p2p2 = add_dce_net_iface(ec, nid2, nid1)
+
+    # Create point to point link between interfaces
+    chan = ec.register_resource("ns3::PointToPointChannel")
+    ec.set(chan, "Delay", "0ms")
+
+    ec.register_connection(chan, p2p1)
+    ec.register_connection(chan, p2p2)
+
+    #### Add routing between CCN nodes
+    add_dce_fib_entry(ec, nid1, nid2)
+    add_dce_fib_entry(ec, nid2, nid1)
+
+def add_node(ec, nid):
+    """Topology node callback: every node gets a host and a ccnd; the
+    first target additionally gets the repository, the first source the
+    ccncat consumer."""
+    ### Add CCN nodes (ec.netgraph holds the topology graph)
+    add_dce_host(ec, nid)
+    add_dce_ccnd(ec, nid)
+    
+    if nid == ec.netgraph.targets()[0]:
+        add_dce_ccnr(ec, nid)
+
+    if nid == ec.netgraph.sources()[0]:
+        add_dce_ccncat(ec, nid)
+
+def wait_guids(ec):
+    """Return the guids of the CCNCat consumers; the runner waits for
+    these resources to finish before ending each run."""
+    return ec.filter_resources("linux::ns3::dce::CCNCat")
+
+if __name__ == '__main__':
+
+    # shared accumulator filled by avg_interest_rtt, read by post_process
+    metrics = []
+
+    # topology translation to NEPI model: 4-node linear CCN topology
+    ec = ExperimentController("dce_4n_linear",
+        topo_type = TopologyType.LINEAR, 
+        node_count = 4,
+        assign_st = True,
+        assign_ips = True,
+        add_node_callback = add_node,
+        add_edge_callback = add_edge)
+
+    #### Run experiment until metric convergence (normal_law), between
+    #### 10 and 100 iterations
+    rnr = ExperimentRunner()
+    runs = rnr.run(ec,
+        min_runs = 10,
+        max_runs = 100, 
+        compute_metric_callback = avg_interest_rtt,
+        evaluate_convergence_callback = normal_law,
+        wait_guids = wait_guids(ec))
+    
+    ### post processing: plot per-iteration RTT convergence
+    post_process(ec, runs)
+
+
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
from nepi.execution.ec import ExperimentController
from nepi.execution.runner import ExperimentRunner
### Compute metric: Avg number of Interests seen per content name
### normalized by the number of nodes in the shortest path
- content_name_count = len(content_names.values())
+ content_name_count = len(content_names)
nodes_in_shortest_path = len(shortest_path) - 1
metric = interest_count / (float(content_name_count) * float(nodes_in_shortest_path))
# TODO: DUMP RESULTS TO FILE
# TODO: DUMP GRAPH DELAYS!
- f = open("/tmp/metric", "a+")
- f.write("%.2f\n" % metric)
- f.close()
- print " METRIC", metric
+ with open("/tmp/metric", "a+") as f:
+ f.write("%.2f\n" % metric)
+ print(" METRIC", metric)
return metric
add_node_callback = add_pl_node,
add_edge_callback = add_pl_edge)
- print "Results stored at", ec.exp_dir
+ print("Results stored at", ec.exp_dir)
#### Retrieve the content producing resource to wait for ot to finish
ccncat = ec.filter_resources("linux::CCNCat")
-#!/usr/bin/env python\r
-#\r
-# NEPI, a framework to manage network experiments\r
-# Copyright (C) 2013 INRIA\r
-#\r
-# This program is free software: you can redistribute it and/or modify\r
-# it under the terms of the GNU General Public License version 2 as\r
-# published by the Free Software Foundation;\r
-#\r
-# This program is distributed in the hope that it will be useful,\r
-# but WITHOUT ANY WARRANTY; without even the implied warranty of\r
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
-# GNU General Public License for more details.\r
-#\r
-# You should have received a copy of the GNU General Public License\r
-# along with this program. If not, see <http://www.gnu.org/licenses/>.\r
-#\r
-# Author: Alina Quereilhac <alina.quereilhac@inria.fr>\r
-\r
-from nepi.execution.ec import ExperimentController \r
-from nepi.execution.runner import ExperimentRunner\r
-from nepi.util.netgraph import TopologyType\r
-import nepi.data.processing.ccn.parser as ccn_parser\r
-\r
-import networkx\r
-import socket\r
-import os\r
-import numpy\r
-from scipy import stats\r
-from matplotlib import pyplot\r
-import math\r
-import random\r
-\r
-from optparse import OptionParser\r
-\r
-usage = ("usage: %prog -s <pl-slice> -u <pl-user> -p <pl-password> "\r
- "-k <pl-ssh-key> -N <nodes>")\r
-\r
-parser = OptionParser(usage = usage)\r
-parser.add_option("-s", "--pl-slice", dest="pl_slice",\r
- help="PlanetLab slicename", type="str")\r
-parser.add_option("-u", "--pl-user", dest="pl_user",\r
- help="PlanetLab web username", type="str")\r
-parser.add_option("-p", "--pl-password", dest="pl_password",\r
- help="PlanetLab web password", type="str")\r
-parser.add_option("-k", "--pl-ssh-key", dest="pl_ssh_key",\r
- help="Path to private SSH key associated with the PL account",\r
- type="str")\r
-parser.add_option("-N", "--nodes", dest="nodes",\r
- help="Comma separated list of Planetlab nodes",\r
- type="str")\r
-\r
-(options, args) = parser.parse_args()\r
-\r
-pl_slice = options.pl_slice\r
-pl_ssh_key = options.pl_ssh_key\r
-pl_user = options.pl_user\r
-pl_password = options.pl_password\r
-NODES = options.nodes.strip().split(",")\r
-\r
-def avg_interest_rtt(ec, run):\r
- logs_dir = ec.run_dir\r
- \r
- # Parse downloaded CCND logs\r
- (graph,\r
- content_names,\r
- interest_expiry_count,\r
- interest_dupnonce_count,\r
- interest_count,\r
- content_count) = ccn_parser.process_content_history_logs(\r
- logs_dir, ec.netgraph.topology)\r
-\r
- # statistics on RTT\r
- rtts = [content_names[content_name]["rtt"] \\r
- for content_name in content_names.keys()]\r
-\r
- # sample mean and standard deviation\r
- sample = numpy.array(rtts)\r
- n, min_max, mean, var, skew, kurt = stats.describe(sample)\r
- std = math.sqrt(var)\r
- ci = stats.t.interval(0.95, n-1, loc = mean, \r
- scale = std/math.sqrt(n))\r
-\r
- global metrics\r
- metrics.append((mean, ci[0], ci[1]))\r
- \r
- return mean\r
-\r
-def normal_law(ec, run, sample):\r
- print "SAMPLE", sample\r
-\r
- x = numpy.array(sample)\r
- n = len(sample)\r
- std = x.std()\r
- se = std / math.sqrt(n)\r
- m = x.mean()\r
- se95 = se * 2\r
- \r
- return m * 0.05 >= se95\r
-\r
-def post_process(ec, runs):\r
- global metrics\r
- \r
- # plot convergence graph\r
- y = numpy.array([float(m[0]) for m in metrics])\r
- low = numpy.array([float(m[1]) for m in metrics])\r
- high = numpy.array([float(m[2]) for m in metrics])\r
- error = [y - low, high - y]\r
- x = range(1,runs + 1)\r
-\r
- # plot average RTT and confidence interval for each iteration\r
- pyplot.errorbar(x, y, yerr = error, fmt='o')\r
- pyplot.plot(x, y, 'r-')\r
- pyplot.xlim([0.5, runs + 0.5])\r
- pyplot.xticks(numpy.arange(1, len(y)+1, 1))\r
- pyplot.xlabel('Iteration')\r
- pyplot.ylabel('Average RTT')\r
- pyplot.grid()\r
- pyplot.savefig("plot.png")\r
- pyplot.show()\r
-\r
-content_name = "ccnx:/test/bunny.ts"\r
-\r
-repofile = os.path.join(\r
- os.path.dirname(os.path.realpath(__file__)), \r
- "repoFile1.0.8.2")\r
-\r
-def get_simulator(ec):\r
- simulator = ec.filter_resources("linux::ns3::Simulation")\r
-\r
- if not simulator:\r
- node = ec.register_resource("linux::Node")\r
- ec.set(node, "hostname", "localhost")\r
-\r
- simu = ec.register_resource("linux::ns3::Simulation")\r
- ec.register_connection(simu, node)\r
- return simu\r
-\r
- return simulator[0]\r
-\r
-def add_collector(ec, trace_name, subdir, newname = None):\r
- collector = ec.register_resource("Collector")\r
- ec.set(collector, "traceName", trace_name)\r
- ec.set(collector, "subDir", subdir)\r
- if newname:\r
- ec.set(collector, "rename", newname)\r
-\r
- return collector\r
-\r
-def add_dce_host(ec, nid):\r
- simu = get_simulator(ec)\r
- \r
- host = ec.register_resource("ns3::Node")\r
- ec.set(host, "enableStack", True)\r
- ec.register_connection(host, simu)\r
-\r
- # Annotate the graph\r
- ec.netgraph.annotate_node(nid, "host", host)\r
- \r
-def add_dce_ccnd(ec, nid):\r
- # Retrieve annotation from netgraph\r
- host = ec.netgraph.node_annotation(nid, "host")\r
- \r
- # Add dce ccnd to the dce node\r
- ccnd = ec.register_resource("linux::ns3::dce::CCND")\r
- ec.set (ccnd, "stackSize", 1<<20)\r
- ec.set (ccnd, "debug", 7)\r
- ec.set (ccnd, "capacity", 50000)\r
- ec.set (ccnd, "StartTime", "1s")\r
- ec.set (ccnd, "StopTime", STOP_TIME)\r
- ec.register_connection(ccnd, host)\r
-\r
- # Collector to retrieve ccnd log\r
- collector = add_collector(ec, "stderr", str(nid), "log")\r
- ec.register_connection(collector, ccnd)\r
-\r
- # Annotate the graph\r
- ec.netgraph.annotate_node(nid, "ccnd", ccnd)\r
-\r
-def add_dce_ccnr(ec, nid):\r
- # Retrieve annotation from netgraph\r
- host = ec.netgraph.node_annotation(nid, "host")\r
- \r
- # Add a CCN content repository to the dce node\r
- ccnr = ec.register_resource("linux::ns3::dce::CCNR")\r
- ec.set (ccnr, "repoFile1", repofile) \r
- ec.set (ccnr, "stackSize", 1<<20)\r
- ec.set (ccnr, "StartTime", "2s")\r
- ec.set (ccnr, "StopTime", STOP_TIME)\r
- ec.register_connection(ccnr, host)\r
-\r
-def add_dce_ccncat(ec, nid):\r
- # Retrieve annotation from netgraph\r
- host = ec.netgraph.node_annotation(nid, "host")\r
- \r
- # Add a ccncat application to the dce host\r
- ccncat = ec.register_resource("linux::ns3::dce::CCNCat")\r
- ec.set (ccncat, "contentName", content_name)\r
- ec.set (ccncat, "stackSize", 1<<20)\r
- ec.set (ccncat, "StartTime", "8s")\r
- ec.set (ccncat, "StopTime", STOP_TIME)\r
- ec.register_connection(ccncat, host)\r
-\r
-def add_dce_fib_entry(ec, nid1, nid2):\r
- # Retrieve annotations from netgraph\r
- host1 = ec.netgraph.node_annotation(nid1, "host")\r
- net = ec.netgraph.edge_net_annotation(nid1, nid2)\r
- ip2 = net[nid2]\r
-\r
- # Add FIB entry between peer hosts\r
- ccndc = ec.register_resource("linux::ns3::dce::FIBEntry")\r
- ec.set (ccndc, "protocol", "udp") \r
- ec.set (ccndc, "uri", "ccnx:/") \r
- ec.set (ccndc, "host", ip2)\r
- ec.set (ccndc, "stackSize", 1<<20)\r
- ec.set (ccndc, "StartTime", "2s")\r
- ec.set (ccndc, "StopTime", STOP_TIME)\r
- ec.register_connection(ccndc, host1)\r
-\r
-def add_dce_net_iface(ec, nid1, nid2):\r
- # Retrieve annotations from netgraph\r
- host = ec.netgraph.node_annotation(nid1, "host")\r
- net = ec.netgraph.edge_net_annotation(nid1, nid2)\r
- ip1 = net[nid1]\r
- prefix = net["prefix"]\r
-\r
- dev = ec.register_resource("ns3::PointToPointNetDevice")\r
- ec.set(dev,"DataRate", "5Mbps")\r
- ec.set(dev, "ip", ip1)\r
- ec.set(dev, "prefix", prefix)\r
- ec.register_connection(host, dev)\r
-\r
- queue = ec.register_resource("ns3::DropTailQueue")\r
- ec.register_connection(dev, queue)\r
-\r
- return dev\r
-\r
-def add_pl_host(ec, nid):\r
- hostname = NODES[nid]\r
-\r
- # Add a planetlab host to the experiment description\r
- host = ec.register_resource("planetlab::Node")\r
- ec.set(host, "hostname", hostname)\r
- ec.set(host, "username", pl_slice)\r
- ec.set(host, "identity", pl_ssh_key)\r
- ec.set(host, "cleanExperiment", True)\r
- ec.set(host, "cleanProcesses", True)\r
-\r
- # Annotate the graph\r
- ec.netgraph.annotate_node(nid, "hostname", hostname)\r
- ec.netgraph.annotate_node(nid, "host", host)\r
- \r
- # Annotate the graph node with an ip address\r
- ip = socket.gethostbyname(hostname)\r
- ec.netgraph.annotate_node_ip(nid, ip)\r
-\r
-def add_pl_ccnd(ec, nid):\r
- # Retrieve annotation from netgraph\r
- host = ec.netgraph.node_annotation(nid, "host")\r
- \r
- # Add a CCN daemon to the planetlab node\r
- ccnd = ec.register_resource("linux::CCND")\r
- ec.set(ccnd, "debug", 7)\r
- ec.register_connection(ccnd, host)\r
- \r
- # Collector to retrieve ccnd log\r
- collector = add_collector(ec, "stderr", str(nid), "log")\r
- ec.register_connection(collector, ccnd)\r
-\r
- # Annotate the graph\r
- ec.netgraph.annotate_node(nid, "ccnd", ccnd)\r
-\r
-def add_pl_ccnr(ec, nid):\r
- # Retrieve annotation from netgraph\r
- ccnd = ec.netgraph.node_annotation(nid, "ccnd")\r
- \r
- # Add a CCN content repository to the planetlab node\r
- ccnr = ec.register_resource("linux::CCNR")\r
-\r
- ec.set(ccnr, "repoFile1", repofile)\r
- ec.register_connection(ccnr, ccnd)\r
-\r
-def add_pl_ccncat(ec, nid):\r
- # Retrieve annotation from netgraph\r
- ccnd = ec.netgraph.node_annotation(nid, "ccnd")\r
- \r
- # Add a CCN cat application to the planetlab node\r
- ccncat = ec.register_resource("linux::CCNCat")\r
- ec.set(ccncat, "contentName", content_name)\r
- ec.register_connection(ccncat, ccnd)\r
-\r
-def add_pl_fib_entry(ec, nid1, nid2):\r
- # Retrieve annotations from netgraph\r
- ccnd1 = ec.netgraph.node_annotation(nid1, "ccnd")\r
- hostname2 = ec.netgraph.node_annotation(nid2, "hostname")\r
- \r
- # Add a FIB entry between one planetlab node and its peer\r
- entry = ec.register_resource("linux::FIBEntry")\r
- ec.set(entry, "host", hostname2)\r
- ec.register_connection(entry, ccnd1)\r
-\r
- # Collector to retrieve peering ping output (to measure neighbors delay)\r
- ec.enable_trace(entry, "ping")\r
- collector = add_collector(ec, "ping", str(nid1))\r
- ec.register_connection(collector, entry)\r
-\r
- return entry\r
-\r
-def add_node(ec, nid):\r
- ### Add CCN nodes (ec.netgraph holds the topology graph)\r
- add_dce_host(ec, nid)\r
- add_dce_ccnd(ec, nid)\r
- \r
- if nid == ec.netgraph.targets()[0]:\r
- add_dce_ccnr(ec, nid)\r
-\r
- if nid == ec.netgraph.sources()[0]:\r
- add_dce_ccncat(ec, nid)\r
-\r
-def add_edge(ec, nid1, nid2):\r
- #### Add connections between CCN nodes\r
- add_pl_fib_entry(ec, nid1, nid2)\r
- add_pl_fib_entry(ec, nid2, nid1)\r
-\r
-def add_node(ec, nid):\r
- ### Add CCN nodes (ec.netgraph holds the topology graph)\r
- add_pl_host(ec, nid)\r
- add_pl_ccnd(ec, nid)\r
- \r
- if nid == ec.netgraph.targets()[0]:\r
- add_pl_ccnr(ec, nid)\r
-\r
- if nid == ec.netgraph.sources()[0]:\r
- add_pl_ccncat(ec, nid)\r
-\r
-def wait_guids(ec):\r
- return ec.filter_resources("linux::CCNCat")\r
-\r
-if __name__ == '__main__':\r
-\r
- metrics = []\r
-\r
- # topology translation to NEPI model\r
- ec = ExperimentController("pl_4n_linear",\r
- topo_type = TopologyType.LINEAR, \r
- node_count = 4,\r
- assign_st = True,\r
- assign_ips = True,\r
- add_node_callback = add_node,\r
- add_edge_callback = add_edge)\r
-\r
- #### Run experiment until metric convergence\r
- rnr = ExperimentRunner()\r
- runs = rnr.run(ec,\r
- min_runs = 10,\r
- max_runs = 100, \r
- compute_metric_callback = avg_interest_rtt,\r
- evaluate_convergence_callback = normal_law,\r
- wait_guids = wait_guids(ec))\r
- \r
- ### post processing\r
- post_process(ec, runs)\r
-\r
-\r
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation;
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from __future__ import print_function
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.runner import ExperimentRunner
+from nepi.util.netgraph import TopologyType
+import nepi.data.processing.ccn.parser as ccn_parser
+
+import networkx
+import socket
+import os
+import numpy
+from scipy import stats
+from matplotlib import pyplot
+import math
+import random
+
+from optparse import OptionParser
+
+# Command-line interface: PlanetLab credentials plus a comma-separated
+# list of PlanetLab hostnames the experiment runs on.
+usage = ("usage: %prog -s <pl-slice> -u <pl-user> -p <pl-password> "
+    "-k <pl-ssh-key> -N <nodes>")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-s", "--pl-slice", dest="pl_slice",
+    help="PlanetLab slicename", type="str")
+parser.add_option("-u", "--pl-user", dest="pl_user",
+    help="PlanetLab web username", type="str")
+parser.add_option("-p", "--pl-password", dest="pl_password",
+    help="PlanetLab web password", type="str")
+parser.add_option("-k", "--pl-ssh-key", dest="pl_ssh_key",
+    help="Path to private SSH key associated with the PL account",
+    type="str")
+parser.add_option("-N", "--nodes", dest="nodes",
+    help="Comma separated list of Planetlab nodes",
+    type="str")
+
+(options, args) = parser.parse_args()
+
+pl_slice = options.pl_slice
+pl_ssh_key = options.pl_ssh_key
+pl_user = options.pl_user
+pl_password = options.pl_password
+# NOTE(review): options.nodes is None when -N is omitted, so this line
+# raises AttributeError -- -N is effectively a required option.
+NODES = options.nodes.strip().split(",")
+
+def avg_interest_rtt(ec, run):
+    """Compute the average interest RTT for one experiment run.
+
+    Parses the CCND logs downloaded into ec.run_dir, computes the sample
+    mean of the per-content RTTs and a 95% confidence interval (Student's
+    t), records (mean, ci_low, ci_high) in the global 'metrics' list and
+    returns the mean, which ExperimentRunner uses as the run's metric.
+    """
+    logs_dir = ec.run_dir
+
+    # Parse downloaded CCND logs
+    (graph,
+        content_names,
+        interest_expiry_count,
+        interest_dupnonce_count,
+        interest_count,
+        content_count) = ccn_parser.process_content_history_logs(
+            logs_dir, ec.netgraph.topology)
+
+    # statistics on RTT
+    rtts = [content_names[content_name]["rtt"] \
+        for content_name in content_names]
+
+    # sample mean and standard deviation
+    sample = numpy.array(rtts)
+    n, min_max, mean, var, skew, kurt = stats.describe(sample)
+    std = math.sqrt(var)
+    # 95% confidence interval around the mean (t distribution, n-1 dof)
+    ci = stats.t.interval(0.95, n-1, loc = mean,
+        scale = std/math.sqrt(n))
+
+    # NOTE(review): relies on the module-level 'metrics' list created in
+    # the __main__ section below.
+    global metrics
+    metrics.append((mean, ci[0], ci[1]))
+
+    return mean
+
+def normal_law(ec, run, sample):
+    """Convergence test callback for ExperimentRunner.
+
+    Returns True when twice the standard error of the collected sample
+    (an approximate 95% margin) is within 5% of the sample mean, i.e.
+    the metric is considered converged.
+    """
+    print("SAMPLE", sample)
+
+    x = numpy.array(sample)
+    n = len(sample)
+    std = x.std()
+    se = std / math.sqrt(n)
+    m = x.mean()
+    se95 = se * 2
+
+    return m * 0.05 >= se95
+
+def post_process(ec, runs):
+    """Plot the per-iteration average RTT with its confidence interval.
+
+    Consumes the (mean, ci_low, ci_high) tuples accumulated in the
+    global 'metrics' list; saves the figure to plot.png and displays it.
+    """
+    global metrics
+
+    # plot convergence graph
+    y = numpy.array([float(m[0]) for m in metrics])
+    low = numpy.array([float(m[1]) for m in metrics])
+    high = numpy.array([float(m[2]) for m in metrics])
+    # asymmetric error bars: distance from the mean to each CI bound
+    error = [y - low, high - y]
+    x = list(range(1,runs + 1))
+
+    # plot average RTT and confidence interval for each iteration
+    pyplot.errorbar(x, y, yerr = error, fmt='o')
+    pyplot.plot(x, y, 'r-')
+    pyplot.xlim([0.5, runs + 0.5])
+    pyplot.xticks(numpy.arange(1, len(y)+1, 1))
+    pyplot.xlabel('Iteration')
+    pyplot.ylabel('Average RTT')
+    pyplot.grid()
+    pyplot.savefig("plot.png")
+    pyplot.show()
+
+# CCN content name requested by the consumer (ccncat) applications
+content_name = "ccnx:/test/bunny.ts"
+
+# CCN repository file shipped alongside this script
+repofile = os.path.join(
+    os.path.dirname(os.path.realpath(__file__)),
+    "repoFile1.0.8.2")
+
+def get_simulator(ec):
+    """Return the guid of the linux::ns3::Simulation resource.
+
+    On first call registers a localhost linux::Node plus the simulation
+    resource; subsequent calls return the already-registered simulation.
+    """
+    simulator = ec.filter_resources("linux::ns3::Simulation")
+
+    if not simulator:
+        node = ec.register_resource("linux::Node")
+        ec.set(node, "hostname", "localhost")
+
+        simu = ec.register_resource("linux::ns3::Simulation")
+        ec.register_connection(simu, node)
+        return simu
+
+    return simulator[0]
+
+def add_collector(ec, trace_name, subdir, newname = None):
+    """Register a Collector that stores trace 'trace_name' under 'subdir'.
+
+    If 'newname' is given the collected file is renamed to it.
+    Returns the collector guid.
+    """
+    collector = ec.register_resource("Collector")
+    ec.set(collector, "traceName", trace_name)
+    ec.set(collector, "subDir", subdir)
+    if newname:
+        ec.set(collector, "rename", newname)
+
+    return collector
+
+def add_dce_host(ec, nid):
+    """Add an ns-3 node (IP stack enabled) for graph node 'nid'.
+
+    The resulting guid is stored as the node's "host" annotation.
+    """
+    simu = get_simulator(ec)
+
+    host = ec.register_resource("ns3::Node")
+    ec.set(host, "enableStack", True)
+    ec.register_connection(host, simu)
+
+    # Annotate the graph
+    ec.netgraph.annotate_node(nid, "host", host)
+
+def add_dce_ccnd(ec, nid):
+    """Run a CCN daemon (ccnd) under DCE on graph node 'nid'.
+
+    Attaches a stderr collector (renamed 'log') and stores the ccnd guid
+    as the node's "ccnd" annotation.
+    """
+    # Retrieve annotation from netgraph
+    host = ec.netgraph.node_annotation(nid, "host")
+
+    # Add dce ccnd to the dce node
+    ccnd = ec.register_resource("linux::ns3::dce::CCND")
+    ec.set (ccnd, "stackSize", 1<<20)
+    ec.set (ccnd, "debug", 7)
+    ec.set (ccnd, "capacity", 50000)
+    ec.set (ccnd, "StartTime", "1s")
+    # NOTE(review): STOP_TIME is not defined anywhere in this script.
+    # The dce helpers look like dead code here (add_node is redefined
+    # below to the planetlab variant); calling this would raise
+    # NameError -- confirm before reusing these helpers.
+    ec.set (ccnd, "StopTime", STOP_TIME)
+    ec.register_connection(ccnd, host)
+
+    # Collector to retrieve ccnd log
+    collector = add_collector(ec, "stderr", str(nid), "log")
+    ec.register_connection(collector, ccnd)
+
+    # Annotate the graph
+    ec.netgraph.annotate_node(nid, "ccnd", ccnd)
+
+def add_dce_ccnr(ec, nid):
+    """Run a CCN repository (ccnr) serving 'repofile' on graph node 'nid'."""
+    # Retrieve annotation from netgraph
+    host = ec.netgraph.node_annotation(nid, "host")
+
+    # Add a CCN content repository to the dce node
+    ccnr = ec.register_resource("linux::ns3::dce::CCNR")
+    ec.set (ccnr, "repoFile1", repofile)
+    ec.set (ccnr, "stackSize", 1<<20)
+    ec.set (ccnr, "StartTime", "2s")
+    ec.set (ccnr, "StopTime", STOP_TIME)  # NOTE(review): STOP_TIME undefined in this script
+    ec.register_connection(ccnr, host)
+
+def add_dce_ccncat(ec, nid):
+    """Run a ccncat consumer requesting 'content_name' on graph node 'nid'."""
+    # Retrieve annotation from netgraph
+    host = ec.netgraph.node_annotation(nid, "host")
+
+    # Add a ccncat application to the dce host
+    ccncat = ec.register_resource("linux::ns3::dce::CCNCat")
+    ec.set (ccncat, "contentName", content_name)
+    ec.set (ccncat, "stackSize", 1<<20)
+    ec.set (ccncat, "StartTime", "8s")
+    ec.set (ccncat, "StopTime", STOP_TIME)  # NOTE(review): STOP_TIME undefined in this script
+    ec.register_connection(ccncat, host)
+
+def add_dce_fib_entry(ec, nid1, nid2):
+    """Add a ccnx:/ FIB entry on nid1 pointing (over udp) at nid2's ip."""
+    # Retrieve annotations from netgraph
+    host1 = ec.netgraph.node_annotation(nid1, "host")
+    net = ec.netgraph.edge_net_annotation(nid1, nid2)
+    # ip of the peer endpoint on the shared edge network
+    ip2 = net[nid2]
+
+    # Add FIB entry between peer hosts
+    ccndc = ec.register_resource("linux::ns3::dce::FIBEntry")
+    ec.set (ccndc, "protocol", "udp")
+    ec.set (ccndc, "uri", "ccnx:/")
+    ec.set (ccndc, "host", ip2)
+    ec.set (ccndc, "stackSize", 1<<20)
+    ec.set (ccndc, "StartTime", "2s")
+    ec.set (ccndc, "StopTime", STOP_TIME)  # NOTE(review): STOP_TIME undefined in this script
+    ec.register_connection(ccndc, host1)
+
+def add_dce_net_iface(ec, nid1, nid2):
+    """Create nid1's point-to-point device for the edge (nid1, nid2).
+
+    Configures ip/prefix from the edge's network annotation, attaches a
+    drop-tail queue and returns the device guid.
+    """
+    # Retrieve annotations from netgraph
+    host = ec.netgraph.node_annotation(nid1, "host")
+    net = ec.netgraph.edge_net_annotation(nid1, nid2)
+    ip1 = net[nid1]
+    prefix = net["prefix"]
+
+    dev = ec.register_resource("ns3::PointToPointNetDevice")
+    ec.set(dev,"DataRate", "5Mbps")
+    ec.set(dev, "ip", ip1)
+    ec.set(dev, "prefix", prefix)
+    ec.register_connection(host, dev)
+
+    queue = ec.register_resource("ns3::DropTailQueue")
+    ec.register_connection(dev, queue)
+
+    return dev
+
+def add_pl_host(ec, nid):
+    """Register the PlanetLab host backing graph node 'nid'.
+
+    Annotates the netgraph node with hostname, host guid and resolved
+    IP address. Assumes 'nid' is a valid index into NODES -- TODO confirm
+    node ids are 0-based integers.
+    """
+    hostname = NODES[nid]
+
+    # Add a planetlab host to the experiment description
+    host = ec.register_resource("planetlab::Node")
+    ec.set(host, "hostname", hostname)
+    ec.set(host, "username", pl_slice)
+    ec.set(host, "identity", pl_ssh_key)
+    ec.set(host, "cleanExperiment", True)
+    ec.set(host, "cleanProcesses", True)
+
+    # Annotate the graph
+    ec.netgraph.annotate_node(nid, "hostname", hostname)
+    ec.netgraph.annotate_node(nid, "host", host)
+
+    # Annotate the graph node with an ip address
+    # (resolved locally, at experiment-description time)
+    ip = socket.gethostbyname(hostname)
+    ec.netgraph.annotate_node_ip(nid, ip)
+
+def add_pl_ccnd(ec, nid):
+    """Start a CCN daemon on the PlanetLab host of graph node 'nid'.
+
+    Attaches a stderr collector (renamed 'log') and stores the ccnd guid
+    as the node's "ccnd" annotation.
+    """
+    # Retrieve annotation from netgraph
+    host = ec.netgraph.node_annotation(nid, "host")
+
+    # Add a CCN daemon to the planetlab node
+    ccnd = ec.register_resource("linux::CCND")
+    ec.set(ccnd, "debug", 7)
+    ec.register_connection(ccnd, host)
+
+    # Collector to retrieve ccnd log
+    collector = add_collector(ec, "stderr", str(nid), "log")
+    ec.register_connection(collector, ccnd)
+
+    # Annotate the graph
+    ec.netgraph.annotate_node(nid, "ccnd", ccnd)
+
+def add_pl_ccnr(ec, nid):
+    """Add a CCN repository serving 'repofile' behind nid's ccnd."""
+    # Retrieve annotation from netgraph
+    ccnd = ec.netgraph.node_annotation(nid, "ccnd")
+
+    # Add a CCN content repository to the planetlab node
+    ccnr = ec.register_resource("linux::CCNR")
+
+    ec.set(ccnr, "repoFile1", repofile)
+    ec.register_connection(ccnr, ccnd)
+
+def add_pl_ccncat(ec, nid):
+    """Add a ccncat consumer for 'content_name' behind nid's ccnd."""
+    # Retrieve annotation from netgraph
+    ccnd = ec.netgraph.node_annotation(nid, "ccnd")
+
+    # Add a CCN cat application to the planetlab node
+    ccncat = ec.register_resource("linux::CCNCat")
+    ec.set(ccncat, "contentName", content_name)
+    ec.register_connection(ccncat, ccnd)
+
+def add_pl_fib_entry(ec, nid1, nid2):
+    """Add a FIB entry on nid1's ccnd pointing at nid2's hostname.
+
+    Also enables the entry's 'ping' trace (neighbor delay measurement)
+    and collects it under a subdir named after nid1.
+    Returns the FIB entry guid.
+    """
+    # Retrieve annotations from netgraph
+    ccnd1 = ec.netgraph.node_annotation(nid1, "ccnd")
+    hostname2 = ec.netgraph.node_annotation(nid2, "hostname")
+
+    # Add a FIB entry between one planetlab node and its peer
+    entry = ec.register_resource("linux::FIBEntry")
+    ec.set(entry, "host", hostname2)
+    ec.register_connection(entry, ccnd1)
+
+    # Collector to retrieve peering ping output (to measure neighbors delay)
+    ec.enable_trace(entry, "ping")
+    collector = add_collector(ec, "ping", str(nid1))
+    ec.register_connection(collector, entry)
+
+    return entry
+
+def add_node(ec, nid):
+    """DCE variant of the topology node callback.
+
+    NOTE(review): this definition is immediately shadowed by the
+    planetlab add_node defined further below, so this variant (and the
+    add_dce_* helpers) is dead code in this script -- confirm whether it
+    should be removed or selectable.
+    """
+    ### Add CCN nodes (ec.netgraph holds the topology graph)
+    add_dce_host(ec, nid)
+    add_dce_ccnd(ec, nid)
+
+    # the single target node hosts the content repository
+    if nid == ec.netgraph.targets()[0]:
+        add_dce_ccnr(ec, nid)
+
+    # the single source node runs the consumer application
+    if nid == ec.netgraph.sources()[0]:
+        add_dce_ccncat(ec, nid)
+
+def add_edge(ec, nid1, nid2):
+    """Topology edge callback: create PlanetLab FIB entries both ways."""
+    #### Add connections between CCN nodes
+    add_pl_fib_entry(ec, nid1, nid2)
+    add_pl_fib_entry(ec, nid2, nid1)
+
+def add_node(ec, nid):
+    """Topology node callback, PlanetLab variant.
+
+    Overrides the dce-based add_node defined earlier in this file; this
+    is the definition actually passed to the ExperimentController below.
+    """
+    ### Add CCN nodes (ec.netgraph holds the topology graph)
+    add_pl_host(ec, nid)
+    add_pl_ccnd(ec, nid)
+
+    # repository on the (single) target, consumer on the (single) source
+    if nid == ec.netgraph.targets()[0]:
+        add_pl_ccnr(ec, nid)
+
+    if nid == ec.netgraph.sources()[0]:
+        add_pl_ccncat(ec, nid)
+
+def wait_guids(ec):
+    """Return the guids of the CCNCat resources the runner waits on."""
+    return ec.filter_resources("linux::CCNCat")
+
+if __name__ == '__main__':
+
+    # per-run (mean, ci_low, ci_high) tuples, filled by avg_interest_rtt
+    metrics = []
+
+    # topology translation to NEPI model: 4-node linear topology, built
+    # through the (planetlab) add_node / add_edge callbacks above
+    ec = ExperimentController("pl_4n_linear",
+        topo_type = TopologyType.LINEAR,
+        node_count = 4,
+        assign_st = True,
+        assign_ips = True,
+        add_node_callback = add_node,
+        add_edge_callback = add_edge)
+
+    #### Run experiment until metric convergence
+    # between 10 and 100 iterations; stops early once normal_law reports
+    # convergence of the avg_interest_rtt metric
+    rnr = ExperimentRunner()
+    runs = rnr.run(ec,
+        min_runs = 10,
+        max_runs = 100,
+        compute_metric_callback = avg_interest_rtt,
+        evaluate_convergence_callback = normal_law,
+        wait_guids = wait_guids(ec))
+
+    ### post processing
+    post_process(ec, runs)
+
+
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
def add_ns3_node(ec, simu):
ec.shutdown()
-print "PING OUTPUT", stdout
+print("PING OUTPUT", stdout)
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
def add_ns3_node(ec, simu):
ec.shutdown()
-print "PING OUTPUT", stdout
+print("PING OUTPUT", stdout)
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
import os
stdout = ec.trace(ccncat, "stdout")
# convert from bytes to MB
-print "%0.2f MBytes received" % (len(stdout) / 1024.0 / 1024.0 )
+print("%0.2f MBytes received" % (len(stdout) / 1024.0 / 1024.0 ))
ec.shutdown()
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
def add_ns3_node(ec, simu):
ec.shutdown()
-print "PING OUTPUT", stdout
+print("PING OUTPUT", stdout)
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
def add_ns3_node(ec, simu):
ec.shutdown()
-print "PING OUTPUT", stdout
-
+print("PING OUTPUT", stdout)
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
import os
stdout = ec.trace(ccncat, "stdout")
# convert from bytes to MB
-print "%0.2f MBytes received" % (len(stdout) / 1024.0 / 1024.0 )
+print("%0.2f MBytes received" % (len(stdout) / 1024.0 / 1024.0 ))
ec.shutdown()
-
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
def add_ns3_node(ec, simu):
ec.shutdown()
-print "PEEK received", stdout
+print("PEEK received", stdout)
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
def add_ns3_node(ec, simu):
ec.shutdown()
-print "PING OUTPUT", stdout
+print("PING OUTPUT", stdout)
# 0 ------- network -------- 1
#
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
from optparse import OptionParser
ec.wait_finished([ccncat])
stdout = ec.trace(ccncat, "stdout")
-f = open("video.ts", "w")
-f.write(stdout)
-f.close()
+with open("video.ts", "w") as f:
+ f.write(stdout)
ec.shutdown()
-print "Transfered FILE stored localy at video.ts"
+print("Transfered FILE stored localy at video.ts")
# $ cd <path-to-nepi>
# python examples/linux/ccn_advanced_transfer.py -a <hostname1> -b <hostname2> -u <username> -i <ssh-key>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
from nepi.execution.resource import ResourceAction, ResourceState
# BASH command -> ' ccndstart ; ccndc add ccnx:/ udp host ; ccnr '
command = "ccndstart && "
- peers = map(lambda peer: "ccndc add ccnx:/ udp %s" % peer, peers)
+ peers = ["ccndc add ccnx:/ udp %s" % peer for peer in peers]
command += " ; ".join(peers) + " && "
command += " ccnr & "
ec.wait_finished(apps)
stdout = ec.trace(ccncat, "stdout")
-f = open("video.ts", "w")
-f.write(stdout)
-f.close()
+with open("video.ts", "w") as f:
+ f.write(stdout)
# Shutdown the experiment controller
ec.shutdown()
-print "Transfered FILE stored localy at video.ts"
+print("Transfered FILE stored localy at video.ts")
# $ cd <path-to-nepi>
# python examples/linux/hello_world.py -a <hostname> -u <username> -i <ssh-key>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
from optparse import OptionParser, SUPPRESS_HELP
ec.wait_finished(app)
-print ec.trace(app, "stdout")
+print(ec.trace(app, "stdout"))
ec.shutdown()
# $ cd <path-to-nepi>
# python examples/linux/netcat_file_transfer.py -a <hostname1> -b <hostname2> -u <username> -i <ssh-key>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
from nepi.execution.resource import ResourceAction, ResourceState
pcap = ec.trace(capture, "file_transfer.pcap")
# Choose a directory to store the traces, example f = open("/home/<user>/bw.txt", "w")
-f = open("bw.txt", "w")
-f.write(bw)
-f.close()
-f = open("video_transfer.pcap", "w")
-f.write(pcap)
-f.close()
+with open("bw.txt", "w") as f:
+ f.write(bw)
+with open("video_transfer.pcap", "w") as f:
+ f.write(pcap)
ec.shutdown()
-print "Total bytes transfered saved to bw.txt..."
+print("Total bytes transfered saved to bw.txt...")
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
-# Example of how to run this experiment (replace with your information):
+# This very simple experiment will ssh in a linux box, and from there
+# issue a ping command towards a landmark (default faraday.inria.fr)
#
-# $ cd <path-to-nepi>
-# python examples/linux/ping.py -a <hostname> -u <username> -i <ssh-key>
+# $ ping.py -u root mybox.domain.com
+#
+# would do the equivalent of
+# ssh root@mybox.domain.com ping -c3 faraday.inria.fr
+#
+# If nepi is not installed in your system, and you only have a git clone
+# you might wish to do instead
+#
+# $ cd <path-to-nepi>/src
+# $ python ../examples/linux/ping.py -u root mybox.domain.com
+
+# let's be ready for python3
+from __future__ import print_function
+import os
+from argparse import ArgumentParser
from nepi.execution.ec import ExperimentController
-from optparse import OptionParser, SUPPRESS_HELP
-import os
+#
+default_landmark = "faraday.inria.fr"
-usage = ("usage: %prog -a <hostanme> -u <username> -i <ssh-key>")
+parser = ArgumentParser()
+parser.add_argument("-u", "--username", dest="username",
+ help="Username to SSH to remote host")
+parser.add_argument("-i", "--ssh-key", dest="ssh_key",
+ help="Path to private SSH key to be used for connection")
+parser.add_argument("-l", "--landmark", dest='landmark', default=default_landmark,
+ help="Set pings destination, default={}".format(default_landmark))
+# this is required
+parser.add_argument("hostname", type=str)
-parser = OptionParser(usage = usage)
-parser.add_option("-a", "--hostname", dest="hostname",
- help="Remote host", type="str")
-parser.add_option("-u", "--username", dest="username",
- help="Username to SSH to remote host", type="str")
-parser.add_option("-i", "--ssh-key", dest="ssh_key",
- help="Path to private SSH key to be used for connection",
- type="str")
-(options, args) = parser.parse_args()
+args = parser.parse_args()
-hostname = options.hostname
-username = options.username
-ssh_key = options.ssh_key
+hostname = args.hostname
+username = args.username
+ssh_key = args.ssh_key
+landmark = args.landmark
ec = ExperimentController(exp_id = "ping-exp")
ec.set(node, "cleanProcesses", True)
app = ec.register_resource("linux::Application")
-ec.set(app, "command", "ping -c3 nepi.inria.fr")
+ec.set(app, "command", "ping -c3 {}".format(landmark))
ec.register_connection(app, node)
ec.deploy()
ec.wait_finished(app)
-print ec.trace(app, "stdout")
+print(ec.trace(app, "stdout"))
ec.shutdown()
# BASH command -> ' ccndstart ; ccndc add ccnx:/ udp host ; ccnr '
command = "ccndstart && "
- peers = map(lambda peer: "ccndc add ccnx:/ udp %s" % peer, peers)
+ peers = ["ccndc add ccnx:/ udp %s" % peer for peer in peers]
command += " ; ".join(peers) + " && "
command += " ccnr & "
ring_hosts = [host1, host2, host3, host4]
ccnds = dict()
- for i in xrange(len(ring_hosts)):
+ for i in range(len(ring_hosts)):
host = ring_hosts[i]
node = add_node(ec, host, pl_user, pl_ssh_key)
ccnd = add_ccnd(ec, node)
# Register a collector to automatically collect traces
collector = add_collector(ec, "stderr")
- for ccnd in ccnds.values():
+ for ccnd in ccnds:
ec.register_connection(collector, ccnd)
# deploy all ResourceManagers
for host in hostnames:
node = add_node(ec, host, pl_slice)
- for i in xrange(20):
+ for i in range(20):
app = add_app(ec)
ec.register_connection(app, node)
apps.append(app)
# $ cd <path-to-nepi>
# python examples/linux/vlc_streaming.py -a <hostname1> -b <hostname2> -u <username> -i <ssh-key>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
from nepi.execution.resource import ResourceState, ResourceAction
ec.wait_finished([server])
video = ec.trace(client, "VIDEO")
-f = open("video.ts", "w")
-f.write(video)
-f.close()
+with open("video.ts", "w") as f:
+ f.write(video)
ec.shutdown()
-print "Streamed VIDEO stored localy at video.ts"
+print("Streamed VIDEO stored localy at video.ts")
# This example must be executed as root:
# $ sudo PYTHONPATH=$PYTHONPATH:src python examples/netns/local_switch_ping.py
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
ec = ExperimentController(exp_id = "netns-local-p2p-ping")
ec.shutdown()
-print "PING OUTPUT", stdout
+print("PING OUTPUT", stdout)
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
"""
network topology:
ec.shutdown()
-print "PING OUTPUT", stdout
+print("PING OUTPUT", stdout)
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
ec = ExperimentController(exp_id = "ns3-local-csma-ping")
ec.shutdown()
-print "PING OUTPUT", stdout
+print("PING OUTPUT", stdout)
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
"""
network topology:
ec.shutdown()
-print "PING OUTPUT", stdout
+print("PING OUTPUT", stdout)
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
ec = ExperimentController(exp_id = "ns3-local-p2p-ping")
ec.shutdown()
-print "PING OUTPUT", stdout
+print("PING OUTPUT", stdout)
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
ec = ExperimentController(exp_id = "ns3-local-wifi-ping")
ec.shutdown()
-print "PING OUTPUT", stdout
+print("PING OUTPUT", stdout)
-#!/usr/bin/env python\r
-#\r
-# NEPI, a framework to manage network experiments\r
-# Copyright (C) 2015 INRIA\r
-#\r
-# This program is free software: you can redistribute it and/or modify\r
-# it under the terms of the GNU General Public License version 2 as\r
-# published by the Free Software Foundation;\r
-#\r
-# This program is distributed in the hope that it will be useful,\r
-# but WITHOUT ANY WARRANTY; without even the implied warranty of\r
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
-# GNU General Public License for more details.\r
-#\r
-# You should have received a copy of the GNU General Public License\r
-# along with this program. If not, see <http://www.gnu.org/licenses/>.\r
-#\r
-# Author: Damien Saucez <damien.saucez@inria.fr>\r
-# Alina Quereilhac <alina.quereilhac@inria.fr>\r
-#\r
-\r
-#\r
-# Note: To run this experiment you need to have a PlanetLab account.\r
-#\r
-# This experiment consists of a simulated wireless sensor network (ns-3)\r
-# with one fixed access point (AP), running an agent application, and several\r
-# mobile stations that run a transmitter application to send messages to\r
-# the AP.\r
-#\r
-# The experiment uses 2 networks like the one described above, running in 2\r
-# independent ns-3 instances in remote hosts, and transparently connected\r
-# through a UDP tunnel.\r
-\r
-#\r
-# command line:\r
-#\r
-# PYTHONPATH=$PYTHONPATH:src python examples/ns3/multi_host/ditributed.py\r
-#\r
-\r
-import os\r
-\r
-from nepi.execution.ec import ExperimentController\r
-from nepi.execution.resource import ResourceState, ResourceManager\r
-\r
-from topology import *\r
-\r
-# tunning\r
-os.environ["NEPI_NTHREADS"] = "1"\r
-ResourceManager._reschedule_delay = "0s"\r
-\r
-# list of hosts for running the experiment on\r
-hostname1 = "onelab4.warsaw.rd.tp.pl"\r
-hostname2 = "planet2.servers.ua.pt"\r
-\r
-(username, pl_user, pl_password, ssh_key, node_count) = get_options()\r
-\r
-ec = ExperimentController(exp_id="distributed")\r
-\r
-host1, simu1 = add_host_simu(ec, hostname1, username, pl_user, pl_password, \r
- ssh_key)\r
-\r
-ap1, agent1 = build_ns3_topology(ec, simu1, node_count, network="10.1.0.0", \r
- prefixlen="24", agent_ip="10.1.0.1")\r
-\r
-host2, simu2 = add_host_simu(ec, hostname2, username, pl_user, pl_password, ssh_key)\r
-ap2, agent2 = build_ns3_topology(ec, simu2, node_count, network="10.2.0.0", prefixlen="24", agent_ip="10.1.0.1")\r
-\r
-fddev1 = add_fdnet_device(ec, ap1, "10.0.0.1", "30")\r
-fddev2 = add_fdnet_device(ec, ap2, "10.0.0.2", "30")\r
-\r
-connect_with_udp_tunnel(ec, fddev1, fddev2)\r
-\r
-ec.deploy()\r
-\r
-ec.wait_finished([simu1, simu2])\r
-\r
-stdout = ec.trace(agent1, "stdout")\r
-print " Agent says:"\r
-print stdout\r
-\r
-ec.shutdown()\r
-\r
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2015 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation;
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Damien Saucez <damien.saucez@inria.fr>
+# Alina Quereilhac <alina.quereilhac@inria.fr>
+#
+
+#
+# Note: To run this experiment you need to have a PlanetLab account.
+#
+# This experiment consists of a simulated wireless sensor network (ns-3)
+# with one fixed access point (AP), running an agent application, and several
+# mobile stations that run a transmitter application to send messages to
+# the AP.
+#
+# The experiment uses 2 networks like the one described above, running in 2
+# independent ns-3 instances in remote hosts, and transparently connected
+# through a UDP tunnel.
+
+#
+# command line:
+#
+# PYTHONPATH=$PYTHONPATH:src python examples/ns3/multi_host/ditributed.py
+#
+
+from __future__ import print_function
+
+import os
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.resource import ResourceState, ResourceManager
+
+from topology import *
+
+# tunning
+# single scheduler thread and no rescheduling delay speed up deployment
+os.environ["NEPI_NTHREADS"] = "1"
+ResourceManager._reschedule_delay = "0s"
+
+# list of hosts for running the experiment on
+hostname1 = "onelab4.warsaw.rd.tp.pl"
+hostname2 = "planet2.servers.ua.pt"
+
+# credentials / node count come from the shared topology module
+(username, pl_user, pl_password, ssh_key, node_count) = get_options()
+
+ec = ExperimentController(exp_id="distributed")
+
+# first ns-3 instance: WSN on network 10.1.0.0/24, agent at 10.1.0.1
+host1, simu1 = add_host_simu(ec, hostname1, username, pl_user, pl_password,
+    ssh_key)
+
+ap1, agent1 = build_ns3_topology(ec, simu1, node_count, network="10.1.0.0",
+    prefixlen="24", agent_ip="10.1.0.1")
+
+# second ns-3 instance on the other PlanetLab host, network 10.2.0.0/24
+# NOTE(review): agent_ip is 10.1.0.1 here although this topology uses
+# 10.2.0.0 -- presumably the stations address the first network's agent
+# through the tunnel; confirm this is intentional.
+host2, simu2 = add_host_simu(ec, hostname2, username, pl_user, pl_password, ssh_key)
+ap2, agent2 = build_ns3_topology(ec, simu2, node_count, network="10.2.0.0", prefixlen="24", agent_ip="10.1.0.1")
+
+# transparently connect the two simulations through a UDP tunnel
+fddev1 = add_fdnet_device(ec, ap1, "10.0.0.1", "30")
+fddev2 = add_fdnet_device(ec, ap2, "10.0.0.2", "30")
+
+connect_with_udp_tunnel(ec, fddev1, fddev2)
+
+ec.deploy()
+
+ec.wait_finished([simu1, simu2])
+
+# dump the first agent's output
+stdout = ec.trace(agent1, "stdout")
+print(" Agent says:")
+print(stdout)
+
+ec.shutdown()
+
-#!/usr/bin/env python\r
-#\r
-# NEPI, a framework to manage network experiments\r
-# Copyright (C) 2015 INRIA\r
-#\r
-# This program is free software: you can redistribute it and/or modify\r
-# it under the terms of the GNU General Public License version 2 as\r
-# published by the Free Software Foundation;\r
-#\r
-# This program is distributed in the hope that it will be useful,\r
-# but WITHOUT ANY WARRANTY; without even the implied warranty of\r
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
-# GNU General Public License for more details.\r
-#\r
-# You should have received a copy of the GNU General Public License\r
-# along with this program. If not, see <http://www.gnu.org/licenses/>.\r
-#\r
-# Author: Damien Saucez <damien.saucez@inria.fr>\r
-# Alina Quereilhac <alina.quereilhac@inria.fr>\r
-#\r
-\r
-#\r
-# Note that to run this experiment you need to have a PlanetLab account.\r
-#\r
-# This experiment consists of a simulated wireless sensor network (ns-3)\r
-# with one fixed access point (AP), running an agent application, and several\r
-# mobile stations that run a transmitter application to send messages to\r
-# the AP.\r
-#\r
-# One of the transmitter applications runs outside the simulation, on\r
-# the host, and sends messages to the AP through the FdNetDevice/TAP\r
-# link.\r
-#\r
-\r
-#\r
-# command line:\r
-#\r
-# PYTHONPATH=$PYTHONPATH:src python examples/ns3/multi_host/hybrid.py\r
-#\r
-\r
-import os\r
-\r
-from nepi.execution.ec import ExperimentController\r
-from nepi.execution.resource import ResourceState, ResourceManager\r
-\r
-from topology import *\r
-\r
-# tunning\r
-os.environ["NEPI_NTHREADS"] = "1"\r
-ResourceManager._reschedule_delay = "0s"\r
-\r
-# list of hosts for running the experiment on\r
-hostname1 = "onelab4.warsaw.rd.tp.pl"\r
-hostname2 = "planet2.servers.ua.pt"\r
-\r
-(username, pl_user, pl_password, ssh_key, node_count) = get_options()\r
-\r
-ec = ExperimentController(exp_id="hybrid")\r
-\r
-host, simu = add_host_simu(ec, hostname1, username, pl_user, pl_password, \r
- ssh_key)\r
-\r
-ap, agent = build_ns3_topology(ec, simu, node_count, network="192.168.3.0", \r
- prefixlen="25", agent_ip="192.168.3.1")\r
-\r
-fddev = add_fdnet_device(ec, ap, "192.168.3.129", "25")\r
-\r
-tap = ec.register_resource("planetlab::Tap")\r
-ec.set(tap, "ip", "192.168.3.130")\r
-ec.set(tap, "prefix", "25")\r
-ec.set(tap, "pointopoint", "192.168.3.129")\r
-ec.register_connection(host, tap) \r
-\r
-connect_with_virtual_link(ec, tap, fddev)\r
-\r
-add_ns3_route(ec, ap, network="192.168.3.128", prefixlen="25", nexthop="192.168.3.1")\r
-add_planetlab_route(ec, tap, network="192.168.3.0", prefixlen="25", nexthop="192.168.3.129")\r
-\r
-transmitter = ec.register_resource("linux::Application")\r
-ec.set(transmitter, "sources", "code/transmitter.c")\r
-ec.set(transmitter, "build", "gcc ${SRC}/transmitter.c -o ${BIN}/transmitter")\r
-ec.set(transmitter, "command", "${BIN}/transmitter 192.168.3.1")\r
-ec.register_connection(transmitter, host)\r
-\r
-ec.deploy()\r
-\r
-ec.wait_finished([simu, transmitter])\r
-\r
-stdout = ec.trace(agent, "stdout")\r
-print " Agent says: "\r
-print stdout\r
-\r
-stdout = ec.trace(transmitter, "stdout")\r
-print " Live transmitter output: "\r
-print stdout\r
-\r
-ec.shutdown()\r
-\r
-\r
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2015 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation;
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Damien Saucez <damien.saucez@inria.fr>
+# Alina Quereilhac <alina.quereilhac@inria.fr>
+#
+
+#
+# Note that to run this experiment you need to have a PlanetLab account.
+#
+# This experiment consists of a simulated wireless sensor network (ns-3)
+# with one fixed access point (AP), running an agent application, and several
+# mobile stations that run a transmitter application to send messages to
+# the AP.
+#
+# One of the transmitter applications runs outside the simulation, on
+# the host, and sends messages to the AP through the FdNetDevice/TAP
+# link.
+#
+
+#
+# command line:
+#
+# PYTHONPATH=$PYTHONPATH:src python examples/ns3/multi_host/hybrid.py
+#
+
+from __future__ import print_function
+
+import os
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.resource import ResourceState, ResourceManager
+
+from topology import *
+
+# tunning
+# single scheduler thread and no rescheduling delay speed up deployment
+os.environ["NEPI_NTHREADS"] = "1"
+ResourceManager._reschedule_delay = "0s"
+
+# list of hosts for running the experiment on
+hostname1 = "onelab4.warsaw.rd.tp.pl"
+hostname2 = "planet2.servers.ua.pt"  # NOTE(review): unused in this script
+
+(username, pl_user, pl_password, ssh_key, node_count) = get_options()
+
+ec = ExperimentController(exp_id="hybrid")
+
+# simulated WSN (192.168.3.0/25) running inside one PlanetLab host
+host, simu = add_host_simu(ec, hostname1, username, pl_user, pl_password,
+    ssh_key)
+
+ap, agent = build_ns3_topology(ec, simu, node_count, network="192.168.3.0",
+    prefixlen="25", agent_ip="192.168.3.1")
+
+# FdNetDevice on the AP plus a TAP on the host, joined by a virtual
+# link, bridge the simulation with the real host (192.168.3.128/25 side)
+fddev = add_fdnet_device(ec, ap, "192.168.3.129", "25")
+
+tap = ec.register_resource("planetlab::Tap")
+ec.set(tap, "ip", "192.168.3.130")
+ec.set(tap, "prefix", "25")
+ec.set(tap, "pointopoint", "192.168.3.129")
+ec.register_connection(host, tap)
+
+connect_with_virtual_link(ec, tap, fddev)
+
+# routes so each /25 half can reach the other through the link
+add_ns3_route(ec, ap, network="192.168.3.128", prefixlen="25", nexthop="192.168.3.1")
+add_planetlab_route(ec, tap, network="192.168.3.0", prefixlen="25", nexthop="192.168.3.129")
+
+# live transmitter: built and run on the host, sends to the simulated agent
+transmitter = ec.register_resource("linux::Application")
+ec.set(transmitter, "sources", "code/transmitter.c")
+ec.set(transmitter, "build", "gcc ${SRC}/transmitter.c -o ${BIN}/transmitter")
+ec.set(transmitter, "command", "${BIN}/transmitter 192.168.3.1")
+ec.register_connection(transmitter, host)
+
+ec.deploy()
+
+ec.wait_finished([simu, transmitter])
+
+# dump the agent's and the live transmitter's outputs
+stdout = ec.trace(agent, "stdout")
+print(" Agent says: ")
+print(stdout)
+
+stdout = ec.trace(transmitter, "stdout")
+print(" Live transmitter output: ")
+print(stdout)
+
+ec.shutdown()
+
+
-#!/usr/bin/env python\r
-#\r
-# NEPI, a framework to manage network experiments\r
-# Copyright (C) 2015 INRIA\r
-#\r
-# This program is free software: you can redistribute it and/or modify\r
-# it under the terms of the GNU General Public License version 2 as\r
-# published by the Free Software Foundation;\r
-#\r
-# This program is distributed in the hope that it will be useful,\r
-# but WITHOUT ANY WARRANTY; without even the implied warranty of\r
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
-# GNU General Public License for more details.\r
-#\r
-# You should have received a copy of the GNU General Public License\r
-# along with this program. If not, see <http://www.gnu.org/licenses/>.\r
-#\r
-# Author: Damien Saucez <damien.saucez@inria.fr>\r
-# Alina Quereilhac <alina.quereilhac@inria.fr>\r
-#\r
-\r
-#\r
-# Note: To run this experiment you need to have a PlanetLab account.\r
-#\r
-# This experiment consists of a simulated wireless sensor network (ns-3)\r
-# with one fixed access point (AP), running an agent application, and several\r
-# mobile stations that run a transmitter application to send messages to\r
-# the AP.\r
-#\r
-# The same experiment described above is run in parallel with different\r
-# number of mobile stations in 2 PlanetLab hosts.\r
-#\r
-\r
-#\r
-# command line:\r
-#\r
-# PYTHONPATH=$PYTHONPATH:src python examples/ns3/multi_host/parallel.py\r
-#\r
-\r
-import os\r
-\r
-from topology import *\r
-\r
-from nepi.execution.ec import ExperimentController\r
-from nepi.execution.resource import ResourceState, ResourceManager\r
-\r
-# tunning\r
-os.environ["NEPI_NTHREADS"] = "1"\r
-ResourceManager._reschedule_delay = "0s"\r
-\r
-# list of hosts for running the experiment on\r
-hostname1 = "onelab4.warsaw.rd.tp.pl"\r
-hostname2 = "planet2.servers.ua.pt"\r
-\r
-(username, pl_user, pl_password, ssh_key, node_count) = get_options()\r
-\r
-ec = ExperimentController(exp_id="parallel")\r
-counts = [node_count, 10]\r
-hosts = [hostname1, hostname2]\r
-\r
-simulations = []\r
-agents = []\r
-\r
-for hostname in hosts:\r
- host, simu = add_host_simu(ec, hostname, username, pl_user, pl_password, \r
- ssh_key)\r
- simulations.append(simu)\r
-\r
- node_count = counts.pop()\r
- ap, agent = build_ns3_topology(ec, simu, node_count, network="10.1.0.0", \r
- prefixlen="24", agent_ip="10.1.0.1")\r
- agents.append(agent)\r
-\r
-ec.deploy()\r
-\r
-ec.wait_finished(simulations)\r
-\r
-for agent in agents:\r
- stdout = ec.trace(agent, "stdout")\r
- print " Agent says:"\r
- print stdout\r
-\r
-ec.shutdown()\r
-\r
-\r
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2015 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation;
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Damien Saucez <damien.saucez@inria.fr>
+# Alina Quereilhac <alina.quereilhac@inria.fr>
+#
+
+#
+# Note: To run this experiment you need to have a PlanetLab account.
+#
+# This experiment consists of a simulated wireless sensor network (ns-3)
+# with one fixed access point (AP), running an agent application, and several
+# mobile stations that run a transmitter application to send messages to
+# the AP.
+#
+# The same experiment described above is run in parallel with different
+# number of mobile stations in 2 PlanetLab hosts.
+#
+
+#
+# command line:
+#
+# PYTHONPATH=$PYTHONPATH:src python examples/ns3/multi_host/parallel.py
+#
+
+from __future__ import print_function
+
+import os
+
+from topology import *
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.resource import ResourceState, ResourceManager
+
+# tuning
+os.environ["NEPI_NTHREADS"] = "1"
+ResourceManager._reschedule_delay = "0s"
+
+# list of hosts for running the experiment on
+hostname1 = "onelab4.warsaw.rd.tp.pl"
+hostname2 = "planet2.servers.ua.pt"
+
+(username, pl_user, pl_password, ssh_key, node_count) = get_options()
+
+ec = ExperimentController(exp_id="parallel")
+counts = [node_count, 10]
+hosts = [hostname1, hostname2]
+
+simulations = []
+agents = []
+
+for hostname in hosts:
+ host, simu = add_host_simu(ec, hostname, username, pl_user, pl_password,
+ ssh_key)
+ simulations.append(simu)
+
+ node_count = counts.pop()
+ ap, agent = build_ns3_topology(ec, simu, node_count, network="10.1.0.0",
+ prefixlen="24", agent_ip="10.1.0.1")
+ agents.append(agent)
+
+ec.deploy()
+
+ec.wait_finished(simulations)
+
+for agent in agents:
+ stdout = ec.trace(agent, "stdout")
+ print(" Agent says:")
+ print(stdout)
+
+ec.shutdown()
+
+
# Alina Quereilhac <alina.quereilhac@inria.fr>\r
#\r
\r
-import ipaddr\r
+import ipaddress\r
from optparse import OptionParser\r
import os\r
from random import randint\r
def build_ns3_topology(ec, simu, node_count, network, prefixlen, agent_ip):\r
channel = add_ns3_wifi_channel(ec)\r
\r
- net = ipaddr.IPv4Network("%s/%s" % (network, prefixlen)) \r
- itr = net.iterhosts()\r
+ net = ipaddress.IPv4Network("%s/%s" % (network, prefixlen)) \r
+ itr = net.hosts()\r
\r
- ap_ip = itr.next().exploded\r
+ ap_ip = next(itr).exploded\r
ap = add_ns3_node(ec, simu, ap_ip, prefixlen, channel, ap_mode=True)\r
\r
agent = None\r
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
from optparse import OptionParser
ec.shutdown()
-print "PING OUTPUT", stdout
+print("PING OUTPUT", stdout)
# - t3 (t2 + 2s) : Kill the application
#
+from __future__ import print_function
+
from nepi.execution.resource import ResourceAction, ResourceState
from nepi.execution.ec import ExperimentController
# Retrieve the output of the ping command
ping_output = ec.trace(app1, "stdout")
-print "\n PING OUTPUT\n", ping_output, "\n"
+print("\n PING OUTPUT\n", ping_output, "\n")
# Stop Experiment
ec.shutdown()
# - t3 (t2 + 2s) : Kill the application
#
+from __future__ import print_function
+
from nepi.execution.resource import ResourceAction, ResourceState
from nepi.execution.ec import ExperimentController
# Retrieve the bytes transmitted output and print it
byte_count = ec.trace(app3, "stdout")
-print "BYTES transmitted", byte_count
+print("BYTES transmitted", byte_count)
# Stop Experiment
ec.shutdown()
# - t3 (t2 + 2s) : Kill the application
#
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
from nepi.execution.resource import ResourceAction, ResourceState
# Retrieve the output of the ping command
ping_output = ec.trace(app1, "stdout")
-print "\n PING OUTPUT\n", ping_output, "\n"
+print("\n PING OUTPUT\n", ping_output, "\n")
# Stop Experiment
ec.shutdown()
# - t3 (t2 + 2s) : Kill the application
#
+from __future__ import print_function
+
from nepi.execution.resource import ResourceAction, ResourceState
from nepi.execution.ec import ExperimentController
# Retrieve the bytes transmitted count and print it
byte_count = ec.trace(app2, "stdout")
-print "BYTES transmitted", byte_count
+print("BYTES transmitted", byte_count)
## If you redirected the video to standard output, you can try to
## retrieve the stdout of the VLC client
## video = ec.trace(app2, "stdout")
-#f = open("video.ts", "w")
-#f.write(video)
-#f.close()
+#with open("video.ts", "w") as f:
+# f.write(video)
# Stop Experiment
ec.shutdown()
-#!/usr/bin/env python\r
-#\r
-# NEPI, a framework to manage network experiments\r
-# Copyright (C) 2013 INRIA\r
-#\r
-# This program is free software: you can redistribute it and/or modify\r
-# it under the terms of the GNU General Public License version 2 as\r
-# published by the Free Software Foundation;\r
-#\r
-# This program is distributed in the hope that it will be useful,\r
-# but WITHOUT ANY WARRANTY; without even the implied warranty of\r
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
-# GNU General Public License for more details.\r
-#\r
-# You should have received a copy of the GNU General Public License\r
-# along with this program. If not, see <http://www.gnu.org/licenses/>.\r
-#\r
-# Author: Alina Quereilhac <alina.quereilhac@inria.fr>\r
-# Maksym Gabielkov <maksym.gabielkovc@inria.fr>\r
-#\r
-\r
-## This is a maintenance script used to bootstrap the nodes from\r
-## Nitos testbed (NITLab) before running a OMF experiment using\r
-## Nitos nodes. This fixes the problem of Resource Controller \r
-## misbehaving by restarting it and it also loads the ath5k driver.\r
-\r
-# Example of how to run this experiment (replace with your information):\r
-#\r
-# $ cd <path-to-nepi>\r
-# python examples/linux/nitos_testbed_bootstrap.py -H <omf.nitos.node0XX,omf.nitos.node0ZZ,..> -U <node-username> -i <ssh-key> -g <nitos-gateway> -u <nitos-slice>\r
-#\r
-\r
-from nepi.execution.ec import ExperimentController\r
-from nepi.execution.resource import ResourceAction, ResourceState\r
-\r
-from optparse import OptionParser\r
-import os\r
-\r
-usage = ("usage: %prog -H <list-of-nitos-hosts> -U <node-username> -i <ssh-key> -g <nitos-gateway> -u <slicename>")\r
-\r
-parser = OptionParser(usage = usage)\r
-parser.add_option("-H", "--hosts", dest="hosts", \r
- help="Space separated list of hosts", type="str")\r
-parser.add_option("-U", "--username", dest="username", \r
- help="Username for the nitos hosts (usually root)", \r
- type="str", default="root" )\r
-parser.add_option("-g", "--gateway", dest="gateway", \r
- help="Nitos gateway hostname", \r
- type="str", default="nitlab.inf.uth.gr")\r
-parser.add_option("-u", "--gateway-user", dest="gateway_username", \r
- help="Nitos gateway username (slicename)", \r
- type="str", default="nitlab.inf.uth.gr")\r
-parser.add_option("-i", "--ssh-key", dest="ssh_key", \r
- help="Path to private SSH key to be used for connection", \r
- type="str")\r
-(options, args) = parser.parse_args()\r
-\r
-hosts = options.hosts\r
-username = options.username\r
-gateway = options.gateway\r
-gateway_username = options.gateway_username\r
-identity = options.ssh_key\r
-\r
-apps = []\r
-\r
-ec = ExperimentController(exp_id="nitos_bootstrap")\r
-\r
-gw_node = ec.register_resource("linux::Node")\r
-ec.set(gw_node, "username", gateway_username)\r
-ec.set(gw_node, "hostname", gateway)\r
-ec.set(gw_node, "identity", identity)\r
-ec.set(gw_node, "cleanExperiment", True)\r
-\r
-load_cmd = "omf load -i nepi_OMF6_VLC_baseline_grid.ndz -t %s" % hosts \r
-load_app = ec.register_resource("linux::Application")\r
-ec.set(load_app, "command", load_cmd)\r
-ec.register_connection(load_app, gw_node)\r
-\r
-reboot_cmd = "omf tell -a on -t %s" % hosts \r
-reboot_app = ec.register_resource("linux::Application")\r
-ec.set(reboot_app, "command", reboot_cmd)\r
-ec.register_connection(reboot_app, gw_node)\r
-\r
-ec.register_condition(reboot_app, ResourceAction.START, load_app, \r
- ResourceState.STOPPED, time="60s") \r
-\r
-hosts = hosts.split(",")\r
-\r
-for hostname in hosts:\r
- host = hostname.split(".")[-1]\r
- node = ec.register_resource("linux::Node")\r
- ec.set(node, "username", username)\r
- ec.set(node, "hostname", host)\r
- ec.set(node, "identity", identity)\r
- ec.set(node, "gateway", gateway)\r
- ec.set(node, "gatewayUser", gateway_username)\r
- ec.set(node, "cleanExperiment", True)\r
- ec.register_condition(node, ResourceAction.DEPLOY, reboot_app, \r
- ResourceState.STOPPED, time="300s") \r
- \r
- modprobe_app = ec.register_resource("linux::Application")\r
- ec.set(modprobe_app, "command", "modprobe ath5k && ip a | grep wlan0")\r
- ec.register_connection(modprobe_app, node)\r
- apps.append(modprobe_app)\r
-\r
- rc_app = ec.register_resource("linux::Application")\r
- ec.set(rc_app, "command", "service omf_rc stop; service omf_rc start")\r
- ec.register_connection(rc_app, node)\r
- apps.append(rc_app)\r
-\r
-print "This might take time..."\r
-\r
-ec.deploy(wait_all_ready=False)\r
-\r
-ec.wait_finished(apps)\r
-\r
-print ec.trace(load_app, "stdout")\r
-print ec.trace(reboot_app, "stdout")\r
-\r
-for app in apps:\r
- print ec.trace(app, "stdout")\r
-\r
-ec.shutdown()\r
-\r
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation;
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+# Maksym Gabielkov <maksym.gabielkovc@inria.fr>
+#
+
+## This is a maintenance script used to bootstrap the nodes from
+## Nitos testbed (NITLab) before running an OMF experiment using
+## Nitos nodes. This fixes the problem of Resource Controller
+## misbehaving by restarting it and it also loads the ath5k driver.
+
+# Example of how to run this experiment (replace with your information):
+#
+# $ cd <path-to-nepi>
+# python examples/linux/nitos_testbed_bootstrap.py -H <omf.nitos.node0XX,omf.nitos.node0ZZ,..> -U <node-username> -i <ssh-key> -g <nitos-gateway> -u <nitos-slice>
+#
+
+from __future__ import print_function
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.resource import ResourceAction, ResourceState
+
+from optparse import OptionParser
+import os
+
+usage = ("usage: %prog -H <list-of-nitos-hosts> -U <node-username> -i <ssh-key> -g <nitos-gateway> -u <slicename>")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-H", "--hosts", dest="hosts",
+ help="Comma separated list of hosts", type="str")
+parser.add_option("-U", "--username", dest="username",
+ help="Username for the nitos hosts (usually root)",
+ type="str", default="root" )
+parser.add_option("-g", "--gateway", dest="gateway",
+ help="Nitos gateway hostname",
+ type="str", default="nitlab.inf.uth.gr")
+parser.add_option("-u", "--gateway-user", dest="gateway_username",
+ help="Nitos gateway username (slicename)",
+ type="str", default="nitlab.inf.uth.gr")  # FIXME: default looks copy-pasted from --gateway; a slicename is expected here
+parser.add_option("-i", "--ssh-key", dest="ssh_key",
+ help="Path to private SSH key to be used for connection",
+ type="str")
+(options, args) = parser.parse_args()
+
+hosts = options.hosts
+username = options.username
+gateway = options.gateway
+gateway_username = options.gateway_username
+identity = options.ssh_key
+
+apps = []
+
+ec = ExperimentController(exp_id="nitos_bootstrap")
+
+gw_node = ec.register_resource("linux::Node")
+ec.set(gw_node, "username", gateway_username)
+ec.set(gw_node, "hostname", gateway)
+ec.set(gw_node, "identity", identity)
+ec.set(gw_node, "cleanExperiment", True)
+
+load_cmd = "omf load -i nepi_OMF6_VLC_baseline_grid.ndz -t %s" % hosts
+load_app = ec.register_resource("linux::Application")
+ec.set(load_app, "command", load_cmd)
+ec.register_connection(load_app, gw_node)
+
+reboot_cmd = "omf tell -a on -t %s" % hosts
+reboot_app = ec.register_resource("linux::Application")
+ec.set(reboot_app, "command", reboot_cmd)
+ec.register_connection(reboot_app, gw_node)
+
+ec.register_condition(reboot_app, ResourceAction.START, load_app,
+ ResourceState.STOPPED, time="60s")
+
+hosts = hosts.split(",")
+
+for hostname in hosts:
+ host = hostname.split(".")[-1]
+ node = ec.register_resource("linux::Node")
+ ec.set(node, "username", username)
+ ec.set(node, "hostname", host)
+ ec.set(node, "identity", identity)
+ ec.set(node, "gateway", gateway)
+ ec.set(node, "gatewayUser", gateway_username)
+ ec.set(node, "cleanExperiment", True)
+ ec.register_condition(node, ResourceAction.DEPLOY, reboot_app,
+ ResourceState.STOPPED, time="300s")
+
+ modprobe_app = ec.register_resource("linux::Application")
+ ec.set(modprobe_app, "command", "modprobe ath5k && ip a | grep wlan0")
+ ec.register_connection(modprobe_app, node)
+ apps.append(modprobe_app)
+
+ rc_app = ec.register_resource("linux::Application")
+ ec.set(rc_app, "command", "service omf_rc stop; service omf_rc start")
+ ec.register_connection(rc_app, node)
+ apps.append(rc_app)
+
+print("This might take time...")
+
+ec.deploy(wait_all_ready=False)
+
+ec.wait_finished(apps)
+
+print(ec.trace(load_app, "stdout"))
+print(ec.trace(reboot_app, "stdout"))
+
+for app in apps:
+ print(ec.trace(app, "stdout"))
+
+ec.shutdown()
+
CCNR_DIRECTORY="/root" CCNR_STATUS_PORT="8080"'
# BASH command -> ' ccndstart ; ccndc add ccnx:/ udp host ; ccnr '
- peers = map(lambda peer: "ccndc add ccnx:/ udp %s" % peer, peers)
+ peers = ["ccndc add ccnx:/ udp %s" % peer for peer in peers]
#command += " ; ".join(peers) + " && "
command = peers[0]
nodes = dict()
chann = add_channel(ec, channel, xmpp_slice, xmpp_host)
- for i in xrange(len(all_hosts)):
- node = add_node(ec,all_hosts[i], xmpp_slice, xmpp_host)
+ for i, host in enumerate(all_hosts):
+ node = add_node(ec, host, xmpp_slice, xmpp_host)
iface = add_interface(ec, all_ip[i], xmpp_slice, xmpp_host)
ec.register_connection(node, iface)
ec.register_connection(iface, chann)
- nodes[all_hosts[i]] = node
+ nodes[host] = node
# CCN setup for the node
ccnds = dict()
ccnrs = dict()
- for i in xrange(len(all_hosts)):
- ccndstart = add_app(ec, nodes[all_hosts[i]], "#ccndstart", "ccndstart &",
+ for i, host in enumerate(all_hosts):
+ ccndstart = add_app(ec, nodes[host], "#ccndstart", "ccndstart &",
env, xmpp_slice, xmpp_host)
- ccnr = add_app(ec, nodes[all_hosts[i]], "#ccnr", "ccnr &",
+ ccnr = add_app(ec, nodes[host], "#ccnr", "ccnr &",
env, xmpp_slice, xmpp_host)
- ccnds[all_hosts[i]] = ccndstart
- ccnrs[all_hosts[i]] = ccnr
+ ccnds[host] = ccndstart
+ ccnrs[host] = ccnr
ec.register_condition(ccnr, ResourceAction.START, ccndstart, ResourceState.STARTED, "1s")
# CCNDC setup
link = [l1u, l1d, l2u, l2d, l3u, l3d, l4u, l4d, l5u, l5d, b1u, b1d, b2u, b2d]
# List of condition
- for i in xrange(len(all_hosts)):
- ec.register_condition(ccnrs[all_hosts[i]], ResourceAction.START, ccnds[all_hosts[i]], ResourceState.STARTED, "1s")
- ec.register_condition(link, ResourceAction.START, ccnrs[all_hosts[i]], ResourceState.STARTED, "1s")
+ for host in all_hosts:
+ ec.register_condition(ccnrs[host], ResourceAction.START, ccnds[host], ResourceState.STARTED, "1s")
+ ec.register_condition(link, ResourceAction.START, ccnrs[host], ResourceState.STARTED, "1s")
# Streaming Server
pub = add_publish(ec, nodes[host5], movie, xmpp_slice, xmpp_host)
# Cleaning when the experiment stop
ccndstops = []
- for i in xrange(len(all_hosts)):
- ccndstop = add_app(ec, nodes[all_hosts[i]], "#ccndstop", "ccndstop", env, xmpp_slice, xmpp_host)
+ for host in all_hosts:
+ ccndstop = add_app(ec, nodes[host], "#ccndstop", "ccndstop", env, xmpp_slice, xmpp_host)
ccndstops.append(ccndstop)
killall = add_app(ec, nodes[host6], "#kill", "killall sh", "", xmpp_slice, xmpp_host)
# Condition to stop and clean the experiment
apps = []
- for i in xrange(len(all_hosts)):
- apps.append(ccnds[all_hosts[i]])
- apps.append(ccnrs[all_hosts[i]])
+ for host in all_hosts:
+ apps.append(ccnds[host])
+ apps.append(ccnrs[host])
apps += link
apps.append(pub)
apps.append(stream)
nodes = dict()
chann = add_channel(ec, channel, xmpp_slice, xmpp_host)
- for i in xrange(len(all_hosts)):
- node = add_node(ec,all_hosts[i], xmpp_slice, xmpp_host)
+ for i, host in enumerate(all_hosts):
+ node = add_node(ec,host, xmpp_slice, xmpp_host)
iface = add_interface(ec, all_ip[i], xmpp_slice, xmpp_host)
ec.register_connection(node, iface)
ec.register_connection(iface, chann)
- nodes[all_hosts[i]] = node
+ nodes[host] = node
#### CCN setup for the node
### ccnds = dict()
# Do the iperf
iperfserv = dict()
iperfclient = dict()
- for i in xrange(len(all_hosts)):
- perfserv = add_app(ec, nodes[all_hosts[i]], "#perfserv", "iperf -s > /opt/iperfserv.txt",
+ for i, host in enumerate(all_hosts):
+ perfserv = add_app(ec, nodes[host], "#perfserv", "iperf -s > /opt/iperfserv.txt",
env, xmpp_slice, xmpp_host)
- iperfclient[all_hosts[i]] = []
+ iperfclient[host] = []
if i > 0:
cmd = "iperf -c " + all_ip[i-1] + " > /opt/iperclient1.txt"
- perfclient1 = add_app(ec, nodes[all_hosts[i]], "#perfclient1", cmd,
+ perfclient1 = add_app(ec, nodes[host], "#perfclient1", cmd,
env, xmpp_slice, xmpp_host)
- iperfclient[all_hosts[i]].append(perfclient1)
+ iperfclient[host].append(perfclient1)
if i < (len(all_hosts)-1):
cmd = "iperf -c " + all_ip[i+1] + " > /opt/iperclient2.txt"
- perfclient2 = add_app(ec, nodes[all_hosts[i]], "#perfclient2", cmd,
+ perfclient2 = add_app(ec, nodes[host], "#perfclient2", cmd,
env, xmpp_slice, xmpp_host)
- iperfclient[all_hosts[i]].append(perfclient2)
+ iperfclient[host].append(perfclient2)
- iperfserv[all_hosts[i]] = perfserv
+ iperfserv[host] = perfserv
- for i in xrange(len(all_hosts)):
- #ec.register_condition(iperfserv[all_hosts[i]], ResourceAction.START, link, ResourceState.STARTED, "2s")
- for elt in iperfclient[all_hosts[i]]:
- ec.register_condition(elt, ResourceAction.START, iperfserv[all_hosts[i]], ResourceState.STARTED, "3s")
+ for host in all_hosts:
+ #ec.register_condition(iperfserv[host], ResourceAction.START, link, ResourceState.STARTED, "2s")
+ for elt in iperfclient[host]:
+ ec.register_condition(elt, ResourceAction.START, iperfserv[host], ResourceState.STARTED, "3s")
## Streaming Server
## ccndstop = add_app(ec, nodes[all_hosts[i]], "#ccndstop", "ccndstop", env, xmpp_slice, xmpp_host)
## ccndstops.append(ccndstop)
perfkill = dict()
- for i in xrange(len(all_hosts)):
- kill = add_app(ec, nodes[all_hosts[i]], "#kill", "killall iperf", "", xmpp_slice, xmpp_host)
- perfkill[all_hosts[i]] = kill
+ for host in all_hosts:
+ kill = add_app(ec, nodes[host], "#kill", "killall iperf", "", xmpp_slice, xmpp_host)
+ perfkill[host] = kill
# Condition to stop and clean the experiment
apps = []
- for i in xrange(len(all_hosts)):
-# apps.append(ccnds[all_hosts[i]])
-# apps.append(ccnrs[all_hosts[i]])
- apps.append(iperfserv[all_hosts[i]])
- for elt in iperfclient[all_hosts[i]]:
+ for host in all_hosts:
+# apps.append(ccnds[host])
+# apps.append(ccnrs[host])
+ apps.append(iperfserv[host])
+ for elt in iperfclient[host]:
apps.append(elt)
# apps += link
#apps.append(pub)
# ec.register_condition(ccndstops + [killall], ResourceAction.STOP, ccndstops, ResourceState.STARTED, "1s")
killall = []
- for i in xrange(len(all_hosts)):
- killall.append(perfkill[all_hosts[i]])
+ for host in all_hosts:
+ killall.append(perfkill[host])
ec.register_condition(killall, ResourceAction.START, apps, ResourceState.STOPPED, "1s")
ec.register_condition(killall, ResourceAction.STOP, killall, ResourceState.STARTED, "1s")
ec.set(app3, 'command', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority /root/vlc/vlc-1.1.13/cvlc rtp://192.168.0.3:1234")
- "echo -e 'new TEST broadcast enabled loop\\n"\
- "setup TEST input %s\\n"\
- "setup TEST output #rtp{mux=ts,sdp=rtsp://0.0.0.0:8554/TEST}\\n\\n"\
- "new test_sched schedule enabled\\n"\
- "setup test_sched append control TEST play' > ${SOURCES}/VOD.vlm" % mv)
+# "echo -e 'new TEST broadcast enabled loop\\n"\
+# "setup TEST input %s\\n"\
+# "setup TEST output #rtp{mux=ts,sdp=rtsp://0.0.0.0:8554/TEST}\\n\\n"\
+# "new test_sched schedule enabled\\n"\
+# "setup test_sched append control TEST play' > ${SOURCES}/VOD.vlm" % mv)
# Choose a directory to store the traces, by default
# It is the folder where you run Nepi.
-f = open("app1.txt", "w")
-f.write(stdout_1)
-f.close()
+with open("app1.txt", "w") as f:
+ f.write(stdout_1)
-g = open("app2.txt", "w")
-g.write(stdout_2)
-g.close()
+with open("app2.txt", "w") as g:
+ g.write(stdout_2)
# Stop Experiment
ec.shutdown()
#
#
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
from nepi.execution.resource import ResourceAction, ResourceState
ec.wait_finished([app1, app2])
-print ec.trace(app1, "stdout")
+print(ec.trace(app1, "stdout"))
# Stop Experiment
ec.shutdown()
def get_broad_values(list_files, type_file):
for s in list_files:
nb = nb_client(s)
- o = open(s, 'r')
- for l in o:
- if 'udp' in l:
- row = l.split(':')
- f = row[1].split(' ')
- frame = int(f[0])
- byte = int(row[2])
-
- res = {}
- res['frames'] = frame
- res['bytes'] = byte
- if frame < 20 :
- continue
- overall_stats_broad[nb][type_file].append(res)
- o.close()
+ with open(s, 'r') as o:
+ for l in o:
+ if 'udp' in l:
+ row = l.split(':')
+ f = row[1].split(' ')
+ frame = int(f[0])
+ byte = int(row[2])
+
+ res = {}
+ res['frames'] = frame
+ res['bytes'] = byte
+ if frame < 20 :
+ continue
+ overall_stats_broad[nb][type_file].append(res)
get_broad_values(stats_broad_wlan, 'wlan')
get_broad_values(stats_broad_eth, 'eth')
def get_vod_values(list_files, type_file):
for s in list_files:
nb = nb_client(s)
- o = open(s, 'r')
- for l in o:
- if 'udp' in l:
- row = l.split(':')
- f = row[1].split(' ')
- frame = int(f[0])
- byte = int(row[2])
-
- res = {}
- res['frames'] = frame
- res['bytes'] = byte
- if frame < 100 :
- continue
- overall_stats_vod[nb][type_file].append(res)
- o.close()
+ with open(s, 'r') as o:
+ for l in o:
+ if 'udp' in l:
+ row = l.split(':')
+ f = row[1].split(' ')
+ frame = int(f[0])
+ byte = int(row[2])
+
+ res = {}
+ res['frames'] = frame
+ res['bytes'] = byte
+ if frame < 100 :
+ continue
+ overall_stats_vod[nb][type_file].append(res)
get_vod_values(stats_vod_wlan, 'wlan')
get_vod_values(stats_vod_eth, 'eth')
# $ PYTHONPATH=$PYTHONPATH:src/ python examples/openvswitch/ovs_ping.py -n "192.168.3.0/24" -s <slicename> -i /~/.ssh/id_rsa
#
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
import os
# Retrieve ping results and save them in a file
ping = ec.trace(app, "stdout")
-print ping
+print(ping)
# Delete the overlay network
ec.shutdown()
-
-
# $ PYTHONPATH=$PYTHONPATH:src/ python examples/openvswitch/ovs_ping_2_switches.py -n "192.168.3.0/24" -C "1.1.1.1" -s <slicename> -i /~/.ssh/id_rsa
#
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
import os
# Delete the overlay network
ec.shutdown()
-print stdout
-
+print(stdout)
# $ PYTHONPATH=$PYTHONPATH:src/ python examples/openvswitch/ovs_ping_3_switches.py -n "192.168.3.0/24" -C "1.1.1.1" -s <slicename> -i /~/.ssh/id_rsa
#
-
+from __future__ import print_function
from nepi.execution.ec import ExperimentController
})
# Ping from all resources to all other resources
-for r1, (n1, ip1) in r2ip.iteritems():
- for r2, (n2, ip2) in r2ip.iteritems():
+for r1, (n1, ip1) in r2ip.items():
+ for r2, (n2, ip2) in r2ip.items():
if r1 == r2:
continue
ec.deploy()
+# py3: no need to transform into a list
+# as wait_finished (wait in fact) will do it anyway
ec.wait_finished(apps.values())
# collect results
-for key, app in apps.iteritems():
+for key, app in apps.items():
stdout = ec.trace(app, "stdout")
- print "***************************", key, "************************"
- print stdout
- print "\n"
+ print("***************************", key, "************************")
+ print(stdout)
+ print("\n")
# Delete the overlay network
ec.shutdown()
-
-
ping12 = ec.trace(app12, 'stdout')
-f = open("examples/openvswitch/ovs_ping_3switches_loop.txt", 'w')
-
-if not ping12:
- ec.shutdown()
-
-f.write("************ Ping From Switch 1 : 192.168.3.2 ********************\n\n")
-f.write(ping1)
-f.write("--------------------------------------\n")
-f.write(ping2)
-f.write("************ Ping From Switch 2 : 192.168.3.4 ********************\n\n")
-f.write(ping3)
-f.write("--------------------------------------\n")
-f.write(ping4)
-f.write("************ Ping From Switch 3 : 192.168.3.6 ********************\n\n")
-f.write(ping5)
-f.write("--------------------------------------\n")
-f.write(ping6)
-f.write("************ Ping From Host 1 : 192.168.3.1 ********************\n\n")
-f.write(ping7)
-f.write("--------------------------------------\n")
-f.write(ping8)
-f.write("************ Ping From Host 2 : 192.168.3.3 ********************\n\n")
-f.write(ping9)
-f.write("--------------------------------------\n")
-f.write(ping10)
-f.write("************ Ping From Host 3 : 192.168.3.5 ********************\n\n")
-f.write(ping11)
-f.write("--------------------------------------\n")
-f.write(ping12)
-f.close()
+with open("examples/openvswitch/ovs_ping_3switches_loop.txt", 'w') as f:
+
+ if not ping12:
+ ec.shutdown()
+
+ f.write("************ Ping From Switch 1 : 192.168.3.2 ********************\n\n")
+ f.write(ping1)
+ f.write("--------------------------------------\n")
+ f.write(ping2)
+ f.write("************ Ping From Switch 2 : 192.168.3.4 ********************\n\n")
+ f.write(ping3)
+ f.write("--------------------------------------\n")
+ f.write(ping4)
+ f.write("************ Ping From Switch 3 : 192.168.3.6 ********************\n\n")
+ f.write(ping5)
+ f.write("--------------------------------------\n")
+ f.write(ping6)
+ f.write("************ Ping From Host 1 : 192.168.3.1 ********************\n\n")
+ f.write(ping7)
+ f.write("--------------------------------------\n")
+ f.write(ping8)
+ f.write("************ Ping From Host 2 : 192.168.3.3 ********************\n\n")
+ f.write(ping9)
+ f.write("--------------------------------------\n")
+ f.write(ping10)
+ f.write("************ Ping From Host 3 : 192.168.3.5 ********************\n\n")
+ f.write(ping11)
+ f.write("--------------------------------------\n")
+ f.write(ping12)
# Delete the overlay network
ec.shutdown()
ping24 = ec.trace(app24, 'stdout')
ping25 = ec.trace(app25, 'stdout')
-f = open("examples/openvswitch/ping_over_udpTapTunnel_performance_test.txt", 'w')
+with open("examples/openvswitch/ping_over_udpTapTunnel_performance_test.txt", 'w') as f:
-if not ping25:
- ec.shutdown()
+ if not ping25:
+ ec.shutdown()
-f.write("************ Ping From Host 1 : 192.168.3.1 ********************\n\n")
-f.write(ping1)
-f.write("----------------------------------------\n\n")
-f.write(ping2)
-f.write("----------------------------------------\n\n")
-f.write(ping3)
-f.write("----------------------------------------\n\n")
-f.write(ping4)
-f.write("----------------------------------------\n\n")
-f.write(ping5)
-f.write("************ Ping From Host 2 : 192.168.3.13 ********************\n\n")
-f.write(ping11)
-f.write("----------------------------------------\n\n")
-f.write(ping12)
-f.write("----------------------------------------\n\n")
-f.write(ping13)
-f.write("----------------------------------------\n\n")
-f.write(ping14)
-f.write("----------------------------------------\n\n")
-f.write(ping15)
-f.write("************ Ping From Host 3 : 192.168.3.25 ********************\n\n")
-f.write(ping21)
-f.write("----------------------------------------\n\n")
-f.write(ping22)
-f.write("----------------------------------------\n\n")
-f.write(ping23)
-f.write("----------------------------------------\n\n")
-f.write(ping24)
-f.write("----------------------------------------\n\n")
-f.write(ping25)
-
-f.close()
+ f.write("************ Ping From Host 1 : 192.168.3.1 ********************\n\n")
+ f.write(ping1)
+ f.write("----------------------------------------\n\n")
+ f.write(ping2)
+ f.write("----------------------------------------\n\n")
+ f.write(ping3)
+ f.write("----------------------------------------\n\n")
+ f.write(ping4)
+ f.write("----------------------------------------\n\n")
+ f.write(ping5)
+ f.write("************ Ping From Host 2 : 192.168.3.13 ********************\n\n")
+ f.write(ping11)
+ f.write("----------------------------------------\n\n")
+ f.write(ping12)
+ f.write("----------------------------------------\n\n")
+ f.write(ping13)
+ f.write("----------------------------------------\n\n")
+ f.write(ping14)
+ f.write("----------------------------------------\n\n")
+ f.write(ping15)
+ f.write("************ Ping From Host 3 : 192.168.3.25 ********************\n\n")
+ f.write(ping21)
+ f.write("----------------------------------------\n\n")
+ f.write(ping22)
+ f.write("----------------------------------------\n\n")
+ f.write(ping23)
+ f.write("----------------------------------------\n\n")
+ f.write(ping24)
+ f.write("----------------------------------------\n\n")
+ f.write(ping25)
# Delete the overlay network
ec.shutdown()
# 0 ------- network -------- 1
#
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
from optparse import OptionParser, SUPPRESS_HELP
ec.wait_finished([ccncat])
stdout = ec.trace(ccncat, "stdout")
-f = open("video.ts", "w")
-f.write(stdout)
-f.close()
+with open("video.ts", "w") as f:
+ f.write(stdout)
ec.shutdown()
-print "Transfered FILE stored localy at video.ts"
+print("Transfered FILE stored localy at video.ts")
# $ cd <path-to-nepi>
# python examples/planetlab/ping.py -s <pl-slice> -u <pl-user> -p <pl-password> -k <pl-ssh-key>
+from __future__ import print_function
from nepi.execution.ec import ExperimentController
trace = ec.trace(app, "stdout")
-print "PING outout ", trace
+print("PING outout ", trace)
# Do the experiment controller shutdown
ec.shutdown()
# $ cd <path-to-nepi>
# python examples/planetlab/ping_with_filters.py -s <pl-slice> -u <pl-user> -p <pl-password> -k <pl-ssh-key>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
from nepi.execution.resource import ResourceAction, ResourceState
# Wait until the applications are finish to retrive the traces:
ec.wait_finished(apps)
-print "Results stored at", ec.exp_dir
+print("Results stored at", ec.exp_dir)
# Do the experiment controller shutdown:
ec.shutdown()
# $ cd <path-to-nepi>
# python examples/planetlab/select_nodes.py -s <pl-slice> -u <pl-user> -p <pl-password> -k <pl-ssh-key> -c <country> -o <operating-system> -n <node-count>
+from __future__ import print_function
from nepi.execution.ec import ExperimentController
nodes = []
-for i in xrange(node_count):
+for i in range(node_count):
node = add_node(ec, pl_slice, pl_ssh_key, pl_user, pl_password, country, os)
nodes.append(node)
ec.wait_deployed(nodes)
-print "SELECTED HOSTS"
+print("SELECTED HOSTS")
for node in nodes:
- print ec.get(node, "hostname")
+ print(ec.get(node, "hostname"))
ec.shutdown()
# $ cd <path-to-nepi>
# python examples/planetlab/update_fedora_repo.py -H <host1,host2,..> -s <pl-slice> -u <pl-user> -p <pl-password> -k <pl-ssh-key>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
from optparse import OptionParser, SUPPRESS_HELP
(options, args) = parser.parse_args()
-proceed = raw_input ("Executing this script will modify the fedora yum repositories in the selected PlanetLab hosts. Are you sure to continue? [y/N] ")
+proceed = input ("Executing this script will modify the fedora yum repositories in the selected PlanetLab hosts. Are you sure to continue? [y/N] ")
if proceed.lower() not in ['yes', 'y']:
os._exit(1)
pl_password = options.pl_password
hosts = options.hosts
-hosts = map(str.strip, hosts.split(","))
+hosts = [host.strip() for host in hosts.split(",")]
apps = []
## Create the experiment controller
for app in apps:
try:
- print ec.trace(app, "stderr")
+ print(ec.trace(app, "stderr"))
except:
- print "NO stderr"
+ print("NO stderr")
ec.shutdown()
"nepi.resources.linux.ns3" : [ "dependencies/*.tar.gz" ]
},
install_requires = [
- "ipaddr",
"networkx",
# refrain from mentioning these ones that are not exactly crucial
# and that have additional, non-python, dependencies
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
import logging
import os
import traceback
log.setLevel(LOGLEVEL)
except:
err = traceback.format_exc()
- print "ERROR ", err
+ print("ERROR ", err)
else:
# Set the logging level defined by the user for all
# components
# \ nid3.log
#
+from __future__ import print_function
+
import collections
import functools
import networkx
faces = dict()
sep = " "
- f = open(filename, "r")
-
- data = []
-
- for line in f:
- cols = line.strip().split(sep)
-
- # CCN_PEEK
- # MESSAGE interest_from
- # 1374181938.808523 ccnd[9245]: debug.4352 interest_from 6 ccnx:/test/bunny.ts (23 bytes,sim=0CDCC1D7)
- #
- # MESSAGE interest_to
- # 1374181938.812750 ccnd[9245]: debug.3502 interest_to 5 ccnx:/test/bunny.ts (39 bytes,i=2844,sim=0CDCC1D7)
- #
- # MESSAGE CONTENT FROM
- # 1374181938.868682 ccnd[9245]: debug.4643 content_from 5 ccnx:/test/bunny.ts/%FD%05%1E%85%8FVw/%00/%9E%3D%01%D9%3Cn%95%2BvZ%8
- #
- # MESSAGE CONTENT_TO
- # 1374181938.868772 ccnd[9245]: debug.1619 content_to 6 ccnx:/test/bunny.ts/%FD%05%1E%85%8FVw/%00/%9E%3D%01%D9%3Cn%95%2BvZ%8
- #
- # 1375596708.222304 ccnd[9758]: debug.3692 interest_expiry ccnx:/test/bunny.ts/%FD%05%1E%86%B1GS/%00%0A%F7 (44 bytes,c=0:1,i=2819,sim=49FA8048)
-
- # External face creation
- # 1374181452.965961 ccnd[9245]: accepted datagram client id=5 (flags=0x40012) 204.85.191.10 port 9695
-
- if line.find("accepted datagram client") > -1:
- face_id = (cols[5]).replace("id=",'')
- ip = cols[7]
- port = cols[9]
- faces[face_id] = (ip, port)
- continue
-
- # 1374181452.985296 ccnd[9245]: releasing face id 4 (slot 4)
- if line.find("releasing face id") > -1:
- face_id = cols[5]
- if face_id in faces:
- del faces[face_id]
- continue
-
- if len(cols) < 6:
- continue
-
- timestamp = cols[0]
- message_type = cols[3]
+ with open(filename, "r") as f:
+
+ data = []
+
+ for line in f:
+ cols = line.strip().split(sep)
+
+ # CCN_PEEK
+ # MESSAGE interest_from
+ # 1374181938.808523 ccnd[9245]: debug.4352 interest_from 6 ccnx:/test/bunny.ts (23 bytes,sim=0CDCC1D7)
+ #
+ # MESSAGE interest_to
+ # 1374181938.812750 ccnd[9245]: debug.3502 interest_to 5 ccnx:/test/bunny.ts (39 bytes,i=2844,sim=0CDCC1D7)
+ #
+ # MESSAGE CONTENT FROM
+ # 1374181938.868682 ccnd[9245]: debug.4643 content_from 5 ccnx:/test/bunny.ts/%FD%05%1E%85%8FVw/%00/%9E%3D%01%D9%3Cn%95%2BvZ%8
+ #
+ # MESSAGE CONTENT_TO
+ # 1374181938.868772 ccnd[9245]: debug.1619 content_to 6 ccnx:/test/bunny.ts/%FD%05%1E%85%8FVw/%00/%9E%3D%01%D9%3Cn%95%2BvZ%8
+ #
+ # 1375596708.222304 ccnd[9758]: debug.3692 interest_expiry ccnx:/test/bunny.ts/%FD%05%1E%86%B1GS/%00%0A%F7 (44 bytes,c=0:1,i=2819,sim=49FA8048)
+
+ # External face creation
+ # 1374181452.965961 ccnd[9245]: accepted datagram client id=5 (flags=0x40012) 204.85.191.10 port 9695
+
+ if line.find("accepted datagram client") > -1:
+ face_id = (cols[5]).replace("id=",'')
+ ip = cols[7]
+ port = cols[9]
+ faces[face_id] = (ip, port)
+ continue
- if message_type not in ["interest_from", "interest_to", "content_from",
- "content_to", "interest_dupnonce", "interest_expiry"]:
- continue
+ # 1374181452.985296 ccnd[9245]: releasing face id 4 (slot 4)
+ if line.find("releasing face id") > -1:
+ face_id = cols[5]
+ if face_id in faces:
+ del faces[face_id]
+ continue
- face_id = cols[4]
- content_name = cols[5]
+ if len(cols) < 6:
+ continue
- # Interest Nonce ? -> 412A74-0844-0008-50AA-F6EAD4
- nonce = ""
- if message_type in ["interest_from", "interest_to", "interest_dupnonce"]:
- last = cols[-1]
- if len(last.split("-")) == 5:
- nonce = last
+ timestamp = cols[0]
+ message_type = cols[3]
- try:
- size = int((cols[6]).replace('(',''))
- except:
- print "interest_expiry without face id!", line
- continue
+ if message_type not in ["interest_from", "interest_to", "content_from",
+ "content_to", "interest_dupnonce", "interest_expiry"]:
+ continue
- # If no external IP address was identified for this face
- # asume it is a local face
- peer = "localhost"
+ face_id = cols[4]
+ content_name = cols[5]
- if face_id in faces:
- peer, port = faces[face_id]
+ # Interest Nonce ? -> 412A74-0844-0008-50AA-F6EAD4
+ nonce = ""
+ if message_type in ["interest_from", "interest_to", "interest_dupnonce"]:
+ last = cols[-1]
+ if len(last.split("-")) == 5:
+ nonce = last
- data.append((content_name, timestamp, message_type, peer, face_id,
- size, nonce, line))
+ try:
+ size = int((cols[6]).replace('(',''))
+ except:
+ print("interest_expiry without face id!", line)
+ continue
- f.close()
+ # If no external IP address was identified for this face
+            # assume it is a local face
+ peer = "localhost"
+
+ if face_id in faces:
+ peer, port = faces[face_id]
+
+ data.append((content_name, timestamp, message_type, peer, face_id,
+ size, nonce, line))
return data
return f.name
def load_content_history(fname):
- f = open(fname, "r")
- content_history = pickle.load(f)
- f.close()
+ with open(fname, "r") as f:
+ content_history = pickle.load(f)
os.remove(fname)
return content_history
if not found_files:
msg = "No CCND output files were found to parse at %s " % logs_dir
- raise RuntimeError, msg
+ raise RuntimeError(msg)
if parse_ping_logs:
ping_parser.annotate_cn_graph(logs_dir, graph)
fname = graph.node[nid]["history"]
history = load_content_history(fname)
- for content_name in history.keys():
+ for content_name in history:
hist = history[content_name]
for (timestamp, message_type, nid1, nid2, nonce, size, line) in hist:
# Compute the time elapsed between the time an interest is sent
# in the consumer node and when the content is received back
- for content_name in content_names.keys():
+ for content_name in content_names:
# order content and interest messages by timestamp
content_names[content_name]["content"] = sorted(
content_names[content_name]["content"])
for nonce, timestamps in content_names[content_name][
- "interest"].iteritems():
+ "interest"].items():
content_names[content_name]["interest"][nonce] = sorted(
timestamps)
graph = annotate_cn_graph(logs_dir, graph,
parse_ping_logs = parse_ping_logs)
except:
- print "Skipping: Error parsing ccnd logs", logs_dir
+ print("Skipping: Error parsing ccnd logs", logs_dir)
raise
source = ccn_consumers(graph)[0]
interest_count,
content_count) = process_content_history(graph)
except:
- print "Skipping: Error processing ccn data", logs_dir
+ print("Skipping: Error processing ccn data", logs_dir)
raise
return (graph,
"""
- f = open(filename, "r")
+ with open(filename, "r") as f:
- # Traceroute info
- target_ip = None
- target_hostname = None
+ # Traceroute info
+ target_ip = None
+ target_hostname = None
- data = []
-
- for line in f:
- # match traceroute to ...
- m = re.match(_rre, line)
- if not m:
- continue
-
- target_ip = m.groupdict()["ip"]
- # FIX THIS: Make sure the regular expression does not inlcude
- # the ')' in the ip group
- target_ip = target_ip.replace(")","")
- target_hostname = m.groupdict()["hostname"]
- time = m.groupdict()["time"]
- data.append((target_ip, target_hostname, time))
-
- f.close()
+ data = []
+
+ for line in f:
+ # match traceroute to ...
+ m = re.match(_rre, line)
+ if not m:
+ continue
+
+ target_ip = m.groupdict()["ip"]
+            # FIX THIS: Make sure the regular expression does not include
+ # the ')' in the ip group
+ target_ip = target_ip.replace(")","")
+ target_hostname = m.groupdict()["hostname"]
+ time = m.groupdict()["time"]
+ data.append((target_ip, target_hostname, time))
return data
if not found_files:
msg = "No PING output files were found to parse at %s " % logs_dir
- raise RuntimeError, msg
+ raise RuntimeError(msg)
# Take as weight the most frequent value
for nid1, nid2 in graph.edges():
"""
rms = []
- for guid, rm in self._resources.iteritems():
+ for guid, rm in self._resources.items():
if rm.get_rtype() == rtype:
rms.append(rm)
return rms
:rtype: list
"""
- keys = self._resources.keys()
+ keys = list(self._resources.keys())
return keys
"""
rms = []
- for guid, rm in self._resources.iteritems():
+ for guid, rm in self._resources.items():
if rm.get_rtype() == rtype:
rms.append(rm.guid)
return rms
"""
# Get next available guid
- guid = self._guid_generator.next(guid)
+ guid = self._guid_generator.__next__(guid)
# Instantiate RM
rm = ResourceFactory.create(rtype, self, guid)
if not guids:
# If no guids list was passed, all 'NEW' RMs will be deployed
guids = []
- for guid, rm in self._resources.iteritems():
+ for guid, rm in self._resources.items():
if rm.state == ResourceState.NEW:
guids.append(guid)
new_group = False
if not group:
new_group = True
- group = self._group_id_generator.next()
+ group = next(self._group_id_generator)
if group not in self._groups:
self._groups[group] = []
"""
if self._state == ECState.RELEASED:
- return
+ return
if isinstance(guids, int):
guids = [guids]
try:
self._cond.acquire()
- task = self._scheduler.next()
+ task = next(self._scheduler)
if not task:
# No task to execute. Wait for a new task to be scheduled.
""" Returns a copy of the attributes
"""
- return copy.deepcopy(cls._attributes.values())
+ return copy.deepcopy(list(cls._attributes.values()))
@classmethod
def get_attribute(cls, name):
""" Returns a copy of the traces
"""
- return copy.deepcopy(cls._traces.values())
+ return copy.deepcopy(list(cls._traces.values()))
@classmethod
def get_help(cls):
if not isinstance(group, list):
group = [group]
- for act, conditions in self.conditions.iteritems():
+ for act, conditions in self.conditions.items():
if action and act != action:
continue
if not rm.get('critical'):
continue
msg = "Resource can not wait for FAILED RM %d. Setting Resource to FAILED"
- raise RuntimeError, msg
+ raise RuntimeError(msg)
# If the RM state is lower than the requested state we must
# reschedule (e.g. if RM is READY but we required STARTED).
if (not max_runs or max_runs < 0) and not compute_metric_callback:
msg = "Undefined STOP condition, set stop_callback or max_runs"
- raise RuntimeError, msg
+ raise RuntimeError(msg)
if compute_metric_callback and not evaluate_convergence_callback:
evaluate_convergence_callback = self.evaluate_normal_convergence
if len(metrics) == 0:
msg = "0 samples collected"
- raise RuntimeError, msg
+ raise RuntimeError(msg)
x = numpy.array(metrics)
n = len(metrics)
ec.release()
if ec.state == ECState.FAILED:
- raise RuntimeError, "Experiment failed"
+ raise RuntimeError("Experiment failed")
return ec
:type task: task
"""
if task.id == None:
- task.id = self._idgen.next()
+ task.id = next(self._idgen)
entry = (task.timestamp, task.id, task)
self._valid.add(task.id)
except:
pass
- def next(self):
+ def __next__(self):
""" Get the next task in the queue by timestamp and arrival order
"""
while self._queue:
msg = "No traceName was specified"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
self._store_path = self.ec.run_dir
try:
result = self.ec.trace(rm.guid, trace_name)
- f = open(fpath, "w")
- f.write(result)
- f.close()
+ with open(fpath, "w") as f:
+ f.write(result)
except:
import traceback
err = traceback.format_exc()
if not node:
msg = "Application %s guid %d NOT connected to Node" % (
self._rtype, self.guid)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
self._node = node[0]
for line in out.strip().split("\n"):
parts = line.strip().split(" ")
procs[parts[0]] = parts[1]
- pickle.dump(procs, open("/tmp/save.proc", "wb"))
+ with open("/tmp/save.proc", "wb") as pickle_file:
+ pickle.dump(procs, pickle_file)
# create run dir for application
self.node.mkdir(self.run_home)
if sources:
self.info("Uploading sources ")
- sources = map(str.strip, sources.split(";"))
+ sources = [str.strip(source) for source in sources.split(";")]
# Separate sources that should be downloaded from
# the web, from sources that should be uploaded from
if self._proc.poll():
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def _run_in_background(self):
command = self.get("command")
if proc.poll():
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
# Wait for pid file to be generated
pid, ppid = self.node.wait_pid(self.run_home)
if err:
msg = " Failed to start command '%s' " % command
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def do_stop(self):
""" Stops application execution
if proc.poll():
msg = "Failed to execute command"
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def do_start(self):
if self.state == ResourceState.READY:
else:
msg = " Failed to execute command '%s'" % command
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
@property
def _start_command(self):
else:
msg = " Failed to execute command '%s'" % command
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def do_stop(self):
command = self.get('command') or ''
})
env = self.path
- env += " ".join(map(lambda k: "%s=%s" % (envs.get(k), str(self.get(k))) \
- if self.get(k) else "", envs.keys()))
+ env += " ".join(["%s=%s" % (envs.get(k), str(self.get(k))) \
+ if self.get(k) else "" for k in list(envs.keys())])
return env
else:
msg = " Failed to execute command '%s'" % command
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
@property
def _start_command(self):
})
env = self.ccnd.path
- env += " ".join(map(lambda k: "%s=%s" % (envs.get(k), self.get(k)) \
- if self.get(k) else "", envs.keys()))
+ env += " ".join(["%s=%s" % (envs.get(k), self.get(k)) \
+ if self.get(k) else "" for k in list(envs.keys())])
return env
if proc.poll():
msg = "Failed to execute command"
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def configure(self):
if self.trace_enabled("ping") and not self.ping:
else:
msg = " Failed to execute command '%s'" % command
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def do_stop(self):
command = self.get('command')
if err:
msg = " Failed to execute command '%s'" % command
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
@property
def _start_command(self):
if not isinstance(packages, list):
packages = [packages]
- cmd = " && ".join(map(lambda p:
- " { dpkg -s %(package)s || sudo -S apt-get -y install %(package)s ; } " % {
- 'package': p}, packages))
+ cmd = " && ".join([" { dpkg -s %(package)s || sudo -S apt-get -y install %(package)s ; } " % {
+ 'package': p} for p in packages])
#cmd = { dpkg -s vim || sudo -S apt-get -y install vim ; } && ..
return cmd
if not isinstance(packages, list):
packages = [packages]
- cmd = " && ".join(map(lambda p:
- " { dpkg -s %(package)s && sudo -S apt-get -y purge %(package)s ; } " % {
- 'package': p}, packages))
+ cmd = " && ".join([" { dpkg -s %(package)s && sudo -S apt-get -y purge %(package)s ; } " % {
+ 'package': p} for p in packages])
#cmd = { dpkg -s vim && sudo -S apt-get -y purge vim ; } && ..
return cmd
if not m or int(m.groups()[0]) == 100:
msg = " Error establishing GRE Tunnel"
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def terminate_connection(self, endpoint, remote_endpoint):
pass
if err and proc.poll():
msg = " Error retrieving interface information "
self.error(msg, out, err)
- raise RuntimeError, "%s - %s - %s" % (msg, out, err)
+ raise RuntimeError("%s - %s - %s" % (msg, out, err))
# Check if an interface is found matching the RM attributes
ifaces = out.split("\n\n")
if not self.get("deviceName"):
msg = "Unable to resolve interface "
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
super(LinuxInterface, self).do_discover()
if err and proc.poll():
msg = "Error configuring interface with command '%s'" % cmd
self.error(msg, out, err)
- raise RuntimeError, "%s - %s - %s" % (msg, out, err)
+ raise RuntimeError("%s - %s - %s" % (msg, out, err))
if ip6 and mask6:
cmd = "ifconfig %(devname)s inet6 add %(ip6)s/%(mask6)d" % ({
if err and proc.poll():
msg = "Error seting ipv6 for interface using command '%s' " % cmd
self.error(msg, out, err)
- raise RuntimeError, "%s - %s - %s" % (msg, out, err)
+ raise RuntimeError("%s - %s - %s" % (msg, out, err))
super(LinuxInterface, self).do_provision()
else:
msg = " Failed to execute command '%s'" % command
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
else:
super(LinuxMtr, self).do_start()
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
import base64
-import cPickle
+import pickle
import errno
import os
import socket
msg = [msg_type, args, kwargs]
def encode(item):
- item = cPickle.dumps(item)
+ item = pickle.dumps(item)
return base64.b64encode(item)
encoded = "|".join(map(encode, msg))
self.emulation.error(msg, reply, err)
raise RuntimeError(msg)
- reply = cPickle.loads(base64.b64decode(reply))
+ reply = pickle.loads(base64.b64decode(reply))
return reply
else:
msg = " Failed to execute command '%s'" % command
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def do_stop(self):
""" Stops simulation execution
"""
command = " [ -e %s ] && echo 'DONE' " % self.remote_socket
- for i in xrange(200):
+ for i in range(200):
(out, err), proc = self.node.execute(command, retry = 1,
with_lock = True)
if not self.localhost and not self.get("username"):
msg = "Can't resolve OS, insufficient data "
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
out = self.get_os()
trace = traceback.format_exc()
msg = "Deploy failed. Unresponsive node {} -- traceback {}".format(self.get("hostname"), trace)
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
self.find_home()
"sudo -S killall -u {} || /bin/true ; ".format(self.get("username")))
else:
if self.state >= ResourceState.READY:
+ ########################
+            # Collect all processes (should be replaced with a more intelligent approach)
+ ppid = []
+ pids = []
+ avoid_pids = "ps axjf | awk '{print $1,$2}'"
+ (out, err), proc = self.execute(avoid_pids)
+ if len(out) != 0:
+ for line in out.strip().split("\n"):
+ parts = line.strip().split(" ")
+ ppid.append(parts[0])
+ pids.append(parts[1])
+
+ #Collect all process below ssh -D
+ tree_owner = 0
+ ssh_pids = []
+ sshs = "ps aux | grep 'sshd' | awk '{print $2,$12}'"
+ (out, err), proc = self.execute(sshs)
+ if len(out) != 0:
+ for line in out.strip().split("\n"):
+ parts = line.strip().split(" ")
+ if parts[1].startswith('root@pts'):
+ ssh_pids.append(parts[0])
+ elif parts[1] == "-D":
+ tree_owner = parts[0]
+
+ avoid_kill = []
+ temp = []
+            # Search for the child processes of the pids collected in the first block.
+ for process in ssh_pids:
+ temp = self.search_for_child(process, pids, ppid)
+ avoid_kill = list(set(temp))
+
+ if len(avoid_kill) > 0:
+ avoid_kill.append(tree_owner)
+ ########################
+
import pickle
- pids = pickle.load(open("/tmp/save.proc", "rb"))
+ with open("/tmp/save.proc", "rb") as pickle_file:
+ pids = pickle.load(pickle_file)
pids_temp = dict()
ps_aux = "ps aux | awk '{print $2,$11}'"
(out, err), proc = self.execute(ps_aux)
for line in out.strip().split("\n"):
parts = line.strip().split(" ")
pids_temp[parts[0]] = parts[1]
+            # compute the difference between the pids frozen earlier (pickle) and the
+            # current ones, then filter out the pids collected above (avoid_kill) so
+            # that processes belonging to other ssh connections are kept alive
kill_pids = set(pids_temp.items()) - set(pids.items())
- kill_pids = ' '.join(dict(kill_pids).keys())
+ # py2/py3 : keep it simple
+ kill_pids = ' '.join(kill_pids)
+
+            # remove the pids of the other ssh connections and their child processes
+ kill_pids = kill_pids.split(' ')
+ kill_pids = list(set(kill_pids) - set(avoid_kill))
+ kill_pids = ' '.join(kill_pids)
cmd = ("killall tcpdump || /bin/true ; " +
"kill $(ps aux | grep '[.]nepi' | awk '{print $2}') || /bin/true ; " +
(out, err), proc = self.execute(cmd, retry = 1, with_lock = True)
+ def search_for_child(self, pid, pids, ppid, family=[]):
+ """ Recursive function to search for child. List A contains the pids and list B the parents (ppid)
+ """
+ family.append(pid)
+ for key, value in enumerate(ppid):
+ if value == pid:
+ child = pids[key]
+ self.search_for_child(child, pids, ppid)
+ return family
+
def clean_home(self):
""" Cleans all NEPI related folders in the Linux host
"""
if text and not os.path.isfile(src):
# src is text input that should be uploaded as file
# create a temporal file with the content to upload
- f = tempfile.NamedTemporaryFile(delete=False)
+            # in python3 the temp file must be opened in binary mode when src is bytes
+ mode = 'w' if isinstance(src, str) else 'wb'
+ f = tempfile.NamedTemporaryFile(mode=mode, delete=False)
f.write(src)
f.close()
src = f.name
# If dst files should not be overwritten, check that the files do not
- # exits already
+ # exist already
if isinstance(src, str):
- src = map(str.strip, src.split(";"))
+ src = [s.strip() for s in src.split(";")]
if overwrite == False:
src = self.filter_existing_files(src, dst)
msg = "{} out: {} err: {}".format(msg, out, err)
if raise_on_error:
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return ((out, err), proc)
self.error(msg, out, err)
if raise_on_error:
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return ((out, err), proc)
else:
msg = "Error installing packages ( OS not known ) "
self.error(msg, self.os)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return command
else:
msg = "Error removing packages ( OS not known ) "
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
run_home = run_home or home
if isinstance(paths, str):
paths = [paths]
- cmd = " ; ".join(map(lambda path: "rm -rf {}".format(path), paths))
+ cmd = " ; ".join(["rm -rf {}".format(path) for path in paths])
return self.execute(cmd, with_lock = True)
msg = " Failed to run command '{}' ".format(command)
self.error(msg, out, err)
if raise_on_error:
- raise RuntimeError, msg
+ raise RuntimeError(msg)
# Wait for pid file to be generated
pid, ppid = self.wait_pid(
self.error(msg, eout, err)
if raise_on_error:
- raise RuntimeError, msg
+ raise RuntimeError(msg)
(out, oerr), proc = self.check_output(home, stdout)
pid = ppid = None
delay = 1.0
- for i in xrange(2):
+ for i in range(2):
pidtuple = self.getpid(home = home, pidfile = pidfile)
if pidtuple:
self.error(msg)
if raise_on_error:
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return pid, ppid
if not self._home_dir:
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def filter_existing_files(self, src, dst):
""" Removes files that already exist in the Linux host from src list
if len(src) > 1 else {dst: src[0]}
command = []
- for d in dests.keys():
+ for d in dests:
command.append(" [ -f {dst} ] && echo '{dst}' ".format(dst=d) )
command = ";".join(command)
(out, err), proc = self.execute(command, retry = 1, with_lock = True)
- for d in dests.keys():
+ for d in dests:
if out.find(d) > -1:
del dests[d]
if not dests:
return []
- return dests.values()
+ # list(..) here added by 2to3 - leaving for safety
+ return list(dests.values())
"prefix" : "CCND_PREFIX",
})
- env = ";".join(map(lambda k: "%s=%s" % (envs.get(k), str(self.get(k))),
- [k for k in envs.keys() if self.get(k)]))
+ env = ";".join(["%s=%s" % (envs.get(k), str(self.get(k))) for k in [k for k in list(envs.keys()) if self.get(k)]])
return env
"ccnsSyncScope": "CCNS_SYNC_SCOPE",
})
- env = ";".join(map(lambda k: "%s=%s" % (envs.get(k), str(self.get(k))),
- [k for k in envs.keys() if self.get(k)]))
+ env = ";".join(["%s=%s" % (envs.get(k), str(self.get(k))) for k in [k for k in list(envs.keys()) if self.get(k)]])
return env
"home": "HOME",
})
- env = ";".join(map(lambda k: "%s=%s" % (envs.get(k), str(self.get(k))),
- [k for k in envs.keys() if self.get(k)]))
+ env = ";".join(["%s=%s" % (envs.get(k), str(self.get(k))) for k in [k for k in list(envs.keys()) if self.get(k)]])
return env
if not devices or len(devices) != 2:
msg = "linux::ns3::TunTapFdLink must be connected to exactly one FdNetDevice"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
self._fd1 = devices[0]
self._fd2 = devices[1]
self._fd2node.get("hostname"):
msg = "linux::ns3::FdUdpTunnel requires endpoints on different hosts"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return [self._fd1, self._fd2]
node = self.endpoint_node(endpoint)
run_home = self.run_home(endpoint)
- for i in xrange(20):
+ for i in range(20):
(out, err), proc = node.check_output(run_home, filename)
if out:
else:
msg = "Couldn't retrieve %s" % filename
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return result
if proc.poll():
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
# Wait for pid file to be generated
pid, ppid = node.wait_pid(run_home)
if err:
msg = " Failed to start command '%s' " % command
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
port = self.wait_local_port(endpoint)
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
import base64
-import cPickle
+import pickle
import errno
import os
import socket
msg = [msg_type, args, kwargs]
def encode(item):
- item = cPickle.dumps(item)
+ item = pickle.dumps(item)
return base64.b64encode(item)
encoded = "|".join(map(encode, msg))
self.simulation.error(msg, reply, err)
raise RuntimeError(msg)
- reply = cPickle.loads(base64.b64decode(reply))
+ reply = pickle.loads(base64.b64decode(reply))
return reply
else:
msg = " Failed to execute command '%s'" % command
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def do_stop(self):
""" Stops simulation execution
"""
command = " [ -e %s ] && echo 'DONE' " % self.remote_socket
- for i in xrange(200):
+ for i in range(200):
(out, err), proc = self.node.execute(command, retry = 1,
with_lock = True)
if not devices or len(devices) != 1:
msg = "TunTapFdLink must be connected to exactly one FdNetDevice"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
self._fdnetdevice = devices[0]
if not devices or len(devices) != 1:
msg = "TunTapLink must be connected to exactly one Tap or Tun"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
self._tap = devices[0]
self.fdnode.get("hostname"):
msg = "Tap and FdNetDevice are not in the same host"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
self.send_address = self.fdnetdevice.recv_fd()
self.set("command", self._start_command)
else:
msg = " Failed to execute command '%s'" % command
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
@property
def _start_command(self):
else:
msg = " Failed to execute command '%s'" % command
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
else:
super(LinuxPing, self).do_start()
elif tun: self._device = tun[0]
elif interface: self._device = interface[0]
else:
- raise RuntimeError, "linux::Routes must be connected to a "\
- "linux::TAP, linux::TUN, or linux::Interface"
+ raise RuntimeError("linux::Routes must be connected to a "\
+ "linux::TAP, linux::TUN, or linux::Interface")
return self._device
@property
else:
msg = " Failed to execute command '%s'" % command
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def do_stop(self):
command = self.get('command') or ''
cmd = install_rpmfusion_command(os)
if cmd: cmd += " ; "
- cmd += " && ".join(map(lambda p:
- " { rpm -q %(package)s || sudo -S yum -y install --nogpgcheck %(package)s ; } " % {
- 'package': p}, packages))
+ cmd += " && ".join([" { rpm -q %(package)s || sudo -S yum -y install --nogpgcheck %(package)s ; } " % {
+ 'package': p} for p in packages])
#cmd = { rpm -q rpmfusion-free-release || sudo -s rpm -i ... ; } && { rpm -q vim || sudo yum -y install vim ; } && ..
return cmd
if not isinstance(packages, list):
packages = [packages]
- cmd = " && ".join(map(lambda p:
- " { rpm -q %(package)s && sudo -S yum -y remove %(package)s ; } " % {
- 'package': p}, packages))
+ cmd = " && ".join([" { rpm -q %(package)s && sudo -S yum -y remove %(package)s ; } " % {
+ 'package': p} for p in packages])
#cmd = { rpm -q vim && sudo yum -y remove vim ; } && ..
return cmd
(local_host, local_port) = rsock.getsockname()
# Save local port information to file
- f = open(local_port_file, 'w')
- f.write("%d\n" % local_port)
- f.close()
+ with open(local_port_file, 'w') as f:
+ f.write("%d\n" % local_port)
# Wait until remote port information is available
while not os.path.exists(remote_port_file):
# even if the file exists and had the port number,
# the read operation returns empty string!
# Maybe a race condition?
- for i in xrange(10):
- f = open(remote_port_file, 'r')
- remote_port = f.read()
- f.close()
+ for i in range(10):
+ with open(remote_port_file, 'r') as f:
+ remote_port = f.read()
if remote_port:
break
# TODO: Test connectivity!
# Create a ret_file to indicate success
- f = open(ret_file, 'w')
- f.write("0")
- f.close()
+ with open(ret_file, 'w') as f:
+ f.write("0")
STARTED = True
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
import base64
import fcntl
import errno
while '\n' not in chunk:
try:
chunk = conn.recv(1024)
- except (OSError, socket.error), e:
+ except (OSError, socket.error) as e:
- if e[0] != errno.EINTR:
+ if e.args[0] != errno.EINTR:
raise
# Ignore eintr errors
if not pi:
flags |= IFF_NO_PI
+ # xxx : Thierry : not quite sure where this gets closed
fd = os.open("/dev/net/tun", os.O_RDWR)
ifreq = struct.pack("16sH", vif_name, flags)
to another process through a unix socket.
"""
address = args.pop(0)
- print address
+ print(address)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.connect(address)
passfd.sendfd(sock, fd, '0')
while not stop:
try:
(msg, args) = recv_msg(conn)
- except socket.timeout, e:
+ except socket.timeout as e:
# Ingore time-out
continue
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
import base64
import socket
- sock.send("%s\n" % encoded)
+ # b64encode returns bytes under Python 3, and sockets require bytes
+ sock.send(b"%s\n" % encoded)
reply = sock.recv(1024)
reply = base64.b64decode(reply)
- print reply
+ print(reply)
except:
- print "Did not properly shutdown device"
+ print("Did not properly shutdown device")
(local_host, local_port) = sock.getsockname()
# Save local port information to file
- f = open(local_port_file, 'w')
- f.write("%d\n" % local_port)
- f.close()
+ with open(local_port_file, 'w') as f:
+ f.write("%d\n" % local_port)
# Wait until remote port information is available
while not os.path.exists(remote_port_file):
# even if the file exists and had the port number,
# the read operation returns empty string!
# Maybe a race condition?
- for i in xrange(10):
- f = open(remote_port_file, 'r')
- remote_port = f.read()
- f.close()
+ for i in range(10):
+ with open(remote_port_file, 'r') as f:
+ remote_port = f.read()
if remote_port:
break
# TODO: Test connectivity!
# Create a ret_file to indicate success
- f = open(ret_file, 'w')
- f.write("0")
- f.close()
+ with open(ret_file, 'w') as f:
+ f.write("0")
# Establish tunnel
tunchannel.tun_fwd(tun, remote,
# Claudio Freire <claudio-daniel.freire@inria.fr>
#
+from __future__ import print_function
import select
import sys
import time
def ipfmt(ip):
- ipbytes = map(ord,ip.decode("hex"))
- return '.'.join(map(str,ipbytes))
+ # the "hex" codec is gone in Python 3; bytes.fromhex yields ints directly
+ return '.'.join(str(b) for b in bytes.fromhex(ip))
tagtype = {
'0806' : 'arp',
buf,
))
-_padmap = [ chr(padding) * padding for padding in xrange(127) ]
+# NOTE(review): under Python 3 packets are bytes; chr() builds str padding and
+# ord(packet[-1]) below breaks (indexing bytes already yields int). This module
+# needs a bytes-oriented pass beyond the mechanical xrange->range change.
+_padmap = [ chr(padding) * padding for padding in range(127) ]
del padding
def encrypt(packet, crypter, len=len, padmap=_padmap):
padding = ord(packet[-1])
if not (0 < padding <= crypter.block_size):
# wrong padding
- raise RuntimeError, "Truncated packet %s"
+ raise RuntimeError("Truncated packet %s" % packet)
packet = packet[:-padding]
return packet
if stderr is not None:
if crypto_mode:
- print >>stderr, "Packets are transmitted in CIPHER"
+ print("Packets are transmitted in CIPHER", file=stderr)
else:
- print >>stderr, "Packets are transmitted in PLAINTEXT"
+ print("Packets are transmitted in PLAINTEXT", file=stderr)
if hasattr(remote, 'fileno'):
remote_fd = remote.fileno()
try:
rdrdy, wrdy, errs = select(rset,wset,eset,1)
- except selecterror, e:
+ except selecterror as e:
if e.args[0] == errno.EINTR:
# just retry
continue
sent = 0
try:
try:
- for x in xrange(maxbatch):
+ for x in range(maxbatch):
packet = pullPacket(fwbuf)
if crypto_mode:
if not rnonblock or not fpacketReady(fwbuf):
break
- except OSError,e:
+ except OSError as e:
# This except handles the entire While block on PURPOSE
# as an optimization (setting a try/except block is expensive)
# The only operation that can raise this exception is rwrite
bwfree -= sent
if tun in wrdy:
try:
- for x in xrange(maxtbatch):
+ for x in range(maxtbatch):
packet = pullPacket(bkbuf)
twrite(tunfd, packet)
#wt += 1
if slowlocal:
# Give some time for the kernel to process the packets
time.sleep(0)
- except OSError,e:
+ except OSError as e:
# This except handles the entire While block on PURPOSE
# as an optimization (setting a try/except block is expensive)
# The only operation that can raise this exception is os_write
# check incoming data packets
if tun in rdrdy:
try:
- for x in xrange(maxbatch):
+ for x in range(maxbatch):
packet = tread(tunfd,2000) # tun.read blocks until it gets 2k!
if not packet:
continue
if not tnonblock or len(fwbuf) >= maxfwbuf:
break
- except OSError,e:
+ except OSError as e:
# This except handles the entire While block on PURPOSE
# as an optimization (setting a try/except block is expensive)
# The only operation that can raise this exception is os_read
if remote in rdrdy:
try:
try:
- for x in xrange(maxbatch):
+ for x in range(maxbatch):
packet = rread(remote,2000)
#rr += 1
elif not packet:
+ # NOTE(review): under Python 3 rread returns bytes, so packet == ""
+ # can never be true; broken-connection detection needs b"" here.
if not udp and packet == "":
# Connection broken, try to reconnect (or just die)
- raise RuntimeError, "Connection broken"
+ raise RuntimeError("Connection broken")
else:
continue
if not rnonblock or len(bkbuf) >= maxbkbuf:
break
- except OSError,e:
+ except OSError as e:
# This except handles the entire While block on PURPOSE
# as an optimization (setting a try/except block is expensive)
# The only operation that can raise this exception is rread
if e.errno not in retrycodes:
raise
- except Exception, e:
+ except Exception as e:
if reconnect is not None:
# in UDP mode, sometimes connected sockets can return a connection refused
# on read. Give the caller a chance to reconnect
def udp_connect(TERMINATE, local_addr, local_port, peer_addr, peer_port):
rsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
retrydelay = 1.0
- for i in xrange(30):
+ for i in range(30):
# TERMINATE is a array. An item can be added to TERMINATE, from
# outside this function to force termination of the loop
if TERMINATE:
- raise OSError, "Killed"
+ raise OSError("Killed")
try:
rsock.bind((local_addr, local_port))
break
except socket.error:
# wait a while, retry
- print >>sys.stderr, "%s: Could not bind. Retrying in a sec..." % (time.strftime('%c'),)
+ print("%s: Could not bind. Retrying in a sec..." % (time.strftime('%c'),), file=sys.stderr)
time.sleep(min(30.0,retrydelay))
retrydelay *= 1.1
else:
rsock.bind((local_addr, local_port))
- print >>sys.stderr, "Listening UDP at: %s:%d" % (local_addr, local_port)
- print >>sys.stderr, "Connecting UDP to: %s:%d" % (peer_addr, peer_port)
+ print("Listening UDP at: %s:%d" % (local_addr, local_port), file=sys.stderr)
+ print("Connecting UDP to: %s:%d" % (peer_addr, peer_port), file=sys.stderr)
rsock.connect((peer_addr, peer_port))
return rsock
pass
keepalive_thread = threading.Thread(target=keepalive)
keepalive_thread.start()
- for i in xrange(900):
+ for i in range(900):
if TERMINATE:
- raise OSError, "Killed"
+ raise OSError("Killed")
try:
heartbeat = rsock.recv(10)
break
# will be forever blocked in the connect, so we put a reasonable timeout.
rsock.settimeout(10)
# We wait for
- for i in xrange(30):
+ for i in range(30):
if stop:
break
if TERMINATE:
- raise OSError, "Killed"
+ raise OSError("Killed")
try:
rsock.connect((peer_addr, peer_port))
sock = rsock
break
except socket.error:
# wait a while, retry
- print >>sys.stderr, "%s: Could not connect. Retrying in a sec..." % (time.strftime('%c'),)
+ print("%s: Could not connect. Retrying in a sec..." % (time.strftime('%c'),), file=sys.stderr)
time.sleep(min(30.0,retrydelay))
retrydelay *= 1.1
else:
rsock.connect((peer_addr, peer_port))
sock = rsock
if sock:
- print >>sys.stderr, "tcp_connect: TCP sock connected to remote %s:%s" % (peer_addr, peer_port)
+ print("tcp_connect: TCP sock connected to remote %s:%s" % (peer_addr, peer_port), file=sys.stderr)
sock.settimeout(0)
- print >>sys.stderr, "tcp_connect: disabling NAGLE"
+ print("tcp_connect: disabling NAGLE", file=sys.stderr)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
return sock
retrydelay = 1.0
# We try to bind to the local virtual interface.
# It might not exist yet so we wait in a loop.
- for i in xrange(30):
+ for i in range(30):
if stop:
break
if TERMINATE:
- raise OSError, "Killed"
+ raise OSError("Killed")
try:
lsock.bind((local_addr, local_port))
break
except socket.error:
# wait a while, retry
- print >>sys.stderr, "%s: Could not bind. Retrying in a sec..." % (time.strftime('%c'),)
+ print("%s: Could not bind. Retrying in a sec..." % (time.strftime('%c'),), file=sys.stderr)
time.sleep(min(30.0,retrydelay))
retrydelay *= 1.1
else:
lsock.bind((local_addr, local_port))
- print >>sys.stderr, "tcp_listen: TCP sock listening in local sock %s:%s" % (local_addr, local_port)
+ print("tcp_listen: TCP sock listening in local sock %s:%s" % (local_addr, local_port), file=sys.stderr)
# Now we wait until the other side connects.
# The other side might not be ready yet, so we also wait in a loop for timeouts.
timeout = 1
lsock.listen(1)
- for i in xrange(30):
+ for i in range(30):
if TERMINATE:
- raise OSError, "Killed"
+ raise OSError("Killed")
rlist, wlist, xlist = select.select([lsock], [], [], timeout)
if stop:
break
if lsock in rlist:
sock,raddr = lsock.accept()
- print >>sys.stderr, "tcp_listen: TCP connection accepted in local sock %s:%s" % (local_addr, local_port)
+ print("tcp_listen: TCP connection accepted in local sock %s:%s" % (local_addr, local_port), file=sys.stderr)
break
timeout += 5
return sock
rsock.send(hand)
peer_hand = rsock.recv(4)
if not peer_hand:
- print >>sys.stderr, "tcp_handshake: connection reset by peer"
+ print("tcp_handshake: connection reset by peer", file=sys.stderr)
return False
else:
- print >>sys.stderr, "tcp_handshake: hand %r, peer_hand %r" % (hand, peer_hand)
+ print("tcp_handshake: hand %r, peer_hand %r" % (hand, peer_hand), file=sys.stderr)
if hand < peer_hand:
if listen:
win = True
end = False
sock = None
- for i in xrange(0, 50):
+ for i in range(0, 50):
if end:
break
if TERMINATE:
- raise OSError, "Killed"
+ raise OSError("Killed")
hand = struct.pack("!L", random.randint(0, 2**30))
stop = []
lresult = []
end = True
if not sock:
- raise OSError, "Error: tcp_establish could not establish connection."
+ raise OSError("Error: tcp_establish could not establish connection.")
return sock
-
-
def node(self):
node = self.get_connected(LinuxNode.get_rtype())
if node: return node[0]
- raise RuntimeError, "linux::TAP/TUN devices must be connected to a linux::Node"
+ raise RuntimeError("linux::TAP/TUN devices must be connected to a linux::Node")
@property
def gre_enabled(self):
else:
msg = " Failed to execute command '%s'" % command
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def do_stop(self):
command = self.get('command') or ''
if proc.poll() or err:
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
# Wait for pid file to be generated
pid, ppid = self.node.wait_pid(connection_run_home,
if err:
msg = " Failed to start command '%s' " % command
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return True
if proc.poll():
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
# Wait for pid file to be generated
self._pid, self._ppid = self.node.wait_pid(
if err:
msg = " Failed to start command '%s' " % command
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return self.wait_file(connection_run_home, "local_port")
if proc.poll() and err:
msg = " Failed to Kill the Tap"
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def check_status(self):
return self.node.status(self._pid, self._ppid)
result = None
delay = 1.0
- for i in xrange(20):
+ for i in range(20):
(out, err), proc = self.node.check_output(home, filename)
if out:
result = out.strip()
else:
msg = "Couldn't retrieve %s" % filename
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return result
else:
msg = " Failed to execute command '%s'" % command
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
else:
super(LinuxTraceroute, self).do_start()
else:
msg = " Failed to execute command '%s'" % command
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def do_stop(self):
""" Stops application execution
else:
msg = " Failed to execute command '%s'" % command
self.error(msg, out, err)
- raise RuntimeError, err
+ raise RuntimeError(err)
else:
super(LinuxUdpTest, self).do_start()
if not node:
msg = "Route not connected to Node!!"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return node[0]
objects to be deployed before proceeding with the deployment
"""
- raise RuntimeError, "No dependencies defined!"
+ raise RuntimeError("No dependencies defined!")
def _instantiate_object(self):
pass
else:
msg = " Failed "
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def do_stop(self):
if self.state == ResourceState.STARTED:
if not interface:
msg = "IPv4Address not connected to Interface!!"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return interface[0]
msg = "Node not connected to Emulation"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
@property
def _rms_to_wait(self):
if not node:
msg = "Route not connected to Node!!"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return node[0]
if not node:
msg = "Route not connected to Node!!"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return node[0]
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
import base64
-import cPickle
+import pickle
import errno
import logging
import os
while '\n' not in chunk:
try:
chunk = conn.recv(1024)
- except (OSError, socket.error), e:
+ except (OSError, socket.error) as e:
- if e[0] != errno.EINTR:
+ if e.args[0] != errno.EINTR:
raise
# Ignore eintr errors
def decode(item):
item = base64.b64decode(item).rstrip()
- return cPickle.loads(item)
+ return pickle.loads(item)
- decoded = map(decode, msg.split("|"))
+ decoded = list(map(decode, msg.split("|")))
# decoded message
dmsg_type = decoded.pop(0)
return (dmsg_type, dargs, dkwargs)
def send_reply(conn, reply):
- encoded = base64.b64encode(cPickle.dumps(reply))
+ encoded = base64.b64encode(pickle.dumps(reply))
- conn.send("%s\n" % encoded)
+ # b64encode returns bytes under Python 3, and sockets require bytes
+ conn.send(b"%s\n" % encoded)
def get_options():
try:
(msg_type, args, kwargs) = recv_msg(conn)
- except socket.timeout, e:
+ except socket.timeout as e:
# Ingore time-out
continue
if not interface:
msg = "Switch not connected to any Interface!!"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return interface[0]
if clazzname == "open":
path = args[0]
mode = args[1]
+ # xxx Thierry: not sure where this gets closed
obj = open(path, mode)
else:
clazz = getattr(netns, clazzname)
# If the result is an object (not a base value),
# then keep track of the object a return the object
# reference (newuuid)
+ # note: 2to3 maps `long` to `int`, which was already listed; keep one
if not (result is None or type(result) in [
- bool, float, long, str, int]):
+ bool, float, int, str]):
self._objects[newuuid] = result
result = newuuid
def replace_kwargs(self, kwargs):
realkwargs = dict([(k, self.get_object(v) \
if str(v).startswith("uuid") else v) \
- for k,v in kwargs.iteritems()])
+ for k,v in kwargs.items()])
return realkwargs
return self._script_path
def dump_to_script(self, command):
- f = open(self.script_path, "a")
- f.write("%s" % command)
- f.close()
+ with open(self.script_path, "a") as f:
+ f.write("%s" % command)
def dump_header(self):
if not self.enabled:
return pprint.pformat(value)
def format_args(self, args):
- fargs = map(self.format_value, args)
- return "[%s]" % ",".join(fargs)
+ return "[%s]" % ",".join(self.format_value(arg) for arg in args)
def format_kwargs(self, kwargs):
- fkwargs = map(lambda (k,w):
- "%s: %s" % (self.format_value(k), self.format_value(w)),
- kwargs.iteritems())
+ fkwargs = ["%s: %s" % (self.format_value(k), self.format_value(v)) for (k, v) in kwargs.items()]
return "dict({%s})" % ",".join(fkwargs)
if not nodes:
msg = "Application not connected to node"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
self._node = nodes[0]
if not nodes:
msg = "ArpL3Protocol not connected to node"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return nodes[0]
else:
msg = "Failed"
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def do_stop(self):
if self.state == ResourceState.STARTED:
if not devices:
msg = "Channel not connected to devices"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return devices
if not devices:
msg = "ErrorModel not connected to device"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return devices[0]
if not phys:
msg = "ErrorRateModel not connected to phy"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return phys[0]
if mode != "ns3::RealtimeSimulatorImpl":
msg = "The simulation must run in real time!!"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
super(NS3BaseFdNetDevice, self)._instantiate_object()
if not nodes:
msg = "Icmp4L4Protocol not connected to node"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return nodes[0]
if not nodes:
msg = "Ipv4L3Protocol not connected to node"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return nodes[0]
from nepi.execution.trace import Trace
from nepi.resources.ns3.ns3base import NS3Base
-import ipaddr
+import ipaddress
@clsinit_copy
class NS3BaseNetDevice(NS3Base):
if not nodes:
msg = "Device not connected to node"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return nodes[0]
if not channels:
msg = "Device not connected to channel"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return channels[0]
if not queue:
msg = "Device not connected to queue"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return queue[0]
ip = self.get("ip")
prefix = self.get("prefix")
- i = ipaddr.IPAddress(ip)
+ i = ipaddress.ip_address(ip)
if i.version == 4:
# IPv4
ipv4 = self.node.ipv4
if not self._simulation:
msg = "Node not connected to simulation"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return self._simulation
if not devices:
msg = "Node not connected to devices"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
self._devices = devices
if not devices or len(devices) != 2:
msg = "PipeChannel must be connected to exactly to two FdNetDevices"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
self._devices = devices
if not channels:
msg = "PropagationDelayModel not connected to channel"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return channels[0]
if not channels:
msg = "PropagationLossModel not connected to channel"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return channels[0]
if not devices:
msg = "Queue not connected to device"
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return devices[0]
from nepi.execution.trace import Trace
from nepi.resources.ns3.ns3base import NS3Base
-import ipaddr
-
@clsinit_copy
class NS3Route(NS3Base):
_rtype = "ns3::Route"
if not nodes:
msg = "Device not connected to node"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return nodes[0]
msg = "Could not configure route %s/%s hop: %s" % (network, prefix,
nexthop)
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def _connect_object(self):
node = self.node
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
import base64
-import cPickle
+import pickle
import errno
import logging
import os
while '\n' not in chunk:
try:
chunk = conn.recv(1024)
- except (OSError, socket.error), e:
+ except (OSError, socket.error) as e:
- if e[0] != errno.EINTR:
+ if e.args[0] != errno.EINTR:
raise
# Ignore eintr errors
def decode(item):
item = base64.b64decode(item).rstrip()
- return cPickle.loads(item)
+ return pickle.loads(item)
- decoded = map(decode, msg.split("|"))
+ decoded = [ decode(x) for x in msg.split("|") ]
# decoded message
dmsg_type = decoded.pop(0)
return (dmsg_type, dargs, dkwargs)
def send_reply(conn, reply):
- encoded = base64.b64encode(cPickle.dumps(reply))
+ encoded = base64.b64encode(pickle.dumps(reply))
- conn.send("%s\n" % encoded)
+ # b64encode returns bytes under Python 3, and sockets require bytes
+ conn.send(b"%s\n" % encoded)
def get_options():
try:
(msg_type, args, kwargs) = recv_msg(conn)
- except socket.timeout, e:
+ except socket.timeout as e:
# Ingore time-out
close_socket(conn)
continue
if not phys:
msg = "Channel not connected to phy"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return phys
def _register_attributes(cls):
standard = Attribute("Standard", "Wireless standard",
default = "WIFI_PHY_STANDARD_80211a",
- allowed = WIFI_STANDARDS.keys(),
+ # staying safe with 2to3's generated list
+ allowed = list(WIFI_STANDARDS.keys()),
type = Types.Enumerate,
flags = Flags.Design)
if not devices:
msg = "WifiMac not connected to device"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return devices[0]
def _register_attributes(cls):
standard = Attribute("Standard", "Wireless standard",
default = "WIFI_PHY_STANDARD_80211a",
- allowed = WIFI_STANDARDS.keys(),
+ allowed = list(WIFI_STANDARDS.keys()),
type = Types.Enumerate,
flags = Flags.Design)
if not devices:
msg = "WifiPhy not connected to device"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return devices[0]
if not channels:
msg = "WifiPhy not connected to channel"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return channels[0]
if not devices:
msg = "WifiRemoteStationManager not connected to device"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return devices[0]
tid_count = type_id.GetRegisteredN()
base = type_id.LookupByName("ns3::Object")
- for i in xrange(tid_count):
+ for i in range(tid_count):
tid = type_id.GetRegistered(i)
if tid.MustHideFromDocumentation() or \
factory = self.ns3.ObjectFactory()
factory.SetTypeId(type_name)
- for name, value in kwargs.iteritems():
+ for name, value in kwargs.items():
ns3_value = self._attr_from_string_to_ns3_value(type_name, name, value)
factory.Set(name, ns3_value)
# If the result is an object (not a base value),
# then keep track of the object a return the object
# reference (newuuid)
+ # note: 2to3 maps `long` to `int`, which was already listed; keep one
if not (result is None or type(result) in [
- bool, float, long, str, int]):
+ bool, float, int, str]):
self._objects[newuuid] = result
result = newuuid
condition.release()
# contextId is defined as general context
- contextId = long(0xffffffff)
+ contextId = 0xffffffff
# delay 0 means that the event is expected to execute inmediately
delay = self.ns3.Seconds(0)
def replace_kwargs(self, kwargs):
realkwargs = dict([(k, self.get_object(v) \
if str(v).startswith("uuid") else v) \
- for k,v in kwargs.iteritems()])
+ for k,v in kwargs.items()])
realkwargs = dict([(k, self._singleton(v) \
if str(v).startswith(SINGLETON) else v )\
- for k, v in realkwargs.iteritems()])
+ for k, v in realkwargs.items()])
return realkwargs
# For all the interfaces registered with the ipv4 object, find
# the one that matches the network of the nexthop
nifaces = ipv4.GetNInterfaces()
- for ifidx in xrange(nifaces):
+ for ifidx in range(nifaces):
iface = ipv4.GetInterface(ifidx)
naddress = iface.GetNAddresses()
- for addridx in xrange(naddress):
+ for addridx in range(naddress):
ifaddr = iface.GetAddress(addridx)
ifmask = ifaddr.GetMask()
newuuid = None
if search:
# search object
- for ouuid, oobj in self._objects.iteritems():
+ for ouuid, oobj in self._objects.items():
if nobj == oobj:
newuuid = ouuid
break
return self._script_path
def dump_to_script(self, command):
- f = open(self.script_path, "a")
- f.write("%s" % command)
- f.close()
+ with open(self.script_path, "a") as f:
+ f.write("%s" % command)
def dump_header(self):
if not self.enabled:
return pprint.pformat(value)
def format_args(self, args):
- fargs = map(self.format_value, args)
- return "[%s]" % ",".join(fargs)
+ return "[%s]" % ",".join(self.format_value(arg) for arg in args)
def format_kwargs(self, kwargs):
- fkwargs = map(lambda (k,w):
- "%s: %s" % (self.format_value(k), self.format_value(w)),
- kwargs.iteritems())
+ fkwargs = ["%s: %s" % (self.format_value(k), self.format_value(v)) for (k, v) in kwargs.items()]
return "dict({%s})" % ",".join(fkwargs)
# PYTHONPATH=$PYTHONPATH:~/repos/nepi/src python src/nepi/resources/ns3/resource_manager_generator.py
#
+from __future__ import print_function
+
# Force the load of ns3 libraries
from nepi.resources.ns3.ns3wrapper import load_ns3_module
base = type_id.LookupByName("ns3::Object")
# Create a .py file using the ns-3 RM template for each ns-3 TypeId
- for i in xrange(tid_count):
+ for i in range(tid_count):
tid = type_id.GetRegistered(i)
(base_class_import, base_class) = select_base_class(ns3, tid)
short_rtype = uncamm_rtype.replace("::","-")
d = os.path.dirname(os.path.realpath(__file__))
- ftemp = open(os.path.join(d, "templates", "resource_manager_template.txt"), "r")
- template = ftemp.read()
- ftemp.close()
+ with open(os.path.join(d, "templates", "resource_manager_template.txt"), "r") as ftemp:
+ template = ftemp.read()
template = template. \
replace("<CLASS_NAME>", classname). \
replace('::', ''). \
replace("-","_").lower() + ".py"
- f = open(os.path.join(d, "classes", fname), "w")
- print os.path.join(d, fname)
- print template
- f.write(template)
- f.close()
+ with open(os.path.join(d, "classes", fname), "w") as f:
+ print(os.path.join(d, fname))
+ print(template)
+ f.write(template)
def template_attributes(ns3, tid):
d = os.path.dirname(os.path.realpath(__file__))
- ftemp = open(os.path.join(d, "templates", "attribute_template.txt"), "r")
- template = ftemp.read()
- ftemp.close()
+ with open(os.path.join(d, "templates", "attribute_template.txt"), "r") as ftemp:
+ template = ftemp.read()
attributes = ""
attr_count = tid.GetAttributeN()
- for i in xrange(attr_count):
+ for i in range(attr_count):
attr_info = tid.GetAttribute(i)
if not attr_info.accessor.HasGetter():
continue
elif isinstance(value, ns3.EnumValue):
attr_type = "Types.Enumerate"
allowed = checker.GetUnderlyingTypeInformation().split("|")
- attr_allowed = "[%s]" % ",".join(map(lambda x: "\"%s\"" % x, allowed))
+ attr_allowed = "[%s]" % ",".join(["\"%s\"" % x for x in allowed])
elif isinstance(value, ns3.DoubleValue):
attr_type = "Types.Double"
# TODO: range
def template_traces(ns3, tid):
d = os.path.dirname(os.path.realpath(__file__))
- ftemp = open(os.path.join(d, "templates", "trace_template.txt"), "r")
- template = ftemp.read()
- ftemp.close()
+ with open(os.path.join(d, "templates", "trace_template.txt"), "r") as ftemp:
+ template = ftemp.read()
traces = ""
trace_count = tid.GetTraceSourceN()
- for i in xrange(trace_count):
+ for i in range(trace_count):
trace_info = tid.GetTraceSource(i)
trace_name = trace_info.name
trace_help = trace_info.help.replace('"', '\\"').replace("'", "\\'")
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
# Julien Tribino <julien.tribino@inria.fr>
+from __future__ import print_function
+
import os
from nepi.util.timefuncs import tnow
if not self.get('xmppServer'):
msg = "XmppServer is not initialzed. XMPP Connections impossible"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
if not (self.get('xmppUser') or self.get('xmppPort')
or self.get('xmppPassword')):
if not self.get('command') :
msg = "Application's Command is not initialized"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
if not self._omf_api :
self._omf_api = OMFAPIFactory.get_api(self.get('version'),
if self._create_cnt > confirmation_counter:
msg = "Couldn't retrieve the confirmation of the creation"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
uid = self.check_deploy(self.create_id)
if not uid:
if attr == TraceAttr.ALL:
try:
- f = open(trace_path ,'r')
+ with open(trace_path ,'r') as f:
+ return f.read()
except IOError:
- print "File with traces has not been found"
+ print("File with traces has not been found")
return False
- out = f.read()
- f.close()
- return out
def do_start(self):
if self._start_cnt > confirmation_counter:
msg = "Couldn't retrieve the confirmation that the application started"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
res = self.check_start(self._topic_app)
if not res:
if not self.get('channel'):
msg = "Channel's value is not initialized"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
if self.get('version') == "6":
self.frequency = self.get_frequency(self.get('channel'))
if not self.get('xmppServer'):
msg = "XmppServer is not initialzed. XMPP Connections impossible"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
if not (self.get('xmppUser') or self.get('xmppPort')
or self.get('xmppPassword')):
if not self.get('xmppServer'):
msg = "XmppServer is not initialzed. XMPP Connections impossible"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
if not (self.get('xmppUser') or self.get('xmppPort')
or self.get('xmppPassword')):
if not (self.get('name')):
msg = "Interface's name is not initialized"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
if not (self.get('mode') and self.get('essid') \
and self.get('hw_mode') and self.get('ip')):
msg = "Interface's variable are not initialized"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
if self.get('version') == "5":
res = self.configure_on_omf5()
if self._create_cnt > confirmation_counter:
msg = "Couldn't retrieve the confirmation of the creation"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
uid = self.check_deploy(self.create_id)
if not uid:
else:
properties = self._attr_element(payload,"props","")
- for prop in props.keys():
+ for prop in props:
if isinstance(props[prop],str):
self._attr_element(properties,prop,props[prop],type_key="type", type_value = "string")
elif isinstance(props[prop],dict):
key = self._attr_element(properties,prop,"",type_key="type", type_value = "hash")
- for comp in props[prop].keys():
+ for comp in props[prop]:
self._attr_element(key,comp,props[prop][comp],type_key="type", type_value = "string")
if guards :
guardians = self._attr_element(payload,"guard","")
- for guard in guards.keys():
+ for guard in guards:
self._attr_element(guardians,guard,guards[guard],type_key="type", type_value = "string")
return payload
if props :
properties = self._attr_element(payload,"props","")
- for prop in props.keys():
+ for prop in props:
self._attr_element(properties,prop,props[prop],type_key="type", type_value = "symbol")
if guards :
guardians = self._attr_element(payload,"guard","")
- for guard in guards.keys():
+ for guard in guards:
self._attr_element(guardians,guard,guards[guard],type_key="type", type_value = "string")
return payload
if props :
properties = self._attr_element(payload,"props","")
- for prop in props.keys():
+ for prop in list(props.keys()):
self._attr_element(properties,prop,props[prop])
if guards :
guardians = self._attr_element(payload,"guard","")
- for guard in guards.keys():
+ for guard in list(guards.keys()):
self._attr_element(guardians,guard,guards[guard])
return payload
if props :
properties = self._id_element(payload,"props","xmlns:frcp",
"http://schema.mytestbed.net/omf/6.0/protocol")
- for prop in props.keys():
+ for prop in list(props.keys()):
self._attr_element(properties,prop,props[prop])
if guards :
guardians = self._attr_element(payload,"guard","")
- for guard in guards.keys():
+ for guard in list(guards.keys()):
self._attr_element(guardians,guard,guards[guard])
return payload
if not self.get('xmppServer'):
msg = "XmppServer is not initialzed. XMPP Connections impossible"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
if not self.get('version'):
msg = "Version of OMF is not indicated"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
if not (self.get('xmppUser') or self.get('xmppPort')
or self.get('xmppPassword')):
if not self.get('hostname') :
msg = "Hostname's value is not initialized"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
if self.get('version') == "5":
self._omf_api.enroll_host(self.get('hostname'))
def check_ready(self, xmpp):
delay = 1.0
- for i in xrange(15):
+ for i in range(15):
if xmpp.ready:
break
else:
else:
msg = "XMPP Client is not ready after long time"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
@property
def _nepi_topic(self):
event = self._check_for_tag(root, namespaces, "event")
log = "STATUS -- "
- for elt in props.keys():
+ for elt in props:
ns, tag = elt.split('}')
if tag == "it":
log = log + "membership : " + props[elt]+" -- "
elif tag == "msg":
if event == "STDOUT" :
filename = os.path.join("/tmp", "%s.out" % uid)
- f = open(filename,'a+')
- # XXX: Adding fake \n for visual formatting
- msg = props[elt] # + "\n"
- f.write(msg)
- f.close()
+ with open(filename,'a+') as f:
+ # XXX: Adding fake \n for visual formatting
+ msg = props[elt] # + "\n"
+ f.write(msg)
elif event == "STDERR" :
filename = os.path.join("/tmp", "%s.err" % uid)
- f = open(filename,'a+')
- # XXX: Adding fake \n for visual formatting
- msg = props[elt] # + "\n"
- f.write(msg)
- f.close()
+ with open(filename,'a+') as f:
+ # XXX: Adding fake \n for visual formatting
+ msg = props[elt] # + "\n"
+ f.write(msg)
log = log + tag +" : " + props[elt]+" -- "
else:
log = log + tag +" : " + props[elt]+" -- "
attribute hostname, it will over write the previous value.
"""
hosts_hrn = self.sfaapi.get_resources_hrn()
- for host, hrn in hosts_hrn.iteritems():
+ for host, hrn in hosts_hrn.items():
if hrn == node:
host = host + '.wilab2.ilabt.iminds.be'
self.set("host", host)
def fail_discovery(self):
msg = "Discovery failed. No candidates found for node"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def fail_node_not_alive(self, host=None):
msg = "Node %s not alive" % host
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def fail_node_not_available(self, host):
msg = "Some nodes not available for provisioning"
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def fail_not_enough_nodes(self):
msg = "Not enough nodes available for provisioning"
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def fail_sfaapi(self):
msg = "Failing while trying to instanciate the SFA API."
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def valid_connection(self, guid):
# TODO: Validate!
plblacklist_file = os.path.join(nepi_home, "plblacklist.txt")
if not os.path.exists(plblacklist_file):
if os.path.isdir(nepi_home):
- open(plblacklist_file, 'w').close()
+ with open(plblacklist_file, 'w') as clear:
+ pass
else:
os.makedirs(nepi_home)
- open(plblacklist_file, 'w').close()
+ with open(plblacklist_file, 'w') as clear:
+ pass
def _skip_provision(self):
pl_user = self.get("pluser")
nodes_id = []
filters = {}
- for attr_name, attr_obj in self._attrs.iteritems():
+ for attr_name, attr_obj in self._attrs.items():
attr_value = self.get(attr_name)
if attr_value is not None and attr_obj.has_flag(Flags.Filter) and \
def fail_discovery(self):
msg = "Discovery failed. No candidates found for node"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def fail_node_not_alive(self, hostname=None):
msg = "Node %s not alive" % hostname
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def fail_node_not_available(self, hostname):
msg = "Node %s not available for provisioning" % hostname
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def fail_not_enough_nodes(self):
msg = "Not enough nodes available for provisioning"
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def fail_plapi(self):
msg = "Failing while trying to instanciate the PLC API.\nSet the" + \
" attributes pluser and plpassword."
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def valid_connection(self, guid):
# TODO: Validate!
if not devices or len(devices) != 2:
msg = "Tunnel must be connected to exactly two FdNetDevices"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
self._fd1 = devices[0]
self._fd2 = devices[1]
self._fd2node.get("hostname"):
msg = "Tunnel requires endpoints on different hosts"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return [self._fd1, self._fd2]
if not devices or len(devices) != 1:
msg = "planetlab::ns3::TunTapFdLink must be connected to exactly one FdNetDevice"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
self._fdnetdevice = devices[0]
if not devices or len(devices) != 1:
msg = "planetlab::ns3::TunTapFdLink must be connected to exactly one PlanetlabTap"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
self._tap = devices[0]
if not nodes or len(nodes) != 1:
msg = "PlanetlabOVSSwitch must be connected to exactly one PlanetlabNode"
#self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
self._node = nodes[0]
except RuntimeError:
msg = "Command sliver-ovs does not exist on the VM"
self.debug(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def servers_on(self):
""" Start the openvswitch servers and check it
except RuntimeError:
msg = "Failed to start ovs-server on VM"
self.debug(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
command = "ps -A | grep ovsdb-server"
shfile = os.path.join(self.app_home, "ovsdb_status.sh")
except RuntimeError:
msg = "ovsdb-server not running on VM"
self.debug(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
self.info("Server OVS Started...")
if not (self.get("bridge_name") and self.get("virtual_ip_pref")):
msg = "No assignment in one or both attributes"
self.error(msg)
- raise AttributeError, msg
+ raise AttributeError(msg)
command = "sliver-ovs create-bridge '%s' '%s'" % (
self.get("bridge_name"),
except RuntimeError:
msg = "No such pltap netdev\novs-appctl: ovs-vswitchd: server returned an error"
self.debug(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
self.info(" Bridge %s Created and Assigned to %s" %\
(self.get("bridge_name"), self.get("virtual_ip_pref")) )
except RuntimeError:
msg = "SSH connection in the method assign_controller"
self.debug(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
self.info("Controller assigned to the bridge %s" % self.get("bridge_name"))
except RuntimeError:
msg = "Error when checking the status of the OpenVswitch"
self.debug(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def do_release(self):
""" Delete the bridge and close the server.
if not self.get('port_name'):
msg = "The port name is not assigned"
self.error(msg)
- raise AttributeError, msg
+ raise AttributeError(msg)
if not self.ovsswitch:
msg = "The OVSwitch RM is not running"
self.error(msg)
- raise AttributeError, msg
+ raise AttributeError(msg)
command = "sliver-ovs create-port %s %s" % (
self.ovsswitch.get('bridge_name'),
except RuntimeError:
msg = "Could not create ovs-port"
self.debug(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
self.info("Created port %s on switch %s" % (
self.get('port_name'),
if err != "":
msg = "Error retrieving the local endpoint of the port"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
if out:
self._port_number = out.strip()
import os
import time
import threading
-import xmlrpclib
+import xmlrpc.client
def _retry(fn):
def rv(*p, **kw):
- for x in xrange(5):
+ for x in range(5):
try:
return fn(*p, **kw)
except (socket.error, IOError, OSError):
self._url = urlpattern % {'hostname':hostname}
if (proxy is not None):
- import urllib2
- class HTTPSProxyTransport(xmlrpclib.Transport):
+ import urllib.request, urllib.error, urllib.parse
+ class HTTPSProxyTransport(xmlrpc.client.Transport):
def __init__(self, proxy, use_datetime=0):
- opener = urllib2.build_opener(urllib2.ProxyHandler({"https" : proxy}))
- xmlrpclib.Transport.__init__(self, use_datetime)
+ opener = urllib.request.build_opener(urllib.request.ProxyHandler({"https" : proxy}))
+ xmlrpc.client.Transport.__init__(self, use_datetime)
self.opener = opener
def request(self, host, handler, request_body, verbose=0):
- req = urllib2.Request('https://%s%s' % (host, handler), request_body)
+ req = urllib.request.Request('https://%s%s' % (host, handler), request_body)
req.add_header('User-agent', self.user_agent)
self.verbose = verbose
return self.parse_response(self.opener.open(req))
@property
def api(self):
# Cannot reuse same proxy in all threads, py2.7 is not threadsafe
- return xmlrpclib.ServerProxy(
+ return xmlrpc.client.ServerProxy(
self._url ,
transport = self._proxy_transport(),
allow_none = True)
try:
# test authorization
network_types = _retry(self.mcapi.GetNetworkTypes)(self.auth)
- except (xmlrpclib.ProtocolError, xmlrpclib.Fault),e:
+ except (xmlrpc.client.ProtocolError, xmlrpc.client.Fault) as e:
warnings.warn(str(e))
return True
* nodefamily : string, the nodefamily this node should be based upon
* plain : boolean, use plain bootstrapfs image if set (for tests)
"""
- if not isinstance(node, (str, int, long)):
- raise ValueError, "Node must be either a non-unicode string or an int"
+ if not isinstance(node, (str, int)):
+ raise ValueError("Node must be either a non-unicode string or an int")
return _retry(self.mcapi.GetNodeFlavour)(self.auth, node)
def get_nodes(self, node_id_or_name = None, fields = None, **kw):
filters = filters, peer=None, **kw)
)
else:
- peer_filter = map(name_to_id, peer)
+ peer_filter = [name_to_id(x) for x in peer]
elif peer is None or peer == self._local_peer:
peer_filter = None
return _retry(self.mcapi.DeleteSliceFromNodes)(self.auth, slice_id_or_name, node_id_or_hostname)
def start_multicall(self):
- self.threadlocal.mc = xmlrpclib.MultiCall(self.mcapi)
+ self.threadlocal.mc = xmlrpc.client.MultiCall(self.mcapi)
def finish_multicall(self):
mc = self.threadlocal.mc
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
import base64
import errno
import passfd
while '\n' not in chunk:
try:
chunk = conn.recv(1024)
- except (OSError, socket.error), e:
+ except (OSError, socket.error) as e:
-            if e[0] != errno.EINTR:
+            if e.errno != errno.EINTR:
raise
# Ignore eintr errors
to another process through a unix socket.
"""
address = args.pop(0)
- print address
+ print(address)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.connect(address)
passfd.sendfd(sock, fd, '0')
pointopoint = pointopoint, txqueuelen = txqueuelen)
# Saving interface name to vif_name_file
- f = open(vif_name_file, 'w')
- f.write(vif_name)
- f.close()
+ with open(vif_name_file, 'w') as f:
+ f.write(vif_name)
# create unix socket to receive instructions
sock = create_socket(socket_name)
while not stop:
try:
(msg, args) = recv_msg(conn)
- except socket.timeout, e:
+ except socket.timeout as e:
# Ingore time-out
continue
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
import base64
import socket
import vsys
sock.send("%s\n" % encoded)
reply = sock.recv(1024)
reply = base64.b64decode(reply)
- print reply
+ print(reply)
except:
- print "Did not properly shutdown device"
+ print("Did not properly shutdown device")
# If a slicename is provided, use it to remove a GRE device
elif slicename:
import pwd
sys.exit(1)
# Saving interface name to vif_name_file
- f = open(vif_name_file, 'w')
- f.write(vif_name)
- f.close()
-
+ with open(vif_name_file, 'w') as f:
+ f.write(vif_name)
#
# Author: Lucia Guevgeozian <lucia.guevgeozian_odizzio@inria.fr>
+from __future__ import print_function
+
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.resource import ResourceManager, clsinit_copy, \
ResourceState
plblacklist_file = os.path.join(nepi_home, "plblacklist.txt")
if not os.path.exists(plblacklist_file):
if os.path.isdir(nepi_home):
- open(plblacklist_file, 'w').close()
+ with open(plblacklist_file, 'w') as clear:
+ pass
else:
os.makedirs(nepi_home)
- open(plblacklist_file, 'w').close()
+ with open(plblacklist_file, 'w') as clear:
+ pass
def _skip_provision(self):
sfa_user = self.get("sfauser")
# and perform ping to check that is really alive
if not self._blacklisted(host_hrn):
if not self._reserved(host_hrn):
- print self.sfaapi._reserved ,self.guid
- for hostname, hrn in nodes.iteritems():
+ print(self.sfaapi._reserved ,self.guid)
+ for hostname, hrn in nodes.items():
if host_hrn == hrn:
- print 'hostname' ,hostname
+ print('hostname' ,hostname)
ping_ok = self._do_ping(hostname)
if not ping_ok:
attribute hostname, it will over write the previous value.
"""
hosts_hrn = self.sfaapi.get_resources_hrn()
- for hostname, hrn in hosts_hrn.iteritems():
+ for hostname, hrn in hosts_hrn.items():
if hrn == node:
self.set("hostname", hostname)
def fail_discovery(self):
msg = "Discovery failed. No candidates found for node"
self.error(msg)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def fail_node_not_alive(self, hostname=None):
msg = "Node %s not alive" % hostname
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def fail_node_not_available(self, hostname):
msg = "Node %s not available for provisioning" % hostname
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def fail_not_enough_nodes(self):
msg = "Not enough nodes available for provisioning"
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def fail_sfaapi(self):
msg = "Failing while trying to instanciate the SFA API.\nSet the" + \
" attributes sfauser and sfaPrivateKey."
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def valid_connection(self, guid):
# TODO: Validate!
def node(self):
node = self.get_connected(PlanetlabNode.get_rtype())
if node: return node[0]
- raise RuntimeError, "TAP/TUN devices must be connected to Node"
+ raise RuntimeError("TAP/TUN devices must be connected to Node")
def upload_sources(self):
scripts = []
if not exec_run_home:
exec_run_home = self.run_home
- for i in xrange(20):
+ for i in range(20):
(out, err), proc = self.node.check_output(self.run_home, "vif_name")
if proc.poll() > 0:
(out, err), proc = self.node.check_errors(exec_run_home)
if err.strip():
- raise RuntimeError, err
+ raise RuntimeError(err)
if out:
vif_name = out.strip()
else:
msg = "Couldn't retrieve vif_name"
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
return vif_name
else:
msg = " Failed to execute command '%s'" % command
self.error(msg, out, err)
- raise RuntimeError, msg
+ raise RuntimeError(msg)
def do_stop(self):
try:
os.stat(d + "/" + name)
return d + "/" + name
- except OSError, e:
+ except OSError as e:
-        if e.errno != os.errno.ENOENT:
+        if e.errno != errno.ENOENT:
raise
return None
try:
os.stat(d + "/" + name)
return d + "/" + name
- except OSError, e:
+ except OSError as e:
-        if e.errno != os.errno.ENOENT:
+        if e.errno != errno.ENOENT:
raise
return None
def execute(cmd):
# FIXME: create a global debug variable
#print "[pid %d]" % os.getpid(), " ".join(cmd)
- null = open("/dev/null", "r+")
- p = subprocess.Popen(cmd, stdout = null, stderr = subprocess.PIPE)
- out, err = p.communicate()
- if p.returncode != 0:
- raise RuntimeError("Error executing `%s': %s" % (" ".join(cmd), err))
+ with open("/dev/null", "r+") as null:
+ p = subprocess.Popen(cmd, stdout = null, stderr = subprocess.PIPE)
+ out, err = p.communicate()
+ if p.returncode != 0:
+ raise RuntimeError("Error executing `%s': %s" % (" ".join(cmd), err))
def backticks(cmd):
p = subprocess.Popen(cmd, stdout = subprocess.PIPE,
def add_key_to_agent(filename):
ssh_add = nepi.util.environ.find_bin_or_die("ssh-add")
args = [ssh_add, filename]
- null = file("/dev/null", "w")
- assert subprocess.Popen(args, stderr = null).wait() == 0
- null.close()
+ with open("/dev/null", "w") as null:
+ assert subprocess.Popen(args, stderr = null).wait() == 0
def get_free_port():
s = socket.socket()
"""
def gen_sshd_config(filename, port, server_key, auth_keys):
- conf = open(filename, "w")
- text = _SSH_CONF % (port, server_key, auth_keys)
- conf.write(text)
- conf.close()
+ with open(filename, "w") as conf:
+ text = _SSH_CONF % (port, server_key, auth_keys)
+ conf.write(text)
return filename
def gen_auth_keys(pubkey, output, environ):
for k, v in environ.items():
opts.append('environment="%s=%s"' % (k, v))
- lines = file(pubkey).readlines()
+ with open(pubkey) as f:
+ lines = f.readlines()
pubkey = lines[0].split()[0:2]
- out = file(output, "w")
- out.write("%s %s %s\n" % (",".join(opts), pubkey[0], pubkey[1]))
- out.close()
+ with open(output, "w") as out:
+ out.write("%s %s %s\n" % (",".join(opts), pubkey[0], pubkey[1]))
return output
def start_ssh_agent():
# No need to gather the pid, ssh-agent knows how to kill itself; after we
# had set up the environment
ssh_agent = nepi.util.environ.find_bin_or_die("ssh-agent")
- null = file("/dev/null", "w")
- proc = subprocess.Popen([ssh_agent, "-k"], stdout = null)
- null.close()
- assert proc.wait() == 0
+ with open("/dev/null", "w") as null:
+ proc = subprocess.Popen([ssh_agent, "-k"], stdout = null)
+ assert proc.wait() == 0
for k in data:
del os.environ[k]
"""
if env:
export = ''
- for envkey, envval in env.iteritems():
+ for envkey, envval in env.items():
export += '%s=%s ' % (envkey, envval)
command = "%s %s" % (export, command)
(out,err), proc = lexec(cmd)
if proc.wait():
- raise RuntimeError, "Failed to set up application on host %s: %s %s" % (host, out,err,)
+ raise RuntimeError("Failed to set up application on host %s: %s %s" % (host, out,err,))
return ((out,err), proc)
if out:
try:
- return map(int,out.strip().split(' ',1))
+            return [ int(x) for x in out.strip().split(' ',1) ]
except:
# Ignore, many ways to fail that don't matter that much
return None
def __init__(self):
self._last_guid = 0
- def next(self, guid = None):
+ def __next__(self, guid = None):
if guid == None:
guid = self._last_guid + 1
#
# Author: Lucia Guevgeozian Odizzio <lucia.guevgeozian_odizzio@inria.fr>
-import xmlrpclib
+from __future__ import print_function
+
+
+import xmlrpc.client
import hashlib
import threading
@property
def api(self):
- return xmlrpclib.Server(self._url, allow_none = True)
+ return xmlrpc.client.Server(self._url, allow_none = True)
def get_session_key(self):
"""
if not session['value']:
msg = "Can not authenticate in Manifold API"
- raise RuntimeError, msg
+ raise RuntimeError(msg)
session_key = session['value'][0]['session']
return dict(AuthMethod='session', session=session_key)
filters = self._map_attr_to_resource_filters(filters)
qfilters = list()
- for filtername, filtervalue in filters.iteritems():
+ for filtername, filtervalue in filters.items():
newfilter = [filtername, "==", filtervalue]
qfilters.append(newfilter)
return True
else:
msg = "Failed while trying to add %s to slice" % resource_urn
- print msg
+ print(msg)
# check how to do warning
return False
}
mapped_filters = dict()
- for filtername, filtervalue in filters.iteritems():
+ for filtername, filtervalue in filters.items():
if attr_to_filter[filtername]:
new_filtername = attr_to_filter[filtername]
mapped_filters[new_filtername] = filtervalue
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
-import ipaddr
+import ipaddress
import networkx
import math
import random
:param assign_st: Select source and target nodes on the graph.
:type assign_st: bool
- :param sources_targets: dictionary with the list of sources (key =
+ :param sources_targets: dictionary with the list of sources (key =
"sources") and list of targets (key = "targets") if defined, ignore
assign_st
- :type sources_targets: dictionary of lists
+ :type sources_targets: dictionary of lists
- :param leaf_source: if True, random sources will be selected only
+ :param leaf_source: if True, random sources will be selected only
from leaf nodes.
- :type leaf_source: bool
+ :type leaf_source: bool
NOTE: Only point-to-point like network topologies are supported for now.
(Wireless and Ethernet networks were several nodes share the same
self.assign_p2p_ips(network = network, prefix = prefix,
version = version)
- sources_targets = kwargs.get("sources_targets")
- if sources_targets:
+ sources_targets = kwargs.get("sources_targets")
+ if sources_targets:
[self.set_source(n) for n in sources_targets["sources"]]
- [self.set_target(n) for n in sources_targets["targets"]]
- elif kwargs.get("assign_st"):
+ [self.set_target(n) for n in sources_targets["targets"]]
+ elif kwargs.get("assign_st"):
self.select_target_zero()
self.select_random_source(is_leaf = kwargs.get("leaf_source"))
nodesinbranch = (node_count - 1)/ BRANCHES
c = 1
- for i in xrange(BRANCHES):
+ for i in range(BRANCHES):
prev = 0
- for n in xrange(1, nodesinbranch + 1):
+ for n in range(1, nodesinbranch + 1):
graph.add_node(c)
graph.add_edge(prev, c)
prev = c
def annotate_node(self, nid, name, value):
if not isinstance(value, str) and not isinstance(value, int) and \
not isinstance(value, float) and not isinstance(value, bool):
- raise RuntimeError, "Non-serializable annotation"
+ raise RuntimeError("Non-serializable annotation")
self.topology.node[nid][name] = value
return self.topology.node[nid].get(name)
def node_annotations(self, nid):
- return self.topology.node[nid].keys()
+ return list(self.topology.node[nid].keys())
def del_node_annotation(self, nid, name):
del self.topology.node[nid][name]
def annotate_edge(self, nid1, nid2, name, value):
if not isinstance(value, str) and not isinstance(value, int) and \
not isinstance(value, float) and not isinstance(value, bool):
- raise RuntimeError, "Non-serializable annotation"
+ raise RuntimeError("Non-serializable annotation")
self.topology.edge[nid1][nid2][name] = value
return self.topology.edge[nid1][nid2].get(name)
def edge_annotations(self, nid1, nid2):
- return self.topology.edge[nid1][nid2].keys()
+ return list(self.topology.edge[nid1][nid2].keys())
def del_edge_annotation(self, nid1, nid2, name):
del self.topology.edge[nid1][nid2][name]
# Assign IP addresses to host
netblock = "%s/%d" % (network, prefix)
if version == 4:
- net = ipaddr.IPv4Network(netblock)
+            net = ipaddress.ip_network(netblock, strict=False)
new_prefix = 30
elif version == 6:
- net = ipaddr.IPv6Network(netblock)
+            net = ipaddress.ip_network(netblock, strict=False)
new_prefix = 30
else:
- raise RuntimeError, "Invalid IP version %d" % version
+ raise RuntimeError("Invalid IP version %d" % version)
## Clear all previusly assigned IPs
for nid in self.topology.nodes():
#### Compute subnets for each link
# get a subnet of base_add with prefix /30
- subnet = sub_itr.next()
+ subnet = next(sub_itr)
mask = subnet.netmask.exploded
-            network = subnet.network.exploded
+            network = subnet.network_address.exploded
prefixlen = subnet.prefixlen
# get host addresses in that subnet
-            i = subnet.iterhosts()
+            i = subnet.hosts()
- addr1 = i.next()
- addr2 = i.next()
+ addr1 = next(i)
+ addr2 = next(i)
ip1 = addr1.exploded
ip2 = addr2.exploded
source = leaves.pop(random.randint(0, len(leaves) - 1))
else:
# options must not be already sources or targets
- options = [ k for k,v in self.topology.degree().iteritems() \
+ options = [ k for k,v in self.topology.degree().items() \
if (not kwargs.get("is_leaf") or v == 1) \
and not self.topology.node[k].get("source") \
and not self.topology.node[k].get("target")]
#
import threading
-import Queue
+import queue
import traceback
import sys
import os
self.maxqueue = maxqueue
self.maxthreads = maxthreads
- self.queue = Queue.Queue(self.maxqueue or 0)
+ self.queue = queue.Queue(self.maxqueue or 0)
self.delayed_exceptions = []
if results:
- self.rvqueue = Queue.Queue()
+ self.rvqueue = queue.Queue()
else:
self.rvqueue = None
if maxthreads is None:
if N_PROCS is None:
try:
- f = open("/proc/cpuinfo")
- try:
+ with open("/proc/cpuinfo") as f:
N_PROCS = sum("processor" in l for l in f)
- finally:
- f.close()
except:
pass
maxthreads = N_PROCS
self.workers = []
# initialize workers
- for x in xrange(maxthreads):
+ for x in range(maxthreads):
worker = WorkerThread()
worker.attach(self.queue, self.rvqueue, self.delayed_exceptions)
worker.setDaemon(True)
try:
self.queue.get(block = False)
self.queue.task_done()
- except Queue.Empty:
+ except queue.Empty:
break
def destroy(self):
if self.delayed_exceptions:
typ,val,loc = self.delayed_exceptions[0]
del self.delayed_exceptions[:]
- raise typ,val,loc
+            raise val.with_traceback(loc)
def __iter__(self):
if self.rvqueue is not None:
while True:
try:
yield self.rvqueue.get_nowait()
- except Queue.Empty:
+ except queue.Empty:
self.queue.join()
try:
yield self.rvqueue.get_nowait()
- except Queue.Empty:
+ except queue.Empty:
-                        raise StopIteration
+                        return
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.util.netgraph import NetGraph, TopologyType
from nepi.util.timefuncs import stformat, tsformat
rv = s.decode("latin1")
if isinstance(s, datetime.datetime):
rv = tsformat(s)
- elif not isinstance(s, unicode):
- rv = unicode(s)
+ elif not isinstance(s, str):
+ rv = str(s)
else:
rv = s
- return rv.replace(u'\x00',u'�')
+ return rv.replace('\x00','�')
def xmldecode(s, cast = str):
-    ret = s.replace(u'�',u'\x00').encode("ascii")
+    ret = s.replace('�','\x00')
+ ret = s.replace('�','\x00').encode("ascii")
ret = cast(ret)
if s == "None":
return None
try:
xml = doc.toprettyxml(indent=" ", encoding="UTF-8")
except:
- print >>sys.stderr, "Oops: generating XML from %s" % (data,)
+ print("Oops: generating XML from %s" % (data,), file=sys.stderr)
raise
return xml
rmsnode = doc.createElement("rms")
ecnode.appendChild(rmsnode)
- for guid, rm in ec._resources.iteritems():
+ for guid, rm in ec._resources.items():
self._rm_to_xml(doc, rmsnode, ec, guid, rm)
return doc
cnnode = doc.createElement("conditions")
conditions = False
- for action, conds in rm._conditions.iteritems():
+ for action, conds in rm._conditions.items():
conditions = True
for (group, state, time) in conds:
ccnnode = doc.createElement("condition")
networkx.draw(graph, pos = pos, node_color="white",
node_size = 500, with_labels=True)
- label = "\n".join(map(lambda v: "%s: %s" % (v[0], v[1]), labels.iteritems()))
+ label = "\n".join(["%s: %s" % (v[0], v[1]) for v in iter(labels.items())])
plt.annotate(label, xy=(0.05, 0.95), xycoords='axes fraction')
fpath += ".png"
labels = dict()
connections = set()
- for guid, rm in ec._resources.iteritems():
+ for guid, rm in ec._resources.items():
label = rm.get_rtype()
graph.add_node(guid,
from nepi.util.parsers.xml_parser import ECXMLParser
parser = ECXMLParser()
- f = open(filepath, "r")
- xml = f.read()
- f.close()
+ with open(filepath, "r") as f:
+ xml = f.read()
ec = parser.from_xml(xml)
if format == SFormats.XML:
filepath = os.path.join(dirpath, "%s.xml" % filename)
sec = self.serialize(ec, format = format)
- f = open(filepath, "w")
- f.write(sec)
- f.close()
+ with open(filepath, "w") as f:
+ f.write(sec)
return filepath
else: slice_resources = []
if slice_resources:
slice_resources_hrn = self.get_resources_hrn(slice_resources)
- for s_hrn_key, s_hrn_value in slice_resources_hrn.iteritems():
+ for s_hrn_key, s_hrn_value in slice_resources_hrn.items():
s_parts = s_hrn_value.split('.')
s_hrn = '.'.join(s_parts[:2]) + '.' + '\\.'.join(s_parts[2:])
resources_hrn_new.append(s_hrn)
resources_urn = self._get_resources_urn(resources_hrn_new)
rspec = self.rspec_proc.build_sfa_rspec(slicename, resources_urn, None, leases)
- f = open("/tmp/rspec_input.rspec", "w")
- f.truncate(0)
- f.write(rspec)
- f.close()
+ with open("/tmp/rspec_input.rspec", "w") as f:
+ f.truncate(0)
+ f.write(rspec)
if not os.path.getsize("/tmp/rspec_input.rspec") > 0:
raise RuntimeError("Fail to create rspec file to allocate resource in slice %s" % slicename)
# Re implementing urn from hrn because the library sfa-common doesn't work for wilabt
resources_urn = self._get_urn(resources_hrn_new)
rspec = self.rspec_proc.build_sfa_rspec(slicename, resources_urn, properties, leases)
- f = open("/tmp/rspec_input.rspec", "w")
- f.truncate(0)
- f.write(rspec)
- f.close()
+ with open("/tmp/rspec_input.rspec", "w") as f:
+ f.truncate(0)
+ f.write(rspec)
if not os.path.getsize("/tmp/rspec_input.rspec") > 0:
raise RuntimeError("Fail to create rspec file to allocate resources in slice %s" % slicename)
slice_res = self.get_slice_resources(slicename)['resource']
if slice_res:
if len(slice_res[0]['services']) != 0:
- slice_res_hrn = self.get_resources_hrn(slice_res).values()
+ # 2to3 added list() and it is useful
+ slice_res_hrn = list(self.get_resources_hrn(slice_res).values())
if self._compare_lists(slice_res_hrn, resources_hrn):
return True
else: return len(slice_res_hrn)
log.debug("Package sfa-common not installed.\
Could not import sfa.rspecs.rspec and sfa.util.xrn")
-from types import StringTypes, ListType
-
-
class SfaRSpecProcessing(object):
"""
Class to process SFA RSpecs, parse the RSpec replies such as Advertisement RSpecs,
self.config = config
def make_dict_rec(self, obj):
- if not obj or isinstance(obj, (StringTypes, bool)):
+ if not obj or isinstance(obj, (str, bool)):
return obj
if isinstance(obj, list):
objcopy = []
try:
nodes = rspec.version.get_nodes()
- except Exception, e:
+ except Exception as e:
self._log.warn("Could not retrieve nodes in RSpec: %s" % e)
try:
leases = rspec.version.get_leases()
- except Exception, e:
+ except Exception as e:
self._log.warn("Could not retrieve leases in RSpec: %s" % e)
try:
links = rspec.version.get_links()
- except Exception, e:
+ except Exception as e:
self._log.warn("Could not retrieve links in RSpec: %s" % e)
try:
channels = rspec.version.get_channels()
- except Exception, e:
+ except Exception as e:
self._log.warn("Could not retrieve channels in RSpec: %s" % e)
resources = []
elif resource_type == 'channel':
channels.append(resource)
else:
- raise Exception, "Not supported type of resource"
+ raise Exception("Not supported type of resource")
rspec.version.add_nodes(nodes, rspec_content_type="request")
#rspec.version.add_leases(leases)
logger = logging.getLogger("sshfuncs")
-def log(msg, level, out = None, err = None):
+def log(msg, level = logging.DEBUG, out = None, err = None):
if out:
msg += " - OUT: %s " % out
-
if err:
msg += " - ERROR: %s " % err
-
logger.log(level, msg)
if hasattr(os, "devnull"):
Special value that when given to rspawn in stderr causes stderr to
redirect to whatever stdout was redirected to.
"""
+ pass
class ProcStatus:
"""
ip = None
if host in ["localhost", "127.0.0.1", "::1"]:
- p = subprocess.Popen("ip -o addr list", shell=True,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p = subprocess.Popen(
+ "ip -o addr list",
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines = True,
+ )
stdout, stderr = p.communicate()
m = _re_inet.findall(stdout)
ip = m[0][1].split("/")[0]
"""
global OPENSSH_HAS_PERSIST
if OPENSSH_HAS_PERSIST is None:
- proc = subprocess.Popen(["ssh","-v"],
+ proc = subprocess.Popen(
+ ["ssh", "-v"],
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT,
- stdin = open("/dev/null","r") )
+ stdin = subprocess.DEVNULL,
+ universal_newlines = True,
+ )
out,err = proc.communicate()
proc.wait()
if os.environ.get('NEPI_STRICT_AUTH_MODE',"").lower() not in ('1','true','on'):
user_hosts_path = '%s/.ssh/known_hosts' % (os.environ.get('HOME',""),)
if os.access(user_hosts_path, os.R_OK):
- f = open(user_hosts_path, "r")
- tmp_known_hosts.write(f.read())
- f.close()
+ with open(user_hosts_path, "r") as f:
+ tmp_known_hosts.write(f.read())
tmp_known_hosts.flush()
return s
else:
# unsafe string - escape
- def escp(c):
+ def escape(c):
if (32 <= ord(c) < 127 or c in ('\r','\n','\t')) and c not in ("'",'"'):
return c
else:
return "'$'\\x%02x''" % (ord(c),)
- s = ''.join(map(escp,s))
+ s = ''.join(map(escape, s))
return "'%s'" % (s,)
def eintr_retry(func):
@functools.wraps(func)
def rv(*p, **kw):
retry = kw.pop("_retry", False)
- for i in xrange(0 if retry else 4):
+ for i in range(0 if retry else 4):
try:
return func(*p, **kw)
- except (select.error, socket.error), args:
+ except (select.error, socket.error) as args:
-                if args[0] == errno.EINTR:
+                if args.errno == errno.EINTR:
continue
else:
raise
- except OSError, e:
+ except OSError as e:
if e.errno == errno.EINTR:
continue
else:
stdout = stderr = stdin = None
return _retry_rexec(args, log_msg,
- stderr = stderr,
- stdin = stdin,
- stdout = stdout,
- env = env,
- retry = retry,
- tmp_known_hosts = tmp_known_hosts,
- blocking = blocking)
+ stderr = stderr,
+ stdin = stdin,
+ stdout = stdout,
+ env = env,
+ retry = retry,
+ tmp_known_hosts = tmp_known_hosts,
+ blocking = blocking)
def rcopy(source, dest,
port = None,
elif isinstance(source, str) and ':' in source:
remspec, path = source.split(':',1)
else:
- raise ValueError, "Both endpoints cannot be local"
+ raise ValueError("Both endpoints cannot be local")
user,host = remspec.rsplit('@',1)
# plain scp
blocking = True)
def rspawn(command, pidfile,
- stdout = '/dev/null',
- stderr = STDOUT,
- stdin = '/dev/null',
- home = None,
- create_home = False,
- sudo = False,
- host = None,
- port = None,
- user = None,
- gwuser = None,
- gw = None,
- agent = None,
- identity = None,
- server_key = None,
- tty = False,
- strict_host_checking = True):
+ stdout = '/dev/null',
+ stderr = STDOUT,
+ stdin = '/dev/null',
+ home = None,
+ create_home = False,
+ sudo = False,
+ host = None,
+ port = None,
+ user = None,
+ gwuser = None,
+ gw = None,
+ agent = None,
+ identity = None,
+ server_key = None,
+ tty = False,
+ strict_host_checking = True):
"""
Spawn a remote command such that it will continue working asynchronously in
background.
)
if proc.wait():
- raise RuntimeError, "Failed to set up application on host %s: %s %s" % (host, out,err,)
+ raise RuntimeError("Failed to set up application on host %s: %s %s" % (host, out,err,))
return ((out, err), proc)
@eintr_retry
def rgetpid(pidfile,
- host = None,
- port = None,
- user = None,
- gwuser = None,
- gw = None,
- agent = None,
- identity = None,
- server_key = None,
- strict_host_checking = True):
+ host = None,
+ port = None,
+ user = None,
+ gwuser = None,
+ gw = None,
+ agent = None,
+ identity = None,
+ server_key = None,
+ strict_host_checking = True):
"""
Returns the pid and ppid of a process from a remote file where the
information was stored.
if out:
try:
- return map(int,out.strip().split(' ',1))
+ return [ int(x) for x in out.strip().split(' ',1) ]
except:
# Ignore, many ways to fail that don't matter that much
return None
return (out, err), proc
def _retry_rexec(args,
- log_msg,
- stdout = subprocess.PIPE,
- stdin = subprocess.PIPE,
- stderr = subprocess.PIPE,
- env = None,
- retry = 3,
- tmp_known_hosts = None,
- blocking = True):
-
- for x in xrange(retry):
+ log_msg,
+ stdout = subprocess.PIPE,
+ stdin = subprocess.PIPE,
+ stderr = subprocess.PIPE,
+ env = None,
+ retry = 3,
+ tmp_known_hosts = None,
+ blocking = True):
+
+ for x in range(retry):
# display command actually invoked when debug is turned on
message = " ".join( [ "'{}'".format(arg) for arg in args ] )
log("sshfuncs: invoking {}".format(message), logging.DEBUG)
# connects to the remote host and starts a remote connection
- proc = subprocess.Popen(args,
- env = env,
- stdout = stdout,
- stdin = stdin,
- stderr = stderr)
-
+ proc = subprocess.Popen(
+ args,
+ env = env,
+ stdout = stdout,
+ stdin = stdin,
+ stderr = stderr,
+ universal_newlines = True,
+ )
# attach tempfile object to the process, to make sure the file stays
# alive until the process is finished with it
proc._known_hosts = tmp_known_hosts
# The method communicate was re implemented for performance issues
# when using python subprocess communicate method the ssh commands
# last one minute each
+ #log("BEFORE communicate", level=logging.INFO); import time; beg=time.time()
out, err = _communicate(proc, input=None)
+ #log("AFTER communicate - {}s".format(time.time()-beg), level=logging.INFO)
elif stdout:
out = proc.stdout.read()
time.sleep(t)
continue
break
- except RuntimeError, e:
+ except RuntimeError as e:
msg = " rexec EXCEPTION - TIMEOUT -> %s \n %s" % ( e.args, log_msg )
log(msg, logging.DEBUG, out, err)
try:
rlist, wlist, xlist = select.select(read_set, write_set, [], select_timeout)
- except select.error,e:
+ except select.error as e:
if e[0] != 4:
raise
else:
write_set.remove(proc.stdin)
if proc.stdout in rlist:
- data = os.read(proc.stdout.fileno(), 1024)
- if data == "":
+ # python2 version used to do this
+ # data = os.read(proc.stdout.fileno(), 1024)
+ # however this always returned bytes...
+ data = proc.stdout.read()
+ log('we have read {}'.format(data))
+ # data should be str and not bytes because we use
+ # universal_newlines = True, but to be clean
+ # instead of saying data != ""
+ if not data:
+ log('closing stdout')
proc.stdout.close()
read_set.remove(proc.stdout)
stdout.append(data)
if proc.stderr in rlist:
- data = os.read(proc.stderr.fileno(), 1024)
- if data == "":
+ # likewise (see above)
+ # data = os.read(proc.stderr.fileno(), 1024)
+ data = proc.stderr.read()
+ if not data:
proc.stderr.close()
read_set.remove(proc.stderr)
stderr.append(data)
if stderr is not None:
stderr = ''.join(stderr)
- # Translate newlines, if requested. We cannot let the file
- # object do the translation: It is based on stdio, which is
- # impossible to combine with select (unless forcing no
- # buffering).
- if proc.universal_newlines and hasattr(file, 'newlines'):
- if stdout:
- stdout = proc._translate_newlines(stdout)
- if stderr:
- stderr = proc._translate_newlines(stderr)
+# # Translate newlines, if requested. We cannot let the file
+# # object do the translation: It is based on stdio, which is
+# # impossible to combine with select (unless forcing no
+# # buffering).
+# if proc.universal_newlines and hasattr(file, 'newlines'):
+# if stdout:
+# stdout = proc._translate_newlines(stdout)
+# if stderr:
+# stderr = proc._translate_newlines(stderr)
if killed and err_on_timeout:
errcode = proc.poll()
- raise RuntimeError, ("Operation timed out", errcode, stdout, stderr)
+ raise RuntimeError("Operation timed out", errcode, stdout, stderr)
else:
if killed:
proc.poll()
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
import math
import numpy
from scipy import stats
# TODO: Discard outliers !!!!
if not sample:
- print " CANNOT COMPUTE STATS for ", sample
+ print(" CANNOT COMPUTE STATS for ", sample)
return (0, 0, 0, 0)
x = numpy.array(sample)
time.sleep(1)
- self.assertEquals('hola!', task.result)
+ self.assertEqual('hola!', task.result)
ec.shutdown()
# When this task is executed and the error raise,
# the FailureManager should set its failure level to
# TASK_FAILURE
- raise RuntimeError, "NOT A REAL ERROR. JUST TESTING!"
+ raise RuntimeError("NOT A REAL ERROR. JUST TESTING!")
ec = ExperimentController()
time.sleep(1)
- self.assertEquals(task.status, TaskStatus.ERROR)
+ self.assertEqual(task.status, TaskStatus.ERROR)
if __name__ == '__main__':
unittest.main()
self.ec.schedule("0.5s", self.deploy)
else:
time.sleep(random.random() * 2)
- raise RuntimeError, "NOT A REAL ERROR. JUST TESTING"
+ raise RuntimeError("NOT A REAL ERROR. JUST TESTING")
class ResourceFactoryTestCase(unittest.TestCase):
def test_add_resource_factory(self):
ResourceFactory.register_type(AnotherResource)
# Take into account default 'Critical' attribute
- self.assertEquals(MyResource.get_rtype(), "MyResource")
- self.assertEquals(len(MyResource._attributes), 3)
+ self.assertEqual(MyResource.get_rtype(), "MyResource")
+ self.assertEqual(len(MyResource._attributes), 3)
- self.assertEquals(ResourceManager.get_rtype(), "Resource")
- self.assertEquals(len(ResourceManager._attributes), 2)
+ self.assertEqual(ResourceManager.get_rtype(), "Resource")
+ self.assertEqual(len(ResourceManager._attributes), 2)
- self.assertEquals(AnotherResource.get_rtype(), "AnotherResource")
- self.assertEquals(len(AnotherResource._attributes), 2)
+ self.assertEqual(AnotherResource.get_rtype(), "AnotherResource")
+ self.assertEqual(len(AnotherResource._attributes), 2)
- self.assertEquals(len(ResourceFactory.resource_types()), 2)
+ self.assertEqual(len(ResourceFactory.resource_types()), 2)
# restore factory state for other tests
from nepi.execution.resource import populate_factory
for (group, state, time) in conditions:
waiting_for.extend(group)
- self.assertEquals(waiting_for, [1, 3, 5, 7, 10, 8])
+ self.assertEqual(waiting_for, [1, 3, 5, 7, 10, 8])
group = [1, 2, 3, 4, 6]
rm.unregister_condition(group)
for (group, state, time) in conditions:
waiting_for.extend(group)
- self.assertEquals(waiting_for, [5, 7, 10, 8])
+ self.assertEqual(waiting_for, [5, 7, 10, 8])
def test_deploy_in_order(self):
"""
node = ec.register_resource("Node")
apps = list()
- for i in xrange(1000):
+ for i in range(1000):
app = ec.register_resource("Application")
ec.register_connection(app, node)
apps.append(app)
ec.shutdown()
- self.assertEquals(ec._fm._failure_level, FailureLevel.RM_FAILURE)
+ self.assertEqual(ec._fm._failure_level, FailureLevel.RM_FAILURE)
def test_critical(self):
from nepi.execution.resource import ResourceFactory
ec.register_connection(eapp, node)
apps.append(eapp)
- for i in xrange(10):
+ for i in range(10):
app = ec.register_resource("Application")
ec.register_connection(app, node)
apps.append(app)
ec.wait_finished(apps)
state = ec.state(eapp)
- self.assertEquals(state, ResourceState.FAILED)
+ self.assertEqual(state, ResourceState.FAILED)
apps.remove(eapp)
for app in apps:
state = ec.state(app)
- self.assertEquals(state, ResourceState.STOPPED)
+ self.assertEqual(state, ResourceState.STOPPED)
ec.shutdown()
- self.assertEquals(ec._fm._failure_level, FailureLevel.OK)
+ self.assertEqual(ec._fm._failure_level, FailureLevel.OK)
def test_start_with_condition(self):
from nepi.execution.resource import ResourceFactory
apps = list()
ifaces = list()
- for i in xrange(node_count):
+ for i in range(node_count):
node = ec.register_resource("dummy::Node")
nodes.append(node)
ec.register_connection(node, iface)
ifaces.append(iface)
- for i in xrange(app_count):
+ for i in range(app_count):
app = ec.register_resource("dummy::Application")
ec.register_connection(node, app)
apps.append(app)
runs = rnr.run(ec, min_runs = 5, max_runs = 10, wait_guids = apps,
wait_time = 0)
- self.assertEquals(runs, 10)
+ self.assertEqual(runs, 10)
def test_runner_convergence(self):
node_count = 4
apps = list()
ifaces = list()
- for i in xrange(node_count):
+ for i in range(node_count):
node = ec.register_resource("dummy::Node")
nodes.append(node)
ec.register_connection(node, iface)
ifaces.append(iface)
- for i in xrange(app_count):
+ for i in range(app_count):
app = ec.register_resource("dummy::Application")
ec.register_connection(node, app)
apps.append(app)
wait_guids = apps,
wait_time = 0)
- self.assertEquals(runs, 10)
+ self.assertEqual(runs, 10)
if __name__ == '__main__':
unittest.main()
scheduler.schedule(tsk1)
# Make sure tasks are retrieved in teh correct order
- tsk = scheduler.next()
- self.assertEquals(tsk.callback(), 1)
+ tsk = next(scheduler)
+ self.assertEqual(tsk.callback(), 1)
- tsk = scheduler.next()
- self.assertEquals(tsk.callback(), 2)
+ tsk = next(scheduler)
+ self.assertEqual(tsk.callback(), 2)
- tsk = scheduler.next()
- self.assertEquals(tsk.callback(), 3)
+ tsk = next(scheduler)
+ self.assertEqual(tsk.callback(), 3)
if __name__ == '__main__':
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.resources.linux.node import LinuxNode
import os
node, ec = create_node(hostname, username, identity)
if not node.is_alive():
- print "*** WARNING: Skipping test %s: Node %s is not alive\n" % (
- name, node.get("hostname"))
+ print("*** WARNING: Skipping test %s: Node %s is not alive\n" % (
+ name, node.get("hostname")))
return
return func(*args, **kwargs)
argss = list(args)
argss.pop(0)
- for i in xrange(len(argss)/2):
+ for i in range(len(argss)//2):
username = argss[i*2]
hostname = argss[i*2+1]
node, ec = create_node(hostname, username)
if not node.is_alive():
- print "*** WARNING: Skipping test %s: Node %s is not alive\n" % (
- name, node.get("hostname"))
+ print("*** WARNING: Skipping test %s: Node %s is not alive\n" % (
+ name, node.get("hostname")))
return
return func(*args, **kwargs)
def wrapped(*args, **kwargs):
argss = list(args)
argss.pop(0)
- for i in xrange(len(argss)/3):
+ for i in range(len(argss)//3):
username = argss[i*3]
hostname = argss[i*3+1]
identity = argss[i*3+2]
node, ec = create_node(hostname, username, identity)
if not node.is_alive():
- print "*** WARNING: Skipping test %s: Node %s is not alive\n" % (
- name, node.get("hostname"))
+ print("*** WARNING: Skipping test %s: Node %s is not alive\n" % (
+ name, node.get("hostname")))
return
return func(*args, **kwargs)
mode = os.environ.get("NEPI_INTERACTIVE_TEST", False)
mode = mode and mode.lower() in ['true', 'yes']
if not mode:
- print "*** WARNING: Skipping test %s: Interactive mode off \n" % name
+ print("*** WARNING: Skipping test %s: Interactive mode off \n" % name)
return
return func(*args, **kwargs)
pl_user = os.environ.get("PL_USER")
pl_pass = os.environ.get("PL_PASS")
if not (pl_user and pl_pass):
- print "*** WARNING: Skipping test %s: Planetlab user, password and slicename not defined\n" % name
+ print("*** WARNING: Skipping test %s: Planetlab user, password and slicename not defined\n" % name)
return
return func(*args, **kwargs)
name = func.__name__
def wrapped(*args, **kwargs):
if sys.version_info < 2.7:
- print "*** WARNING: Skipping test %s: total_seconds() method doesn't exist\n" % name
+ print("*** WARNING: Skipping test %s: total_seconds() method doesn't exist\n" % name)
return
return func(*args, **kwargs)
sfa_pk = os.environ.get("SFA_PK")
if not (sfa_user and os.path.exists(os.path.expanduser(sfa_pk))):
- print "*** WARNING: Skipping test %s: SFA path to private key doesn't exist\n" % name
+ print("*** WARNING: Skipping test %s: SFA path to private key doesn't exist\n" % name)
return
return func(*args, **kwargs)
from sfa.client.sfi import Sfi
from sfa.util.xrn import hrn_to_urn
except ImportError:
- print "*** WARNING: Skipping test %s: sfi-client or sfi-common not installed\n" % name
+ print("*** WARNING: Skipping test %s: sfi-client or sfi-common not installed\n" % name)
return
return func(*args, **kwargs)
stdout = ec.trace(app, "stdout")
size = ec.trace(app, "stdout", attr = TraceAttr.SIZE)
- self.assertEquals(len(stdout), size)
+ self.assertEqual(len(stdout), size)
block = ec.trace(app, "stdout", attr = TraceAttr.STREAM, block = 5, offset = 1)
- self.assertEquals(block, stdout[5:10])
+ self.assertEqual(block, stdout[5:10])
path = ec.trace(app, "stdout", attr = TraceAttr.PATH)
rm = ec.get_resource(app)
p = os.path.join(rm.run_home, "stdout")
- self.assertEquals(path, p)
+ self.assertEqual(path, p)
ec.shutdown()
ec.wait_finished(app)
out = ec.trace(app, 'stdout')
- self.assertEquals(out, "Hello, world!\n")
+ self.assertEqual(out, "Hello, world!\n")
ec.shutdown()
ec.set(node, "cleanProcesses", True)
apps = list()
- for i in xrange(50):
+ for i in range(50):
app = ec.register_resource("linux::Application")
cmd = "ping -c5 %s" % self.target
ec.set(app, "command", cmd)
for app in apps:
stdout = ec.trace(app, 'stdout')
size = ec.trace(app, 'stdout', attr = TraceAttr.SIZE)
- self.assertEquals(len(stdout), size)
+ self.assertEqual(len(stdout), size)
block = ec.trace(app, 'stdout', attr = TraceAttr.STREAM, block = 5, offset = 1)
- self.assertEquals(block, stdout[5:10])
+ self.assertEqual(block, stdout[5:10])
path = ec.trace(app, 'stdout', attr = TraceAttr.PATH)
rm = ec.get_resource(app)
p = os.path.join(rm.run_home, 'stdout')
- self.assertEquals(path, p)
+ self.assertEqual(path, p)
ec.shutdown()
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.execution.resource import ResourceState, ResourceAction
from nepi.execution.ec import ExperimentController
from test_utils import skipIfAnyNotAlive
ec.wait_finished(peek)
stdout = ec.trace(peek, "stdout")
- print stdout
+ print(stdout)
expected = "DATA"
self.assertTrue(stdout.find(expected) > -1)
if os.path.isdir(path):
dircount += 1
logs = glob.glob(os.path.join(path, "*.stdout"))
- self.assertEquals(len(logs), 1)
+ self.assertEqual(len(logs), 1)
- self.assertEquals(runs, dircount)
+ self.assertEqual(runs, dircount)
shutil.rmtree(dirpath)
# Test based on netns test/test_core.py file test_run_ping_routing test
#
+from __future__ import print_function
+
from nepi.resources.netns.netnsserver import run_server
from nepi.resources.linux.netns.netnsclient import LinuxNetNSClient
p1 = client.invoke(a1, "poll")
p2 = client.invoke(a2, "poll")
- stdout1 = open(path1, "r")
- stdout2 = open(path2, "r")
-
- s1 = stdout1.read()
- s2 = stdout2.read()
+ with open(path1, "r") as stdout1:
+ with open(path2, "r") as stdout2:
+ s1 = stdout1.read()
+ s2 = stdout2.read()
- print s1, s2
+ print(s1, s2)
expected = "1 packets transmitted, 1 received, 0% packet loss"
self.assertTrue(s1.find(expected) > -1)
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
from nepi.execution.trace import TraceAttr
ec.set(ip, "prefix", prefix)
ec.register_connection(ip, iface)
- print ec.get(ip, "ip"), addr
- print ec.get(ip, "prefix"), prefix
+ print(ec.get(ip, "ip"), addr)
+ print(ec.get(ip, "prefix"), prefix)
return node, iface
# get the pid of the process
ecode = node.exitcode(app_home)
- self.assertEquals(ecode, ExitCode.OK)
+ self.assertEqual(ecode, ExitCode.OK)
@skipIfNotAlive
def t_exitcode_kill(self, host, user):
# The process is still running, so no retfile has been created yet
ecode = node.exitcode(app_home)
- self.assertEquals(ecode, ExitCode.FILENOTFOUND)
+ self.assertEqual(ecode, ExitCode.FILENOTFOUND)
(out, err), proc = node.check_errors(app_home)
- self.assertEquals(err, "")
+ self.assertEqual(err, "")
# Now kill the app
pid, ppid = node.getpid(app_home)
node.kill(pid, ppid)
(out, err), proc = node.check_errors(app_home)
- self.assertEquals(err, "")
+ self.assertEqual(err, "")
@skipIfNotAlive
def t_exitcode_error(self, host, user):
ecode = node.exitcode(app_home)
# bash erro 127 - command not found
- self.assertEquals(ecode, 127)
+ self.assertEqual(ecode, 127)
(out, err), proc = node.check_errors(app_home)
node.find_home()
(out, err), proc = node.mkdir(node.node_home, clean = True)
- self.assertEquals(err, "")
+ self.assertEqual(err, "")
(out, err), proc = node.install_packages("gcc", node.node_home)
- self.assertEquals(err, "")
+ self.assertEqual(err, "")
(out, err), proc = node.remove_packages("gcc", node.node_home)
- self.assertEquals(err, "")
+ self.assertEqual(err, "")
(out, err), proc = node.rmdir(node.exp_home)
- self.assertEquals(err, "")
+ self.assertEqual(err, "")
@skipIfNotAlive
def t_clean(self, host, user):
command1 = " [ -d %s ] && echo 'Found'" % node.lib_dir
(out, err), proc = node.execute(command1)
- self.assertEquals(out.strip(), "Found")
+ self.assertEqual(out.strip(), "Found")
command2 = " [ -d %s ] && echo 'Found'" % node.node_home
(out, err), proc = node.execute(command2)
- self.assertEquals(out.strip(), "Found")
+ self.assertEqual(out.strip(), "Found")
node.clean_experiment()
(out, err), proc = node.execute(command2)
- self.assertEquals(out.strip(), "")
+ self.assertEqual(out.strip(), "")
node.clean_home()
(out, err), proc = node.execute(command1)
- self.assertEquals(out.strip(), "")
+ self.assertEqual(out.strip(), "")
@skipIfNotAlive
def t_xterm(self, host, user):
node.find_home()
(out, err), proc = node.mkdir(node.node_home, clean = True)
- self.assertEquals(err, "")
+ self.assertEqual(err, "")
node.install_packages("xterm", node.node_home)
- self.assertEquals(err, "")
+ self.assertEqual(err, "")
(out, err), proc = node.execute("xterm", forward_x11 = True)
- self.assertEquals(err, "")
+ self.assertEqual(err, "")
(out, err), proc = node.remove_packages("xterm", node.node_home)
- self.assertEquals(err, "")
+ self.assertEqual(err, "")
@skipIfNotAlive
def t_compile(self, host, user):
command = "%s/hello" % app_home
(out, err), proc = node.execute(command)
- self.assertEquals(out, "Hello, world!\n")
+ self.assertEqual(out, "Hello, world!\n")
# execute the program and get the output from a file
command = "%(home)s/hello > %(home)s/hello.out" % {
node.remove_packages("gcc", app_home)
node.rmdir(app_home)
- f = open(dst, "r")
- out = f.read()
- f.close()
+ with open(dst, "r") as f:
+ out = f.read()
- self.assertEquals(out, "Hello, world!\n")
+ self.assertEqual(out, "Hello, world!\n")
@skipIfNotAlive
def t_copy_files(self, host, user):
time.sleep(60)
stdout = ec.trace(ccncat, "stdout")
- f = open("bunny.ts", "w")
- f.write(stdout)
- f.close()
+ with open("bunny.ts", "w") as f:
+ f.write(stdout)
#expected = "DATA"
#self.assertTrue(stdout.find(expected) > -1)
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
from nepi.execution.resource import ResourceState, ResourceAction
from nepi.execution.trace import TraceAttr
ec.wait_finished([app])
stdout = ec.trace(app, "stdout")
- print stdout
+ print(stdout)
expected = "3 packets transmitted, 3 received, 0% packet loss"
self.assertTrue(stdout.find(expected) > -1)
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
from nepi.execution.trace import TraceAttr
stdout = ec.trace(simu, "stdout")
- print stdout
+ print(stdout)
expected = "20 packets transmitted, 20 received, 0% packet loss"
self.assertTrue(stdout.find(expected) > -1)
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
from nepi.execution.trace import TraceAttr
output = ec.trace(guid, trace)
size = ec.trace(guid, trace, attr = TraceAttr.SIZE)
- self.assertEquals(size, len(output))
+ self.assertEqual(size, len(output))
self.assertTrue(size > 100)
block = ec.trace(guid, trace, attr = TraceAttr.STREAM, block = 5, offset = 1)
- self.assertEquals(block, output[5:10])
+ self.assertEqual(block, output[5:10])
trace_path = ec.trace(guid, trace, attr = TraceAttr.PATH)
rm = ec.get_resource(guid)
path = os.path.join(rm_simu.run_home, rm._trace_filename.get(trace))
- self.assertEquals(trace_path, path)
+ self.assertEqual(trace_path, path)
ec.shutdown()
stdout = ec.trace(simu, "stdout")
- print stdout
+ print(stdout)
expected = "20 packets transmitted, 20 received, 0% packet loss"
self.assertTrue(stdout.find(expected) > -1)
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
from nepi.execution.trace import TraceAttr
ec.register_connection(ping, nsnode2)
filepath = ec.save(dirpath)
- print filepath
+ print(filepath)
ec.deploy()
ec2.wait_finished([ping])
- self.assertEquals(len(ec.resources), len(ec2.resources))
+ self.assertEqual(len(ec.resources), len(ec2.resources))
stdout = ec2.trace(simu, "stdout")
ec.register_connection(ping, nsnode1)
filepath = ec.save(dirpath)
- print filepath
+ print(filepath)
ec.deploy()
ec2.wait_finished([ping])
- self.assertEquals(len(ec.resources), len(ec2.resources))
+ self.assertEqual(len(ec.resources), len(ec2.resources))
stdout = ec2.trace(simu, "stdout")
# Give time to flush the streams
time.sleep(5)
- self.assertEquals(len(ec.resources), len(ec2.resources))
+ self.assertEqual(len(ec.resources), len(ec2.resources))
expected = "udp-perf --duration=10 --nodes=2"
cmdline = ec2.trace(udp_perf, "cmdline")
ec2.deploy()
ec2.wait_finished(apps)
- self.assertEquals(len(ec.resources), len(ec2.resources))
+ self.assertEqual(len(ec.resources), len(ec2.resources))
self.assertTrue(ec2.state(node) == ResourceState.STARTED)
self.assertTrue(ec2.state(server) == ResourceState.STOPPED)
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from __future__ import print_function
+
from nepi.execution.ec import ExperimentController
from test_utils import skipIfAnyNotAliveWithIdentity
ec.wait_finished(app)
ping = ec.trace(app, "stdout")
- print ping
+ print(ping)
expected = """3 packets transmitted, 3 received, 0% packet loss"""
self.assertTrue(ping.find(expected) > -1)
ping = ec.trace(app, "stdout")
- print ping
+ print(ping)
expected = """3 packets transmitted, 3 received, 0% packet loss"""
self.assertTrue(ping.find(expected) > -1)
p1 = wrapper.invoke(a1, "poll")
p2 = wrapper.invoke(a2, "poll")
- stdout1 = open(path1, "r")
- stdout2 = open(path2, "r")
+ with open(path1, "r") as stdout1:
+ with open(path2, "r") as stdout2:
- s1 = stdout1.read()
- s2 = stdout2.read()
+ s1 = stdout1.read()
+ s2 = stdout2.read()
expected = "1 packets transmitted, 1 received, 0% packet loss"
self.assertTrue(s1.find(expected) > -1)
#
# node n0 sends IGMP traffic to node n3
+from __future__ import print_function
from nepi.resources.ns3.ns3wrapper import NS3Wrapper
wrapper.invoke(csma, "EnableAsciiAll", "/tmp/csma-ping-ascii")
def SinkRx(packet, address):
- print packet
+ print(packet)
def PingRtt(context, rtt):
- print context, rtt
+ print(context, rtt)
# XXX: No biding for MakeCallback
#Config::ConnectWithoutContext ("/NodeList/3/ApplicationList/0/$ns3::PacketSink/Rx",
stderr = subprocess.PIPE, shell = True)
(out, err) = p.communicate()
- self.assertEquals(int(out), 8)
+ self.assertEqual(int(out), 8)
p = subprocess.Popen("rm /tmp/csma-ping-*", shell = True)
p.communicate()
class OMFResourceFactoryTestCase(unittest.TestCase):
def test_creation_phase(self):
- self.assertEquals(OMFNode.get_rtype(), "omf::Node")
- self.assertEquals(len(OMFNode._attributes), 8)
+ self.assertEqual(OMFNode.get_rtype(), "omf::Node")
+ self.assertEqual(len(OMFNode._attributes), 8)
- self.assertEquals(OMFWifiInterface.get_rtype(), "omf::WifiInterface")
- self.assertEquals(len(OMFWifiInterface._attributes), 12)
+ self.assertEqual(OMFWifiInterface.get_rtype(), "omf::WifiInterface")
+ self.assertEqual(len(OMFWifiInterface._attributes), 12)
- self.assertEquals(OMFChannel.get_rtype(), "omf::Channel")
- self.assertEquals(len(OMFChannel._attributes), 8)
+ self.assertEqual(OMFChannel.get_rtype(), "omf::Channel")
+ self.assertEqual(len(OMFChannel._attributes), 8)
- self.assertEquals(OMFApplication.get_rtype(), "omf::Application")
- self.assertEquals(len(OMFApplication._attributes), 14)
+ self.assertEqual(OMFApplication.get_rtype(), "omf::Application")
+ self.assertEqual(len(OMFApplication._attributes), 14)
class OMFEachTestCase(unittest.TestCase):
def setUp(self):
self.ec.shutdown()
def test_creation_and_configuration_node(self):
- self.assertEquals(self.ec.get(self.node1, 'hostname'), 'omf.plexus.wlab17')
- self.assertEquals(self.ec.get(self.node1, 'xmppUser'), 'nepi')
- self.assertEquals(self.ec.get(self.node1, 'xmppServer'), 'xmpp-plexus.onelab.eu')
- self.assertEquals(self.ec.get(self.node1, 'xmppPort'), '5222')
- self.assertEquals(self.ec.get(self.node1, 'xmppPassword'), '1234')
- self.assertEquals(self.ec.get(self.node1, 'version'), '5')
+ self.assertEqual(self.ec.get(self.node1, 'hostname'), 'omf.plexus.wlab17')
+ self.assertEqual(self.ec.get(self.node1, 'xmppUser'), 'nepi')
+ self.assertEqual(self.ec.get(self.node1, 'xmppServer'), 'xmpp-plexus.onelab.eu')
+ self.assertEqual(self.ec.get(self.node1, 'xmppPort'), '5222')
+ self.assertEqual(self.ec.get(self.node1, 'xmppPassword'), '1234')
+ self.assertEqual(self.ec.get(self.node1, 'version'), '5')
def test_creation_and_configuration_interface(self):
- self.assertEquals(self.ec.get(self.iface1, 'name'), 'wlan0')
- self.assertEquals(self.ec.get(self.iface1, 'mode'), 'adhoc')
- self.assertEquals(self.ec.get(self.iface1, 'hw_mode'), 'g')
- self.assertEquals(self.ec.get(self.iface1, 'essid'), 'vlcexp')
- self.assertEquals(self.ec.get(self.iface1, 'ip'), '10.0.0.17/24')
- self.assertEquals(self.ec.get(self.iface1, 'version'), '5')
+ self.assertEqual(self.ec.get(self.iface1, 'name'), 'wlan0')
+ self.assertEqual(self.ec.get(self.iface1, 'mode'), 'adhoc')
+ self.assertEqual(self.ec.get(self.iface1, 'hw_mode'), 'g')
+ self.assertEqual(self.ec.get(self.iface1, 'essid'), 'vlcexp')
+ self.assertEqual(self.ec.get(self.iface1, 'ip'), '10.0.0.17/24')
+ self.assertEqual(self.ec.get(self.iface1, 'version'), '5')
def test_creation_and_configuration_channel(self):
- self.assertEquals(self.ec.get(self.channel, 'channel'), '6')
- self.assertEquals(self.ec.get(self.channel, 'xmppUser'), 'nepi')
- self.assertEquals(self.ec.get(self.channel, 'xmppServer'), 'xmpp-plexus.onelab.eu')
- self.assertEquals(self.ec.get(self.channel, 'xmppPort'), '5222')
- self.assertEquals(self.ec.get(self.channel, 'xmppPassword'), '1234')
- self.assertEquals(self.ec.get(self.channel, 'version'), '5')
+ self.assertEqual(self.ec.get(self.channel, 'channel'), '6')
+ self.assertEqual(self.ec.get(self.channel, 'xmppUser'), 'nepi')
+ self.assertEqual(self.ec.get(self.channel, 'xmppServer'), 'xmpp-plexus.onelab.eu')
+ self.assertEqual(self.ec.get(self.channel, 'xmppPort'), '5222')
+ self.assertEqual(self.ec.get(self.channel, 'xmppPassword'), '1234')
+ self.assertEqual(self.ec.get(self.channel, 'version'), '5')
def test_creation_and_configuration_application(self):
- self.assertEquals(self.ec.get(self.app1, 'appid'), 'Vlc#1')
- self.assertEquals(self.ec.get(self.app1, 'command'), "/opt/vlc-1.1.13/cvlc /opt/10-by-p0d.avi --sout '#rtp{dst=10.0.0.37,port=1234,mux=ts}'")
- self.assertEquals(self.ec.get(self.app1, 'env'), 'DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority')
- self.assertEquals(self.ec.get(self.app1, 'version'), '5')
+ self.assertEqual(self.ec.get(self.app1, 'appid'), 'Vlc#1')
+ self.assertEqual(self.ec.get(self.app1, 'command'), "/opt/vlc-1.1.13/cvlc /opt/10-by-p0d.avi --sout '#rtp{dst=10.0.0.37,port=1234,mux=ts}'")
+ self.assertEqual(self.ec.get(self.app1, 'env'), 'DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority')
+ self.assertEqual(self.ec.get(self.app1, 'version'), '5')
def test_connection(self):
- self.assertEquals(len(self.ec.get_resource(self.node1).connections), 6)
- self.assertEquals(len(self.ec.get_resource(self.iface1).connections), 2)
- self.assertEquals(len(self.ec.get_resource(self.channel).connections), 1)
- self.assertEquals(len(self.ec.get_resource(self.app1).connections), 1)
- self.assertEquals(len(self.ec.get_resource(self.app2).connections), 1)
+ self.assertEqual(len(self.ec.get_resource(self.node1).connections), 6)
+ self.assertEqual(len(self.ec.get_resource(self.iface1).connections), 2)
+ self.assertEqual(len(self.ec.get_resource(self.channel).connections), 1)
+ self.assertEqual(len(self.ec.get_resource(self.app1).connections), 1)
+ self.assertEqual(len(self.ec.get_resource(self.app2).connections), 1)
def test_condition(self):
- self.assertEquals(len(self.ec.get_resource(self.app1).conditions[ResourceAction.STOP]), 1)
- self.assertEquals(len(self.ec.get_resource(self.app2).conditions[ResourceAction.START]), 1)
- self.assertEquals(len(self.ec.get_resource(self.app3).conditions[ResourceAction.START]), 1)
- self.assertEquals(len(self.ec.get_resource(self.app4).conditions[ResourceAction.STOP]), 1)
- self.assertEquals(len(self.ec.get_resource(self.app5).conditions[ResourceAction.START]), 2)
+ self.assertEqual(len(self.ec.get_resource(self.app1).conditions[ResourceAction.STOP]), 1)
+ self.assertEqual(len(self.ec.get_resource(self.app2).conditions[ResourceAction.START]), 1)
+ self.assertEqual(len(self.ec.get_resource(self.app3).conditions[ResourceAction.START]), 1)
+ self.assertEqual(len(self.ec.get_resource(self.app4).conditions[ResourceAction.STOP]), 1)
+ self.assertEqual(len(self.ec.get_resource(self.app5).conditions[ResourceAction.START]), 2)
class OMFVLCNormalCase(unittest.TestCase):
def test_deploy(self):
ec.wait_finished([self.app1, self.app2, self.app3,self.app4, self.app5])
- self.assertEquals(round(tdiffsec(ec.get_resource(self.app2).start_time, ec.get_resource(self.app1).start_time),0), 3.0)
- self.assertEquals(round(tdiffsec(ec.get_resource(self.app3).start_time, ec.get_resource(self.app2).start_time),0), 2.0)
- self.assertEquals(round(tdiffsec(ec.get_resource(self.app4).start_time, ec.get_resource(self.app3).start_time),0), 3.0)
- self.assertEquals(round(tdiffsec(ec.get_resource(self.app5).start_time, ec.get_resource(self.app3).start_time),0), 20.0)
- self.assertEquals(round(tdiffsec(ec.get_resource(self.app5).start_time, ec.get_resource(self.app1).start_time),0), 25.0)
-
- self.assertEquals(ec.get_resource(self.node1).state, ResourceState.STARTED)
- self.assertEquals(ec.get_resource(self.iface1).state, ResourceState.STARTED)
- self.assertEquals(ec.get_resource(self.channel).state, ResourceState.STARTED)
- self.assertEquals(ec.get_resource(self.app1).state, ResourceState.STOPPED)
- self.assertEquals(ec.get_resource(self.app2).state, ResourceState.STOPPED)
- self.assertEquals(ec.get_resource(self.app3).state, ResourceState.STOPPED)
- self.assertEquals(ec.get_resource(self.app4).state, ResourceState.STOPPED)
- self.assertEquals(ec.get_resource(self.app5).state, ResourceState.STOPPED)
+ self.assertEqual(round(tdiffsec(ec.get_resource(self.app2).start_time, ec.get_resource(self.app1).start_time),0), 3.0)
+ self.assertEqual(round(tdiffsec(ec.get_resource(self.app3).start_time, ec.get_resource(self.app2).start_time),0), 2.0)
+ self.assertEqual(round(tdiffsec(ec.get_resource(self.app4).start_time, ec.get_resource(self.app3).start_time),0), 3.0)
+ self.assertEqual(round(tdiffsec(ec.get_resource(self.app5).start_time, ec.get_resource(self.app3).start_time),0), 20.0)
+ self.assertEqual(round(tdiffsec(ec.get_resource(self.app5).start_time, ec.get_resource(self.app1).start_time),0), 25.0)
+
+ self.assertEqual(ec.get_resource(self.node1).state, ResourceState.STARTED)
+ self.assertEqual(ec.get_resource(self.iface1).state, ResourceState.STARTED)
+ self.assertEqual(ec.get_resource(self.channel).state, ResourceState.STARTED)
+ self.assertEqual(ec.get_resource(self.app1).state, ResourceState.STOPPED)
+ self.assertEqual(ec.get_resource(self.app2).state, ResourceState.STOPPED)
+ self.assertEqual(ec.get_resource(self.app3).state, ResourceState.STOPPED)
+ self.assertEqual(ec.get_resource(self.app4).state, ResourceState.STOPPED)
+ self.assertEqual(ec.get_resource(self.app5).state, ResourceState.STOPPED)
ec.shutdown()
- self.assertEquals(ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(ec.get_resource(self.app1).state, ResourceState.RELEASED)
- self.assertEquals(ec.get_resource(self.app2).state, ResourceState.RELEASED)
- self.assertEquals(ec.get_resource(self.app3).state, ResourceState.RELEASED)
- self.assertEquals(ec.get_resource(self.app4).state, ResourceState.RELEASED)
- self.assertEquals(ec.get_resource(self.app5).state, ResourceState.RELEASED)
+ self.assertEqual(ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(ec.get_resource(self.app2).state, ResourceState.RELEASED)
+ self.assertEqual(ec.get_resource(self.app3).state, ResourceState.RELEASED)
+ self.assertEqual(ec.get_resource(self.app4).state, ResourceState.RELEASED)
+ self.assertEqual(ec.get_resource(self.app5).state, ResourceState.RELEASED)
if __name__ == '__main__':
self.ec.wait_finished([self.app1])
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_wo_hostname(self):
self.ec.wait_finished([self.app1])
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_wo_iface(self):
self.node2 = self.ec.register_resource("omf::Node")
self.ec.wait_finished([self.app1])
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_wo_ip(self):
self.node2 = self.ec.register_resource("omf::Node")
self.ec.wait_finished([self.app1])
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_wo_channel(self):
self.node2 = self.ec.register_resource("omf::Node")
self.ec.wait_finished([self.app1])
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_wo_app(self):
self.node2 = self.ec.register_resource("omf::Node")
self.ec.wait_finished([self.app1, self.app2])
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
def test_deploy_wo_app_path(self):
self.node2 = self.ec.register_resource("omf::Node")
self.ec.wait_finished([self.app1, self.app2])
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
if __name__ == '__main__':
self.ec.wait_finished([self.app1])
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.FAILED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_with_node_and_iface_nc(self):
self.node2 = self.ec.register_resource("omf::Node")
self.ec.wait_finished([self.app1])
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.FAILED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_with_node_iface_channel_nc(self):
self.node2 = self.ec.register_resource("omf::Node")
self.ec.wait_finished([self.app1])
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.FAILED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.FAILED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_with_app_nc(self):
self.node2 = self.ec.register_resource("omf::Node")
self.ec.wait_finished([self.app1, self.app2])
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.STOPPED)
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.STOPPED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
def test_deploy_with_all_nc_and_app_critical(self):
self.node2 = self.ec.register_resource("omf::Node")
self.ec.wait_finished([self.app1, self.app2])
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
class OMFVLCWrongCaseWithNonCriticalDep(unittest.TestCase):
self.ec.wait_finished([self.app1, self.app2, self.app3])
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
- self.assertEquals(self.ec.get_resource(self.app3).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.app3).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app3).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app3).state, ResourceState.RELEASED)
def test_deploy_with_app_nc_and_critical_right(self):
self.node2 = self.ec.register_resource("omf::Node")
self.ec.wait_finished([self.app1, self.app2, self.app3])
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.STOPPED)
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
- self.assertEquals(self.ec.get_resource(self.app3).state, ResourceState.STOPPED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.STOPPED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.app3).state, ResourceState.STOPPED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app3).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app3).state, ResourceState.RELEASED)
def test_deploy_with_many_app_nc_and_critical(self):
self.ec.wait_finished([self.app1, self.app2, self.app3, self.app4, self.app5, self.app6])
- self.assertEquals(self.ec.get_resource(self.app3).state, ResourceState.STOPPED)
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
- self.assertEquals(self.ec.get_resource(self.app3).state, ResourceState.STOPPED)
- self.assertEquals(self.ec.get_resource(self.app3).state, ResourceState.STOPPED)
- self.assertEquals(self.ec.get_resource(self.app5).state, ResourceState.FAILED)
- self.assertEquals(self.ec.get_resource(self.app6).state, ResourceState.STOPPED)
+ self.assertEqual(self.ec.get_resource(self.app3).state, ResourceState.STOPPED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
+ # NOTE(review): the next two assertions re-check app3 (already asserted above) while
+ # app1 and app4 — both waited on and both released below — are never state-checked;
+ # presumably a copy-paste slip carried over from the original. Confirm intended targets.
+ self.assertEqual(self.ec.get_resource(self.app3).state, ResourceState.STOPPED)
+ self.assertEqual(self.ec.get_resource(self.app3).state, ResourceState.STOPPED)
+ self.assertEqual(self.ec.get_resource(self.app5).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.app6).state, ResourceState.STOPPED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app3).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app4).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app5).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app6).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app3).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app4).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app5).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app6).state, ResourceState.RELEASED)
if __name__ == '__main__':
class OMFResourceFactoryTestCase(unittest.TestCase):
def test_creation_phase(self):
- self.assertEquals(OMFNode.get_rtype(), "omf::Node")
- self.assertEquals(len(OMFNode._attributes), 8)
+ self.assertEqual(OMFNode.get_rtype(), "omf::Node")
+ self.assertEqual(len(OMFNode._attributes), 8)
- self.assertEquals(OMFWifiInterface.get_rtype(), "omf::WifiInterface")
- self.assertEquals(len(OMFWifiInterface._attributes), 12)
+ self.assertEqual(OMFWifiInterface.get_rtype(), "omf::WifiInterface")
+ self.assertEqual(len(OMFWifiInterface._attributes), 12)
- self.assertEquals(OMFChannel.get_rtype(), "omf::Channel")
- self.assertEquals(len(OMFChannel._attributes), 8)
+ self.assertEqual(OMFChannel.get_rtype(), "omf::Channel")
+ self.assertEqual(len(OMFChannel._attributes), 8)
- self.assertEquals(OMFApplication.get_rtype(), "omf::Application")
- self.assertEquals(len(OMFApplication._attributes), 14)
+ self.assertEqual(OMFApplication.get_rtype(), "omf::Application")
+ self.assertEqual(len(OMFApplication._attributes), 14)
class OMFEachTestCase(unittest.TestCase):
def setUp(self):
self.ec.shutdown()
def test_creation_and_configuration_node(self):
- self.assertEquals(self.ec.get(self.node1, 'hostname'), 'wlab12')
- self.assertEquals(self.ec.get(self.node1, 'xmppUser'), 'nepi')
- self.assertEquals(self.ec.get(self.node1, 'xmppServer'), 'xmpp-plexus.onelab.eu')
- self.assertEquals(self.ec.get(self.node1, 'xmppPort'), '5222')
- self.assertEquals(self.ec.get(self.node1, 'xmppPassword'), '1234')
- self.assertEquals(self.ec.get(self.node1, 'version'), '6')
+ self.assertEqual(self.ec.get(self.node1, 'hostname'), 'wlab12')
+ self.assertEqual(self.ec.get(self.node1, 'xmppUser'), 'nepi')
+ self.assertEqual(self.ec.get(self.node1, 'xmppServer'), 'xmpp-plexus.onelab.eu')
+ self.assertEqual(self.ec.get(self.node1, 'xmppPort'), '5222')
+ self.assertEqual(self.ec.get(self.node1, 'xmppPassword'), '1234')
+ self.assertEqual(self.ec.get(self.node1, 'version'), '6')
def test_creation_and_configuration_interface(self):
- self.assertEquals(self.ec.get(self.iface1, 'name'), 'wlan0')
- self.assertEquals(self.ec.get(self.iface1, 'mode'), 'adhoc')
- self.assertEquals(self.ec.get(self.iface1, 'hw_mode'), 'g')
- self.assertEquals(self.ec.get(self.iface1, 'essid'), 'vlcexp')
- self.assertEquals(self.ec.get(self.iface1, 'ip'), '10.0.0.17/24')
- self.assertEquals(self.ec.get(self.iface1, 'version'), '6')
+ self.assertEqual(self.ec.get(self.iface1, 'name'), 'wlan0')
+ self.assertEqual(self.ec.get(self.iface1, 'mode'), 'adhoc')
+ self.assertEqual(self.ec.get(self.iface1, 'hw_mode'), 'g')
+ self.assertEqual(self.ec.get(self.iface1, 'essid'), 'vlcexp')
+ self.assertEqual(self.ec.get(self.iface1, 'ip'), '10.0.0.17/24')
+ self.assertEqual(self.ec.get(self.iface1, 'version'), '6')
def test_creation_and_configuration_channel(self):
- self.assertEquals(self.ec.get(self.channel, 'channel'), '6')
- self.assertEquals(self.ec.get(self.channel, 'xmppUser'), 'nepi')
- self.assertEquals(self.ec.get(self.channel, 'xmppServer'), 'xmpp-plexus.onelab.eu')
- self.assertEquals(self.ec.get(self.channel, 'xmppPort'), '5222')
- self.assertEquals(self.ec.get(self.channel, 'xmppPassword'), '1234')
- self.assertEquals(self.ec.get(self.channel, 'version'), '6')
+ self.assertEqual(self.ec.get(self.channel, 'channel'), '6')
+ self.assertEqual(self.ec.get(self.channel, 'xmppUser'), 'nepi')
+ self.assertEqual(self.ec.get(self.channel, 'xmppServer'), 'xmpp-plexus.onelab.eu')
+ self.assertEqual(self.ec.get(self.channel, 'xmppPort'), '5222')
+ self.assertEqual(self.ec.get(self.channel, 'xmppPassword'), '1234')
+ self.assertEqual(self.ec.get(self.channel, 'version'), '6')
def test_creation_and_configuration_application(self):
- self.assertEquals(self.ec.get(self.app1, 'appid'), 'Vlc#1')
- self.assertEquals(self.ec.get(self.app1, 'command'), "/opt/vlc-1.1.13/cvlc /opt/10-by-p0d.avi --sout '#rtp{dst=10.0.0.37,port=1234,mux=ts}'")
- self.assertEquals(self.ec.get(self.app1, 'env'), 'DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority')
- self.assertEquals(self.ec.get(self.app1, 'version'), '6')
+ self.assertEqual(self.ec.get(self.app1, 'appid'), 'Vlc#1')
+ self.assertEqual(self.ec.get(self.app1, 'command'), "/opt/vlc-1.1.13/cvlc /opt/10-by-p0d.avi --sout '#rtp{dst=10.0.0.37,port=1234,mux=ts}'")
+ self.assertEqual(self.ec.get(self.app1, 'env'), 'DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority')
+ self.assertEqual(self.ec.get(self.app1, 'version'), '6')
def test_connection(self):
- self.assertEquals(len(self.ec.get_resource(self.node1).connections), 6)
- self.assertEquals(len(self.ec.get_resource(self.iface1).connections), 2)
- self.assertEquals(len(self.ec.get_resource(self.channel).connections), 1)
- self.assertEquals(len(self.ec.get_resource(self.app1).connections), 1)
- self.assertEquals(len(self.ec.get_resource(self.app2).connections), 1)
+ self.assertEqual(len(self.ec.get_resource(self.node1).connections), 6)
+ self.assertEqual(len(self.ec.get_resource(self.iface1).connections), 2)
+ self.assertEqual(len(self.ec.get_resource(self.channel).connections), 1)
+ self.assertEqual(len(self.ec.get_resource(self.app1).connections), 1)
+ self.assertEqual(len(self.ec.get_resource(self.app2).connections), 1)
def test_condition(self):
- self.assertEquals(len(self.ec.get_resource(self.app1).conditions[ResourceAction.STOP]), 1)
- self.assertEquals(len(self.ec.get_resource(self.app2).conditions[ResourceAction.START]), 1)
- self.assertEquals(len(self.ec.get_resource(self.app3).conditions[ResourceAction.START]), 1)
- self.assertEquals(len(self.ec.get_resource(self.app4).conditions[ResourceAction.STOP]), 1)
- self.assertEquals(len(self.ec.get_resource(self.app5).conditions[ResourceAction.START]), 2)
+ self.assertEqual(len(self.ec.get_resource(self.app1).conditions[ResourceAction.STOP]), 1)
+ self.assertEqual(len(self.ec.get_resource(self.app2).conditions[ResourceAction.START]), 1)
+ self.assertEqual(len(self.ec.get_resource(self.app3).conditions[ResourceAction.START]), 1)
+ self.assertEqual(len(self.ec.get_resource(self.app4).conditions[ResourceAction.STOP]), 1)
+ self.assertEqual(len(self.ec.get_resource(self.app5).conditions[ResourceAction.START]), 2)
class OMFVLCNormalCase(unittest.TestCase):
def test_deploy(self):
self.assertGreaterEqual(round(tdiffsec(ec.get_resource(self.app5).start_time, ec.get_resource(self.app3).start_time),0), 2.0)
self.assertGreaterEqual(round(tdiffsec(ec.get_resource(self.app5).start_time, ec.get_resource(self.app1).start_time),0), 25.0)
- self.assertEquals(ec.get_resource(self.node1).state, ResourceState.STARTED)
- self.assertEquals(ec.get_resource(self.iface1).state, ResourceState.STARTED)
- self.assertEquals(ec.get_resource(self.channel).state, ResourceState.STARTED)
- self.assertEquals(ec.get_resource(self.app1).state, ResourceState.STOPPED)
- self.assertEquals(ec.get_resource(self.app2).state, ResourceState.STOPPED)
- self.assertEquals(ec.get_resource(self.app3).state, ResourceState.STOPPED)
- self.assertEquals(ec.get_resource(self.app4).state, ResourceState.STOPPED)
- self.assertEquals(ec.get_resource(self.app5).state, ResourceState.STOPPED)
+ self.assertEqual(ec.get_resource(self.node1).state, ResourceState.STARTED)
+ self.assertEqual(ec.get_resource(self.iface1).state, ResourceState.STARTED)
+ self.assertEqual(ec.get_resource(self.channel).state, ResourceState.STARTED)
+ self.assertEqual(ec.get_resource(self.app1).state, ResourceState.STOPPED)
+ self.assertEqual(ec.get_resource(self.app2).state, ResourceState.STOPPED)
+ self.assertEqual(ec.get_resource(self.app3).state, ResourceState.STOPPED)
+ self.assertEqual(ec.get_resource(self.app4).state, ResourceState.STOPPED)
+ self.assertEqual(ec.get_resource(self.app5).state, ResourceState.STOPPED)
ec.shutdown()
- self.assertEquals(ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(ec.get_resource(self.app1).state, ResourceState.RELEASED)
- self.assertEquals(ec.get_resource(self.app2).state, ResourceState.RELEASED)
- self.assertEquals(ec.get_resource(self.app3).state, ResourceState.RELEASED)
- self.assertEquals(ec.get_resource(self.app4).state, ResourceState.RELEASED)
- self.assertEquals(ec.get_resource(self.app5).state, ResourceState.RELEASED)
+ self.assertEqual(ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(ec.get_resource(self.app2).state, ResourceState.RELEASED)
+ self.assertEqual(ec.get_resource(self.app3).state, ResourceState.RELEASED)
+ self.assertEqual(ec.get_resource(self.app4).state, ResourceState.RELEASED)
+ self.assertEqual(ec.get_resource(self.app5).state, ResourceState.RELEASED)
if __name__ == '__main__':
stderr_1 = ec.trace(self.app1, "stderr")
if stdout_1:
- f = open("app1_out.txt", "w")
- f.write(stdout_1)
- f.close()
+ with open("app1_out.txt", "w") as f:
+ f.write(stdout_1)
if stderr_1:
- f = open("app1_err.txt", "w")
- f.write(stderr_1)
- f.close()
+ with open("app1_err.txt", "w") as f:
+ f.write(stderr_1)
- self.assertEquals(ec.get_resource(self.node1).state, ResourceState.STARTED)
- self.assertEquals(ec.get_resource(self.iface1).state, ResourceState.STARTED)
- self.assertEquals(ec.get_resource(self.channel).state, ResourceState.STARTED)
- self.assertEquals(ec.get_resource(self.app1).state, ResourceState.STOPPED)
+ self.assertEqual(ec.get_resource(self.node1).state, ResourceState.STARTED)
+ self.assertEqual(ec.get_resource(self.iface1).state, ResourceState.STARTED)
+ self.assertEqual(ec.get_resource(self.channel).state, ResourceState.STARTED)
+ self.assertEqual(ec.get_resource(self.app1).state, ResourceState.STOPPED)
ec.shutdown()
- self.assertEquals(ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(ec.get_resource(self.app1).state, ResourceState.RELEASED)
t = open("app1_out.txt", "r")
l = t.readlines()
- self.assertEquals(l[0], "PING 10.0.0.17 (10.0.0.17) 56(84) bytes of data.\n")
+ self.assertEqual(l[0], "PING 10.0.0.17 (10.0.0.17) 56(84) bytes of data.\n")
self.assertIn("5 packets transmitted, 5 received, 0% packet loss, time", l[-2])
self.assertIn("rtt min/avg/max/mdev = ", l[-1])
- t.close()
os.remove("app1_out.txt")
self.ec.wait_finished([self.app1])
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_wo_hostname(self):
self.ec.wait_finished([self.app1])
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_wo_iface(self):
self.node2 = self.ec.register_resource("omf::Node")
self.ec.wait_finished([self.app1])
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_wo_channel(self):
self.node2 = self.ec.register_resource("omf::Node")
self.ec.wait_finished([self.app1])
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_wo_ip(self):
self.node2 = self.ec.register_resource("omf::Node")
self.ec.wait_finished([self.app1])
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_wo_app(self):
self.node2 = self.ec.register_resource("omf::Node")
self.ec.wait_finished([self.app1, self.app2])
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
def test_deploy_wo_app_path(self):
self.node2 = self.ec.register_resource("omf::Node")
self.ec.wait_finished([self.app1, self.app2])
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
if __name__ == '__main__':
self.ec.wait_finished([self.app1])
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.FAILED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_with_node_and_iface_nc(self):
self.node2 = self.ec.register_resource("omf::Node")
self.ec.wait_finished([self.app1])
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.FAILED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_with_node_iface_channel_nc(self):
self.node2 = self.ec.register_resource("omf::Node")
self.ec.wait_finished([self.app1])
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.FAILED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_with_app_nc(self):
self.node2 = self.ec.register_resource("omf::Node")
self.ec.wait_finished([self.app1, self.app2])
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.STOPPED)
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.STOPPED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
def test_deploy_with_all_nc_and_app_critical(self):
self.node2 = self.ec.register_resource("omf::Node")
self.ec.wait_finished([self.app1, self.app2])
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
class OMFVLCWrongCaseWithNonCriticalDep(unittest.TestCase):
self.ec.wait_finished([self.app1, self.app2, self.app3])
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
- self.assertEquals(self.ec.get_resource(self.app3).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.app3).state, ResourceState.FAILED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app3).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app3).state, ResourceState.RELEASED)
def test_deploy_with_app_nc_and_critical_right(self):
self.node2 = self.ec.register_resource("omf::Node")
self.ec.wait_finished([self.app1, self.app2, self.app3])
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
#self.assertEquals(self.ec.get_resource(self.app3).state, ResourceState.STOPPED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app3).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app3).state, ResourceState.RELEASED)
def test_deploy_with_many_app_nc_and_critical(self):
self.ec.wait_finished([self.app1, self.app2, self.app3, self.app4, self.app5, self.app6])
#self.assertEquals(self.ec.get_resource(self.app3).state, ResourceState.STOPPED)
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.FAILED)
#self.assertEquals(self.ec.get_resource(self.app4).state, ResourceState.STOPPED)
- self.assertEquals(self.ec.get_resource(self.app5).state, ResourceState.FAILED)
+ self.assertEqual(self.ec.get_resource(self.app5).state, ResourceState.FAILED)
#self.assertEquals(self.ec.get_resource(self.app6).state, ResourceState.STOPPED)
self.ec.shutdown()
- self.assertEquals(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app3).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app4).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app5).state, ResourceState.RELEASED)
- self.assertEquals(self.ec.get_resource(self.app6).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.node2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.iface2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app3).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app4).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app5).state, ResourceState.RELEASED)
+ self.assertEqual(self.ec.get_resource(self.app6).state, ResourceState.RELEASED)
if __name__ == '__main__':
#
# Author: Julien Tribino <julien.tribino@inria.fr>
+from __future__ import print_function
-from nepi.execution.resource import ResourceFactory, clsinit_copy, ResourceManager, ResourceAction, ResourceState
+from nepi.execution.resource import (ResourceFactory, clsinit_copy,
+ ResourceManager, ResourceAction, ResourceState)
from nepi.execution.ec import ExperimentController
from nepi.execution.attribute import Attribute, Flags
@classmethod
def test_hook(cls, old_value, new_value):
new_value *= 10
- print "Change the value of test from "+ str(old_value) +" to : " + str(new_value)
+ print("Change the value of test from "+ str(old_value) +" to : " + str(new_value))
return new_value
self.ec.deploy()
time.sleep(3)
- print "First try to change the STDIN"
+ print("First try to change the STDIN")
self.ec.set(self.app1, 'test', 3)
- self.assertEquals(self.ec.get(self.app1, 'test'), 30)
+ self.assertEqual(self.ec.get(self.app1, 'test'), 30)
time.sleep(3)
- print "Second try to change the STDIN"
+ print("Second try to change the STDIN")
self.ec.set(self.app1, 'test', 101)
- self.assertEquals(self.ec.get(self.app1, 'test'), 1010)
+ self.assertEqual(self.ec.get(self.app1, 'test'), 1010)
self.ec.wait_finished([self.app1])
class WilabtSfaNodeFactoryTestCase(unittest.TestCase):
def test_creation_phase(self):
- self.assertEquals(WilabtSfaNode._rtype, "WilabtSfaNode")
- self.assertEquals(len(WilabtSfaNode._attributes), 17)
+ self.assertEqual(WilabtSfaNode._rtype, "WilabtSfaNode")
+ self.assertEqual(len(WilabtSfaNode._attributes), 17)
class WilabtSfaNodeTestCase(unittest.TestCase):
"""
api1 = wnode_rm1.sfaapi
self.assertIsInstance(api1, SFAAPI)
- self.assertEquals(len(api1._reserved), 0)
- self.assertEquals(len(api1._blacklist), 0)
+ self.assertEqual(len(api1._reserved), 0)
+ self.assertEqual(len(api1._blacklist), 0)
node2 = self.ec.register_resource("WilabtSfaNode")
self.ec.set(node2, "host", "zotacM20")
wnode_rm2 = self.ec.get_resource(node2)
api2 = wnode_rm2.sfaapi
- self.assertEquals(api1, api2)
+ self.assertEqual(api1, api2)
wnode_rm1.sfaapi._reserved = set()
wnode_rm1.sfaapi._blacklist = set()
host = wnode_rm.get("host")
self.assertIsNotNone(host)
- self.assertEquals(wnode_rm.sfaapi._reserved, set())
+ self.assertEqual(wnode_rm.sfaapi._reserved, set())
wnode_rm.do_discover()
- self.assertEquals(len(wnode_rm.sfaapi._reserved), 1)
- self.assertEquals(wnode_rm._node_to_provision, 'wilab2.ilabt.iminds.be.zotacE5')
+ self.assertEqual(len(wnode_rm.sfaapi._reserved), 1)
+ self.assertEqual(wnode_rm._node_to_provision, 'wilab2.ilabt.iminds.be.zotacE5')
wnode_rm.sfaapi._reserved = set()
wnode_rm.sfaapi._blacklist = set()
wnode_rm = self.ec.get_resource(node)
- self.assertEquals(wnode_rm.sfaapi._reserved, set())
+ self.assertEqual(wnode_rm.sfaapi._reserved, set())
self.assertIsNone(wnode_rm._node_to_provision)
wnode_rm.do_discover()
if not self.ec.abort and self.ec.state(node) > 2:
cmd = 'echo "IT WORKED"'
((out, err), proc) = wnode_rm.execute(cmd)
- self.assertEquals(out.strip(), "IT WORKED")
+ self.assertEqual(out.strip(), "IT WORKED")
#wnode_rm.sfaapi._reserved = set()
#wnode_rm.sfaapi._blacklist = set()
state1 = self.ec.state(node1)
state2 = self.ec.state(node2)
if not self.ec.abort:
- self.assertEquals(state1, (3, 4))
- self.assertEquals(state2, (3, 4))
+ self.assertEqual(state1, (3, 4))
+ self.assertEqual(state2, (3, 4))
self.ec.shutdown()
class PLNodeFactoryTestCase(unittest.TestCase):
def test_creation_phase(self):
- self.assertEquals(PlanetlabNode._rtype, "planetlab::Node")
- self.assertEquals(len(PlanetlabNode._attributes), 32)
+ self.assertEqual(PlanetlabNode._rtype, "planetlab::Node")
+ self.assertEqual(len(PlanetlabNode._attributes), 32)
class PLNodeTestCase(unittest.TestCase):
"""
api1 = plnode_rm1.plapi
self.assertIsInstance(api1, PLCAPI)
- self.assertEquals(len(api1.reserved()), 0)
- self.assertEquals(len(api1.blacklisted()), 0)
+ self.assertEqual(len(api1.reserved()), 0)
+ self.assertEqual(len(api1.blacklisted()), 0)
node2 = create_node(self.ec, self.username, pl_user=self.pl_user,
pl_password=self.pl_password, pl_url=self.pl_url,
plnode_rm2 = self.ec.get_resource(node2)
api2 = plnode_rm2.plapi
- self.assertEquals(api1, api2)
+ self.assertEqual(api1, api2)
# Set hostname attribute in order for the shutdown method not to fail
plnode_rm1._set_hostname_attr(3)
self.assertIsNone(hostname)
plnode_rm.do_discover()
- self.assertEquals(plnode_rm._node_to_provision, 3)
+ self.assertEqual(plnode_rm._node_to_provision, 3)
# Set hostname attribute in order for the shutdown method not to fail
plnode_rm._set_hostname_attr(plnode_rm._node_to_provision)
plnode_rm = self.ec.get_resource(node)
plnode_rm.do_discover()
- self.assertEquals(plnode_rm._node_to_provision, 3)
- self.assertEquals(plnode_rm.plapi.reserved(), set([3]))
+ self.assertEqual(plnode_rm._node_to_provision, 3)
+ self.assertEqual(plnode_rm.plapi.reserved(), set([3]))
@skipIfNotPLCredentials
def test_discover_ip(self):
plnode_rm = self.ec.get_resource(node)
plnode_rm.do_discover()
- self.assertEquals(plnode_rm._node_to_provision, 3)
- self.assertEquals(plnode_rm.plapi.reserved(), set([3]))
- self.assertEquals(plnode_rm.get("hostname"), "nepi2.pl.sophia.inria.fr")
+ self.assertEqual(plnode_rm._node_to_provision, 3)
+ self.assertEqual(plnode_rm.plapi.reserved(), set([3]))
+ self.assertEqual(plnode_rm.get("hostname"), "nepi2.pl.sophia.inria.fr")
@skipIfNotPLCredentials
def test_discover_with_ranges(self):
hostname="nepi1.pl.sophia.inria.fr")
plnode_rm = self.ec.get_resource(node)
- self.assertEquals(plnode_rm.plapi.blacklisted(), set())
+ self.assertEqual(plnode_rm.plapi.blacklisted(), set())
# check that the node is actually malfunctioning
api = plnode_rm.plapi
if not node_id:
with self.assertRaises(RuntimeError):
plnode_rm.do_discover()
- self.assertEquals(plnode_rm.plapi.blacklisted(), set([1]))
+ self.assertEqual(plnode_rm.plapi.blacklisted(), set([1]))
@skipIfNotPLCredentials
def test_provision_node_inslice(self):
architecture="x86_64", operatingSystem="f12")
plnode_rm = self.ec.get_resource(node)
- self.assertEquals(len(plnode_rm.plapi.blacklisted()), 0)
- self.assertEquals(len(plnode_rm.plapi.reserved()), 0)
+ self.assertEqual(len(plnode_rm.plapi.blacklisted()), 0)
+ self.assertEqual(len(plnode_rm.plapi.reserved()), 0)
plnode_rm.do_discover()
plnode_rm.do_provision()
ip = plnode_rm.get("ip")
- self.assertEquals(ip, "138.96.116.32")
- self.assertEquals(len(plnode_rm.plapi.reserved()), 1)
+ self.assertEqual(ip, "138.96.116.32")
+ self.assertEqual(len(plnode_rm.plapi.reserved()), 1)
@skipIfNotPLCredentials
def test_provision_node_not_inslice(self):
city='Paris')
plnode_rm = self.ec.get_resource(node)
- self.assertEquals(plnode_rm.plapi.blacklisted(), set())
- self.assertEquals(plnode_rm.plapi.reserved(), set())
+ self.assertEqual(plnode_rm.plapi.blacklisted(), set())
+ self.assertEqual(plnode_rm.plapi.reserved(), set())
plnode_rm.do_discover()
plnode_rm.do_provision()
class PLSfaNodeFactoryTestCase(unittest.TestCase):
def test_creation_phase(self):
- self.assertEquals(PlanetlabSfaNode._rtype, "planetlab::sfa::Node")
- self.assertEquals(len(PlanetlabSfaNode._attributes), 31)
+ self.assertEqual(PlanetlabSfaNode._rtype, "planetlab::sfa::Node")
+ self.assertEqual(len(PlanetlabSfaNode._attributes), 31)
class PLSfaNodeTestCase(unittest.TestCase):
"""
api1 = plnode_rm1.sfaapi
self.assertIsInstance(api1, SFAAPI)
- self.assertEquals(len(api1._reserved), 0)
- self.assertEquals(len(api1._blacklist), 0)
+ self.assertEqual(len(api1._reserved), 0)
+ self.assertEqual(len(api1._blacklist), 0)
node2 = self.ec.register_resource("planetlab::sfa::Node")
self.ec.set(node2, "hostname", "planetlab2.ionio.gr")
plnode_rm2 = self.ec.get_resource(node2)
api2 = plnode_rm2.sfaapi
- self.assertEquals(api1, api2)
+ self.assertEqual(api1, api2)
@skipIfNotSfaCredentials
def test_discover(self):
hostname = plnode_rm.get("hostname")
self.assertIsNotNone(hostname)
- self.assertEquals(len(plnode_rm.sfaapi._reserved), 0)
+ self.assertEqual(len(plnode_rm.sfaapi._reserved), 0)
plnode_rm.do_discover()
- self.assertEquals(len(plnode_rm.sfaapi._reserved), 1)
- self.assertEquals(plnode_rm._node_to_provision, 'ple.mimuw.roti.mimuw.edu.pl')
+ self.assertEqual(len(plnode_rm.sfaapi._reserved), 1)
+ self.assertEqual(plnode_rm._node_to_provision, 'ple.mimuw.roti.mimuw.edu.pl')
plnode_rm.sfaapi._reserved = set()
plnode_rm.sfaapi._blacklist = set()
plnode_rm = self.ec.get_resource(node)
- self.assertEquals(plnode_rm.sfaapi._reserved, set())
+ self.assertEqual(plnode_rm.sfaapi._reserved, set())
self.assertIsNone(plnode_rm._node_to_provision)
plnode_rm.do_discover()
cmd = 'echo "IT WORKED"'
((out, err), proc) = plnode_rm.execute(cmd)
- self.assertEquals(out.strip(), "IT WORKED")
+ self.assertEqual(out.strip(), "IT WORKED")
plnode_rm.sfaapi._reserved = set()
plnode_rm.sfaapi._blacklist = set()
self.assertIsInstance(api1, MANIFOLDAPI)
self.assertIsInstance(api2, MANIFOLDAPI)
- self.assertEquals(api1, api2)
+ self.assertEqual(api1, api2)
class MANIFOLDAPITestCase(unittest.TestCase):
r_info = self.api.get_resource_info(filters=filters)
hostname = r_info[0]['hostname']
- self.assertEquals(hostname, 'planetlab2.tlm.unavarra.es')
+ self.assertEqual(hostname, 'planetlab2.tlm.unavarra.es')
# query with 2 filters
filters['network'] = 'ple'
r_info = self.api.get_resource_info(filters=filters)
hostname = r_info[0]['hostname']
- self.assertEquals(hostname, 'planetlab2.tlm.unavarra.es')
+ self.assertEqual(hostname, 'planetlab2.tlm.unavarra.es')
# query with fields only, without filters
fields = ['latitude','longitude']
r_info = self.api.get_resource_info(fields=fields)
value = r_info[10]
- self.assertEquals(value.keys(), fields)
+ self.assertEqual(list(value.keys()), fields)
# query with 2 filters and 2 fields
r_info = self.api.get_resource_info(filters, fields)
value = r_info[0]
result = {'latitude': '42.7993', 'longitude': '-1.63544'}
- self.assertEquals(value, result)
+ self.assertEqual(value, result)
# query with filters where the AND should be zero resources
filters['network'] = 'omf'
r_info = self.api.get_resource_info(filters, fields)
- self.assertEquals(r_info, [])
+ self.assertEqual(r_info, [])
def test_fail_if_invalid_field(self):
"""
'urn:publicid:IDN+ple:uttple+node+planetlab2.utt.fr',
'urn:publicid:IDN+ple:lilleple+node+node1pl.planet-lab.telecom-lille1.eu']
- self.assertEquals(resources, result)
+ self.assertEqual(resources, result)
def test_update_resources_from_slice(self):
"""
def inc(count):
count[0] += 1
- for x in xrange(10):
+ for x in range(10):
runner.put(inc, count)
runner.destroy()
- self.assertEquals(count[0], 10)
+ self.assertEqual(count[0], 10)
def test_run_interrupt(self):
runner = ParallelRun(maxthreads = 4)
runner.start()
- for x in xrange(100):
+ for x in range(100):
runner.put(sleep)
runner.empty()
runner = ParallelRun(maxthreads = 4)
runner.start()
- for x in xrange(4):
+ for x in range(4):
runner.put(inc, count)
runner.put(error)
runner.destroy()
- self.assertEquals(count[0], 4)
+ self.assertEqual(count[0], 4)
self.assertRaises(RuntimeError, runner.sync)
apps = list()
ifaces = list()
- for i in xrange(node_count):
+ for i in range(node_count):
node = ec.register_resource("dummy::Node")
nodes.append(node)
ec.register_connection(node, iface)
ifaces.append(iface)
- for i in xrange(app_count):
+ for i in range(app_count):
app = ec.register_resource("dummy::Application")
ec.register_connection(node, app)
apps.append(app)
apps = list()
ifaces = list()
- for i in xrange(node_count):
+ for i in range(node_count):
node = ec.register_resource("dummy::Node")
nodes.append(node)
ec.register_connection(node, iface)
ifaces.append(iface)
- for i in xrange(app_count):
+ for i in range(app_count):
app = ec.register_resource("dummy::Application")
ec.register_connection(node, app)
apps.append(app)
ec2.wait_finished(apps)
ec2.shutdown()
- self.assertEquals(len(ec.resources), len(ec2.resources))
+ self.assertEqual(len(ec.resources), len(ec2.resources))
shutil.rmtree(dirpath)
try:
os.stat(d + "/" + name)
return d + "/" + name
-        except OSError, e:
-            if e.errno != os.errno.ENOENT:
-                raise
+        except FileNotFoundError:
+            pass
return None
def add_key_to_agent(filename):
ssh_add = find_bin_or_die("ssh-add")
args = [ssh_add, filename]
- null = file("/dev/null", "w")
- assert subprocess.Popen(args, stderr = null).wait() == 0
- null.close()
+ with open("/dev/null", "w") as null:
+ assert subprocess.Popen(args, stderr = null).wait() == 0
def get_free_port():
s = socket.socket()
"""
def gen_sshd_config(filename, port, server_key, auth_keys):
- conf = open(filename, "w")
- text = _SSH_CONF % (port, server_key, auth_keys)
- conf.write(text)
- conf.close()
+ with open(filename, "w") as conf:
+ text = _SSH_CONF % (port, server_key, auth_keys)
+ conf.write(text)
return filename
def gen_auth_keys(pubkey, output, environ):
#opts = ['from="127.0.0.1/32"'] # fails in stupid yans setup
opts = []
- for k, v in environ.items():
+ for k, v in list(environ.items()):
opts.append('environment="%s=%s"' % (k, v))
- lines = file(pubkey).readlines()
+ with open(pubkey) as f:
+ lines = f.readlines()
pubkey = lines[0].split()[0:2]
- out = file(output, "w")
- out.write("%s %s %s\n" % (",".join(opts), pubkey[0], pubkey[1]))
- out.close()
+ with open(output, "w") as out:
+ out.write("%s %s %s\n" % (",".join(opts), pubkey[0], pubkey[1]))
return output
def start_ssh_agent():
# No need to gather the pid, ssh-agent knows how to kill itself; after we
# had set up the environment
ssh_agent = find_bin_or_die("ssh-agent")
- null = file("/dev/null", "w")
- proc = subprocess.Popen([ssh_agent, "-k"], stdout = null)
- null.close()
+ with open("/dev/null", "w") as null:
+ proc = subprocess.Popen([ssh_agent, "-k"], stdout = null)
assert proc.wait() == 0
for k in data:
del os.environ[k]
(outremote, errrmote), premote = rexec(command, host, user,
port = env.port, agent = True)
- self.assertEquals(outlocal, outremote)
+ self.assertEqual(outlocal, outremote)
def test_rcopy_list(self):
env = test_environment()
files.extend(names)
-        os.path.walk(destdir, recls, files)
+        for _dir, _subdirs, _files in os.walk(destdir):
+            recls(files, _dir, _subdirs + _files)
- origfiles = map(lambda s: os.path.basename(s), [dirpath, f.name, f1.name])
+ origfiles = [os.path.basename(s) for s in [dirpath, f.name, f1.name]]
- self.assertEquals(sorted(origfiles), sorted(files))
+ self.assertEqual(sorted(origfiles), sorted(files))
os.remove(f1.name)
shutil.rmtree(dirpath)
files.extend(names)
-        os.path.walk(destdir, recls, files)
+        for _dir, _subdirs, _files in os.walk(destdir):
+            recls(files, _dir, _subdirs + _files)
- origfiles = map(lambda s: os.path.basename(s), [dirpath, f.name, f1.name])
+ origfiles = [os.path.basename(s) for s in [dirpath, f.name, f1.name]]
- self.assertEquals(sorted(origfiles), sorted(files))
+ self.assertEqual(sorted(origfiles), sorted(files))
def test_rproc_manage(self):
env = test_environment()
port = env.port,
agent = True)
- self.assertEquals(status, ProcStatus.RUNNING)
+ self.assertEqual(status, ProcStatus.RUNNING)
rkill(pid, ppid,
host = host,
port = env.port,
agent = True)
- self.assertEquals(status, ProcStatus.FINISHED)
+ self.assertEqual(status, ProcStatus.FINISHED)
if __name__ == '__main__':
seconds1 = _get_total_seconds(date)
seconds2 = date.total_seconds()
- self.assertEquals(seconds1, seconds2)
+ self.assertEqual(seconds1, seconds2)
if __name__ == '__main__':