BUILDDIR = $(CURDIR)/build
DISTDIR = $(CURDIR)/dist
-# stupid distutils, it's broken in so many ways
SUBBUILDDIR = $(shell python -c 'import distutils.util, sys; \
print "lib.%s-%s" % (distutils.util.get_platform(), \
sys.version[0:3])')
test-one: all
echo $(file) $(case)
- PYTHONPATH="$(PYPATH)" python $(file) $(case)
+ PYTHONPATH="$(PYPATH)" $(file) $(case)
coverage: all
rm -f .coverage
username = # SSH user account on host
ssh_key = # Path to SSH public key file to access host
-node = ec.register_resource("LinuxNode")
+node = ec.register_resource("linux::Node")
ec.set(node, "hostname", hostname)
ec.set(node, "username", username)
ec.set(node, "identity", ssh_key)
-app = ec.register_resource("LinuxApplication")
+app = ec.register_resource("linux::Application")
ec.set(app, "command", "ping -c3 nepi.inria.fr")
ec.register_connection(app, node)
Mandatory dependencies:
\begin{itemize}
\item Python 2.6+
- \item Mercurial
+ \item Mercurial
+ \item python-ipaddr
+ \item python-networkx
+ \item python-pygraphviz
+ \item python-matplotlib
\end{itemize}
Optional dependencies:
\fontsize{10pt}{12pt}\selectfont
\begin{verbatim}
- $ sudo aptitude install -y python mercurial
+ $ sudo apt-get -y install python mercurial python-ipaddr python-networkx python-pygraphviz python-matplotlib
\end{verbatim}
\endgroup
\fontsize{10pt}{12pt}\selectfont
\begin{verbatim}
- $ sudo yum install -y python mercurial
+ $ sudo yum -y install python mercurial python-ipaddr python-networkx graphviz-python python-matplotlib
\end{verbatim}
\endgroup
\subsection{Install dependencies on Mac}
First install homebrew (\url{http://mxcl.github.io/homebrew/}),
-then install Python.
+then you can install Python and the rest of the dependencies as follows:
\begingroup
\fontsize{10pt}{12pt}\selectfont
\begin{verbatim}
- $ brew install python
+ $ brew install python
+ $ sudo port install mercurial
+ $ sudo easy_install pip
+ $ sudo pip install ipaddr
+ $ sudo pip install networkx
+ $ sudo pip install pygraphviz
+ $ sudo pip install matplotlib
+\end{verbatim}
+
+\endgroup
+
+To use Python you will need to set the PATH environment variable as follows:
+
+\begingroup
+ \fontsize{10pt}{12pt}\selectfont
+
+\begin{verbatim}
+ $ export PATH=$PATH:/usr/local/share/python
\end{verbatim}
\endgroup
\fontsize{10pt}{12pt}\selectfont
\begin{verbatim}
-$ hg clone http://nepi.inria.fr/code/nepi -r nepi-3.0-release
+$ hg clone http://nepi.inria.fr/code/nepi -r nepi-3-dev
\end{verbatim}
\endgroup
\fontsize{10pt}{12pt}\selectfont
\begin{verbatim}
- $ PYTHONPATH=$PYTHONPATH:<path-to-nepi>/src python experiment.py
+ $ export PYTHONPATH=$PYTHONPATH:<path-to-nepi>/src
\end{verbatim}
\endgroup
Writing a simple NEPI experiment script is easy.
Take a look at the example in the FAQ section \ref{faq:ping_example}.
Once you have written down the script, you can run it using
-Python. Note that since NEPI is not yet installed in your system,
+Python. If NEPI is not installed in your system,
you will need to export the path to NEPI's source code to
the PYTHONPATH environment variable, so that Python can find
NEPI's libraries.
\begin{verbatim}
$ export PYTHONPATH=<path-to-nepi>/src:$PYTHONPATH
- $ python first-experiment.py
+ $ cd <path-to-nepi>
+ $ python examples/linux/ping.py -a localhost
\end{verbatim}
\endgroup
\fontsize{10pt}{12pt}\selectfont
\begin{verbatim}
-
-$ sudo apt-get install ipython
-
+ $ sudo apt-get install ipython
\end{verbatim}
\endgroup
\fontsize{10pt}{12pt}\selectfont
\begin{verbatim}
-
-$ sudo yum install ipython
-
+ $ sudo yum install ipython
\end{verbatim}
\endgroup
\fontsize{10pt}{12pt}\selectfont
\begin{verbatim}
-
-$ pip install ipython
-
+ $ pip install ipython
\end{verbatim}
\endgroup
\fontsize{10pt}{12pt}\selectfont
\begin{verbatim}
-
-$ export PYTHONPATH=$PYTHONPATH:/usr/local/lib/python:/usr/local/share/python/ipython
-
+ $ export PYTHONPATH=$PYTHONPATH:/usr/local/lib/python:/usr/local/share/python/ipython
\end{verbatim}
\endgroup
%cpaste
def add_node(ec, hostname, username, ssh_key):
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
ec.set(node, "hostname", hostname)
ec.set(node, "username", username)
ec.set(node, "identity", ssh_key)
return node
def add_app(ec, command, node):
- app = ec.register_resource("LinuxApplication")
+ app = ec.register_resource("linux::Application")
ec.set(app, "command", command)
ec.register_connection(app, node)
return app
name. pluser and plpassword are the user and password used to authenticate yourself on the PlanetLab web page (www.planet-lab.eu). For example, when registering a PlanetLab node for your experiment, the
experiment description will look a lot like this:
\begin{lstlisting}[language=Python]
-node = ec.register_resource("PlanetlabNode")
+node = ec.register_resource("planetlab::Node")
ec.set(node, "username", "institute_project")
ec.set(node, "pluser", "​​john.doe@institute.edu")
ec.set(node, "plpassword", "guessit")
\end{lstlisting}
Now we know how to add a filter to the node description:
\begin{lstlisting}[language=Python]
- node = ec.register_resource("PlanetlabNode??")
+ node = ec.register_resource("planetlab::Node??")
ec.set(node, "username", "institute_project")
ec.set(node, "pluser", "​​jhon.doe@institute.edu")
ec.set(node, "plpassword", "guessit")
In case of more filters, an AND between the filters will be applied:
\begin{lstlisting}[language=Python]
- node = ec.register_resource("PlanetlabNode??")
+ node = ec.register_resource("planetlab::Node??")
ec.set(node, "username", "institute_project")
ec.set(node, "pluser", "​​jhon.doe@institute.edu")
ec.set(node, "plpassword", "guessit")
In this case, our experiment description will look like this:
\begin{lstlisting}[language=Python]
-node1 = ec.register_resource("PlanetlabNode")
+node1 = ec.register_resource("planetlab::Node")
ec.set(node1, "username", "institute_project")
ec.set(node1, "pluser", "​​john.doe@institute.edu")
ec.set(node1, "plpassword", "guessit")
ec.set(node1, "hostname", "planetlab2.utt.fr")
## planetlab2.utt.fr is the specific node we want to use
-node2 = ec.register_resource("PlanetlabNode")
+node2 = ec.register_resource("planetlab::Node")
ec.set(node2, "username", "institute_project")
ec.set(node2, "pluser", "​​john.doe@institute.edu")
ec.set(node2, "plpassword", "guessit")
PlanetLab nodes may fail for different reasons: SSH authentication failure, corrupted file system, or unreachable nodes, among others. Moreover, the malfunctioning nodes can vary from one experiment run to the next. NEPI provides the ability to register these malfunctioning nodes in order to run the experiment in a more efficient way. This information can also be used to evaluate the performance of the experiment and of the nodes themselves.
-The PlanetlabNode resource, is instantiated for each Planetlab node defined in the experiment. The node discovery and provisioning occurs in parallel for every node defined, so a list of the nodes failures is needed while deploying, in order to avoid to repeat the provision of mal functioning nodes. This list of blacklisted nodes during the experiment, can be saved and maintain for following run of the same experiment or others experiments. This list it is called blacklist. Moreover, the nodes in the blacklist in the moment the experiment is started, can be use to directly discard from the node discover and provision the unwanted nodes.
+The planetlab::Node resource is instantiated for each PlanetLab node defined in the experiment. Node discovery and provisioning occur in parallel for every node defined, so a list of node failures is needed while deploying, in order to avoid repeating the provisioning of malfunctioning nodes. The list of nodes blacklisted during the experiment can be saved and maintained for subsequent runs of the same experiment or of other experiments. This list is called the blacklist. Moreover, the nodes already in the blacklist when the experiment starts can be used to directly discard unwanted nodes from node discovery and provisioning.
-There is an attribute available for this matter, is called 'persist\_blacklist' and is a global attribute, meaning that if set, is set for every resource of type PlanetlabNode.
+There is an attribute available for this purpose, called 'persist\_blacklist'. It is a global attribute, meaning that if set, it is set for every resource of type planetlab::Node.
The blacklist file is stored in ~/.nepi/plblacklist.txt.
Example on how to use the attribute:
Two Planetlab nodes that read from the blacklist at the beginning of the experiment, and write new blacklisted nodes (if any) at the end.
\begin{lstlisting}[language=Python]
-node1 = ec.register_resource("PlanetlabNode")
+node1 = ec.register_resource("planetlab::Node")
ec.set(node1, "username", username)
ec.set(node1, "pluser", pl_user)
ec.set(node1, "plpassword", pl_password)
ec.set(node1, "cleanHome", True)
ec.set(node1, "cleanProcesses", True)
-node2 = ec.register_resource("PlanetlabNode")
+node2 = ec.register_resource("planetlab::Node")
ec.set(node2, "username", username)
ec.set(node2, "pluser", pl_user)
ec.set(node2, "plpassword", pl_password)
ec.set(node2, "cleanHome", True)
ec.set(node2, "cleanProcesses", True)
-ec.set_global('PlanetlabNode', 'persist_blacklist', True)
+ec.set_global("planetlab::Node", 'persist_blacklist', True)
\end{lstlisting}
The attribute can be retrieved with the method get\_global :
\begin{lstlisting}[language=Python]
-ec.get_global('PlanetlabNode', 'persist_blacklist').
+ec.get_global("planetlab::Node", 'persist_blacklist').
\end{lstlisting}
\subsection{SFA Support}
sfauser = os.environ.get('SFA_USER') --- for example 'ple.inria.lucia_guevgeozian_odizzio'
sfaPrivateKey = os.environ.get('SFA_PK') --- for example '/home/.sfi/lucia_guevgeozian_odizzio.pkey'
-node1 = ec.register_resource("PlanetlabSfaNode")
+node1 = ec.register_resource("planetlab::sfa::Node")
ec.set(node1, "hostname", 'planetlab1.cs.vu.nl')
ec.set(node1, "username", username)
ec.set(node1, "sfauser", sfauser)
\end{lstlisting}
\subsubsection{SFA with iMinds Testbed (w-iLab.t)}
-The control and management software running in w-iLab.t is OMF 6, but its resources can be discover and provisioned using SFA, the experiment description for the WilabtSfaNode in NEPI is similar to the one in PlanetlabNode. Below is an example :
+The control and management software running in w-iLab.t is OMF 6, but its resources can be discovered and provisioned using SFA. The experiment description for the wilabt::sfa::Node in NEPI is similar to the one for planetlab::Node. Below is an example:
\begin{lstlisting}[language=Python]
from nepi.execution.ec import ExperimentController
import os
sfaPrivateKey = os.environ.get('SFA_PK')
# nodes
-node1 = ec.register_resource("WilabtSfaNode")
+node1 = ec.register_resource("wilabt::sfa::Node")
ec.set(node1, "hostname", 'zotacM20')
ec.set(node1, "slicename", slicename)
ec.set(node1, "sfauser", sfauser)
--- /dev/null
+#!/usr/bin/env python
+
+###############################################################################
+#
+# NEPI, a framework to manage network experiments
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+#
+###############################################################################
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.runner import ExperimentRunner
+from nepi.util.netgraph import NetGraph, TopologyType
+import nepi.data.processing.ccn.parser as ccn_parser
+
+import networkx
+import socket
+import os
+
+content_name = "ccnx:/test/bunny.ts"
+
+STOP_TIME = "5000s"
+
+repofile = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), "repoFile1.0.8.2")
+
+def get_simulator(ec):
+ simulator = ec.filter_resources("linux::ns3::Simulation")
+
+ if not simulator:
+ node = ec.register_resource("linux::Node")
+ ec.set(node, "hostname", "localhost")
+
+ simu = ec.register_resource("linux::ns3::Simulation")
+ ec.set(simu, "enableDump", True)
+ ec.set(simu, "StopTime", STOP_TIME)
+ ec.register_connection(simu, node)
+ return simu
+
+ return simulator[0]
+
+def add_collector(ec, trace_name, subdir, newname = None):
+ collector = ec.register_resource("Collector")
+ ec.set(collector, "traceName", trace_name)
+ ec.set(collector, "subDir", subdir)
+ if newname:
+ ec.set(collector, "rename", newname)
+
+ return collector
+
+def add_dce_host(ec, nid):
+ simu = get_simulator(ec)
+
+ host = ec.register_resource("ns3::Node")
+ ec.set(host, "enableStack", True)
+ ec.register_connection(host, simu)
+
+ # Annotate the graph
+ ec.netgraph.annotate_node(nid, "host", host)
+
+def add_dce_ccnd(ec, nid):
+ # Retrieve annotation from netgraph
+ host = ec.netgraph.node_annotation(nid, "host")
+
+ # Add dce ccnd to the dce node
+ ccnd = ec.register_resource("linux::ns3::dce::CCND")
+ ec.set (ccnd, "stackSize", 1<<20)
+ ec.set (ccnd, "debug", 7)
+ ec.set (ccnd, "capacity", 50000)
+ ec.set (ccnd, "StartTime", "1s")
+ ec.register_connection(ccnd, host)
+
+ # Collector to retrieve ccnd log
+ collector = add_collector(ec, "stderr", nid, "log")
+ ec.register_connection(collector, ccnd)
+
+ # Annotate the graph
+ ec.netgraph.annotate_node(nid, "ccnd", ccnd)
+
+def add_dce_ccnr(ec, nid):
+ # Retrieve annotation from netgraph
+ host = ec.netgraph.node_annotation(nid, "host")
+
+ # Add a CCN content repository to the dce node
+ ccnr = ec.register_resource("linux::ns3::dce::CCNR")
+ ec.set (ccnr, "repoFile1", repofile)
+ ec.set (ccnr, "stackSize", 1<<20)
+ ec.set (ccnr, "StartTime", "2s")
+ ec.register_connection(ccnr, host)
+
+def add_dce_ccncat(ec, nid):
+ # Retrieve annotation from netgraph
+ host = ec.netgraph.node_annotation(nid, "host")
+
+ # Add a ccncat application to the dce host
+ ccncat = ec.register_resource("linux::ns3::dce::CCNCat")
+ ec.set (ccncat, "contentName", content_name)
+ ec.set (ccncat, "stackSize", 1<<20)
+ ec.set (ccncat, "StartTime", "8s")
+ ec.register_connection(ccncat, host)
+
+def add_dce_fib_entry(ec, nid1, nid2):
+ # Retrieve annotations from netgraph
+ host1 = ec.netgraph.node_annotation(nid1, "host")
+ net = ec.netgraph.edge_net_annotation(nid1, nid2)
+ ip2 = net[nid2]
+
+ # Add FIB entry between peer hosts
+ ccndc = ec.register_resource("linux::ns3::dce::FIBEntry")
+ ec.set (ccndc, "protocol", "udp")
+ ec.set (ccndc, "uri", "ccnx:/")
+ ec.set (ccndc, "host", ip2)
+ ec.set (ccndc, "stackSize", 1<<20)
+ ec.set (ccndc, "StartTime", "4s")
+ ec.register_connection(ccndc, host1)
+
+def add_dce_net_iface(ec, nid1, nid2):
+ # Retrieve annotations from netgraph
+ host = ec.netgraph.node_annotation(nid1, "host")
+ net = ec.netgraph.edge_net_annotation(nid1, nid2)
+ ip1 = net[nid1]
+ prefix = net["prefix"]
+
+ dev = ec.register_resource("ns3::PointToPointNetDevice")
+ ec.set(dev,"DataRate", "5Mbps")
+ ec.set(dev, "ip", ip1)
+ ec.set(dev, "prefix", prefix)
+ ec.register_connection(host, dev)
+
+ queue = ec.register_resource("ns3::DropTailQueue")
+ ec.register_connection(dev, queue)
+
+ return dev
+
+def avg_interests(ec, run):
+ ## Process logs
+ logs_dir = ec.run_dir
+
+ (graph,
+ content_names,
+ interest_expiry_count,
+ interest_dupnonce_count,
+ interest_count,
+ content_count) = ccn_parser.process_content_history_logs(
+ logs_dir,
+ ec.netgraph.topology)
+
+ shortest_path = networkx.shortest_path(graph,
+ source = ec.netgraph.sources()[0],
+ target = ec.netgraph.targets()[0])
+
+ ### Compute metric: Avg number of Interests seen per content name
+ ### normalized by the number of nodes in the shortest path
+ content_name_count = len(content_names.values())
+ nodes_in_shortest_path = len(shortest_path) - 1
+ metric = interest_count / (float(content_name_count) * float(nodes_in_shortest_path))
+
+ # TODO: DUMP RESULTS TO FILE
+ # TODO: DUMP GRAPH DELAYS!
+ f = open("/tmp/metric", "a+")
+ f.write("%.2f\n" % metric)
+ f.close()
+ print " METRIC", metric
+
+ return metric
+
+def add_dce_edge(ec, nid1, nid2):
+ ### Add network interfaces to hosts
+ p2p1 = add_dce_net_iface(ec, nid1, nid2)
+ p2p2 = add_dce_net_iface(ec, nid2, nid1)
+
+ # Create point to point link between interfaces
+ chan = ec.register_resource("ns3::PointToPointChannel")
+ ec.set(chan, "Delay", "0ms")
+
+ ec.register_connection(chan, p2p1)
+ ec.register_connection(chan, p2p2)
+
+ #### Add routing between CCN nodes
+ add_dce_fib_entry(ec, nid1, nid2)
+ add_dce_fib_entry(ec, nid2, nid1)
+
+def add_dce_node(ec, nid):
+ ### Add CCN nodes (ec.netgraph holds the topology graph)
+ add_dce_host(ec, nid)
+ add_dce_ccnd(ec, nid)
+
+ if nid == ec.netgraph.targets()[0]:
+ add_dce_ccnr(ec, nid)
+
+ if nid == ec.netgraph.sources()[0]:
+ add_dce_ccncat(ec, nid)
+
+if __name__ == '__main__':
+
+ #### Create NEPI Experiment Description with LINEAR topology
+ ec = ExperimentController("dce_ccn",
+ topo_type = TopologyType.LINEAR,
+ node_count = 2,
+ assign_st = True,
+ assign_ips = True,
+ add_node_callback = add_dce_node,
+ add_edge_callback = add_dce_edge)
+
+ print "Results stored at", ec.exp_dir
+
+    #### Retrieve the consumer to wait for it to finish
+ ccncat = ec.filter_resources("linux::ns3::dce::CCNCat")
+
+ #### Run experiment until metric convergences
+ rnr = ExperimentRunner()
+ runs = rnr.run(ec, min_runs = 1, max_runs = 1,
+ compute_metric_callback = avg_interests,
+ wait_guids = ccncat,
+ wait_time = 0)
+
--- /dev/null
+#!/usr/bin/env python\r
+\r
+###############################################################################\r
+#\r
+# NEPI, a framework to manage network experiments\r
+#\r
+# This program is free software: you can redistribute it and/or modify\r
+# it under the terms of the GNU General Public License as published by\r
+# the Free Software Foundation, either version 3 of the License, or\r
+# (at your option) any later version.\r
+#\r
+# This program is distributed in the hope that it will be useful,\r
+# but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
+# GNU General Public License for more details.\r
+#\r
+# You should have received a copy of the GNU General Public License\r
+# along with this program. If not, see <http://www.gnu.org/licenses/>.\r
+#\r
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>\r
+#\r
+###############################################################################\r
+from nepi.execution.ec import ExperimentController \r
+from nepi.execution.runner import ExperimentRunner\r
+from nepi.util.netgraph import TopologyType\r
+import nepi.data.processing.ccn.parser as ccn_parser\r
+\r
+import networkx\r
+import socket\r
+import os\r
+import numpy\r
+from scipy import stats\r
+from matplotlib import pyplot\r
+import math\r
+import random\r
+\r
+def avg_interest_rtt(ec, run):\r
+ logs_dir = ec.run_dir\r
+ \r
+ # Parse downloaded CCND logs\r
+ (graph,\r
+ content_names,\r
+ interest_expiry_count,\r
+ interest_dupnonce_count,\r
+ interest_count,\r
+ content_count) = ccn_parser.process_content_history_logs(\r
+ logs_dir, ec.netgraph.topology)\r
+\r
+ # statistics on RTT\r
+ rtts = [content_names[content_name]["rtt"] \\r
+ for content_name in content_names.keys()]\r
+\r
+ # sample mean and standard deviation\r
+ sample = numpy.array(rtts)\r
+ n, min_max, mean, var, skew, kurt = stats.describe(sample)\r
+ std = math.sqrt(var)\r
+ ci = stats.t.interval(0.95, n-1, loc = mean, \r
+ scale = std/math.sqrt(n))\r
+\r
+ global metrics\r
+ metrics.append((mean, ci[0], ci[1]))\r
+ \r
+ return mean\r
+\r
+def normal_law(ec, run, sample):\r
+ x = numpy.array(sample)\r
+ n = len(sample)\r
+ std = x.std()\r
+ se = std / math.sqrt(n)\r
+ m = x.mean()\r
+ se95 = se * 2\r
+ \r
+ return m * 0.05 >= se95\r
+\r
+def post_process(ec, runs):\r
+ global metrics\r
+ \r
+ # plot convergence graph\r
+ y = numpy.array([float(m[0]) for m in metrics])\r
+ low = numpy.array([float(m[1]) for m in metrics])\r
+ high = numpy.array([float(m[2]) for m in metrics])\r
+ error = [y - low, high - y]\r
+ x = range(1,runs + 1)\r
+\r
+ # plot average RTT and confidence interval for each iteration\r
+ pyplot.errorbar(x, y, yerr = error, fmt='o')\r
+ pyplot.plot(x, y, 'r-')\r
+ pyplot.xlim([0.5, runs + 0.5])\r
+ pyplot.xticks(numpy.arange(1, len(y)+1, 1))\r
+ pyplot.xlabel('Iteration')\r
+ pyplot.ylabel('Average RTT')\r
+ pyplot.grid()\r
+ pyplot.savefig("plot.png")\r
+ pyplot.show()\r
+\r
+content_name = "ccnx:/test/bunny.ts"\r
+\r
+STOP_TIME = "5000s"\r
+\r
+repofile = os.path.join(\r
+ os.path.dirname(os.path.realpath(__file__)), \r
+ "repoFile1.0.8.2")\r
+\r
+def get_simulator(ec):\r
+ simulator = ec.filter_resources("linux::ns3::Simulation")\r
+\r
+ if not simulator:\r
+ node = ec.register_resource("linux::Node")\r
+ ec.set(node, "hostname", "localhost")\r
+\r
+ simu = ec.register_resource("linux::ns3::Simulation")\r
+ ec.register_connection(simu, node)\r
+ return simu\r
+\r
+ return simulator[0]\r
+\r
+def add_collector(ec, trace_name, subdir, newname = None):\r
+ collector = ec.register_resource("Collector")\r
+ ec.set(collector, "traceName", trace_name)\r
+ ec.set(collector, "subDir", subdir)\r
+ if newname:\r
+ ec.set(collector, "rename", newname)\r
+\r
+ return collector\r
+\r
+def add_dce_host(ec, nid):\r
+ simu = get_simulator(ec)\r
+ \r
+ host = ec.register_resource("ns3::Node")\r
+ ec.set(host, "enableStack", True)\r
+ ec.register_connection(host, simu)\r
+\r
+ # Annotate the graph\r
+ ec.netgraph.annotate_node(nid, "host", host)\r
+ \r
+def add_dce_ccnd(ec, nid):\r
+ # Retrieve annotation from netgraph\r
+ host = ec.netgraph.node_annotation(nid, "host")\r
+ \r
+ # Add dce ccnd to the dce node\r
+ ccnd = ec.register_resource("linux::ns3::dce::CCND")\r
+ ec.set (ccnd, "stackSize", 1<<20)\r
+ ec.set (ccnd, "debug", 7)\r
+ ec.set (ccnd, "capacity", 50000)\r
+ ec.set (ccnd, "StartTime", "1s")\r
+ ec.set (ccnd, "StopTime", STOP_TIME)\r
+ ec.register_connection(ccnd, host)\r
+\r
+ # Collector to retrieve ccnd log\r
+ collector = add_collector(ec, "stderr", str(nid), "log")\r
+ ec.register_connection(collector, ccnd)\r
+\r
+ # Annotate the graph\r
+ ec.netgraph.annotate_node(nid, "ccnd", ccnd)\r
+\r
+def add_dce_ccnr(ec, nid):\r
+ # Retrieve annotation from netgraph\r
+ host = ec.netgraph.node_annotation(nid, "host")\r
+ \r
+ # Add a CCN content repository to the dce node\r
+ ccnr = ec.register_resource("linux::ns3::dce::CCNR")\r
+ ec.set (ccnr, "repoFile1", repofile) \r
+ ec.set (ccnr, "stackSize", 1<<20)\r
+ ec.set (ccnr, "StartTime", "2s")\r
+ ec.set (ccnr, "StopTime", STOP_TIME)\r
+ ec.register_connection(ccnr, host)\r
+\r
+def add_dce_ccncat(ec, nid):\r
+ # Retrieve annotation from netgraph\r
+ host = ec.netgraph.node_annotation(nid, "host")\r
+ \r
+ # Add a ccncat application to the dce host\r
+ ccncat = ec.register_resource("linux::ns3::dce::CCNCat")\r
+ ec.set (ccncat, "contentName", content_name)\r
+ ec.set (ccncat, "stackSize", 1<<20)\r
+ ec.set (ccncat, "StartTime", "8s")\r
+ ec.set (ccncat, "StopTime", STOP_TIME)\r
+ ec.register_connection(ccncat, host)\r
+\r
+def add_dce_fib_entry(ec, nid1, nid2):\r
+ # Retrieve annotations from netgraph\r
+ host1 = ec.netgraph.node_annotation(nid1, "host")\r
+ net = ec.netgraph.edge_net_annotation(nid1, nid2)\r
+ ip2 = net[nid2]\r
+\r
+ # Add FIB entry between peer hosts\r
+ ccndc = ec.register_resource("linux::ns3::dce::FIBEntry")\r
+ ec.set (ccndc, "protocol", "udp") \r
+ ec.set (ccndc, "uri", "ccnx:/") \r
+ ec.set (ccndc, "host", ip2)\r
+ ec.set (ccndc, "stackSize", 1<<20)\r
+ ec.set (ccndc, "StartTime", "2s")\r
+ ec.set (ccndc, "StopTime", STOP_TIME)\r
+ ec.register_connection(ccndc, host1)\r
+\r
+def add_dce_net_iface(ec, nid1, nid2):\r
+ # Retrieve annotations from netgraph\r
+ host = ec.netgraph.node_annotation(nid1, "host")\r
+ net = ec.netgraph.edge_net_annotation(nid1, nid2)\r
+ ip1 = net[nid1]\r
+ prefix = net["prefix"]\r
+\r
+ dev = ec.register_resource("ns3::PointToPointNetDevice")\r
+ ec.set(dev,"DataRate", "5Mbps")\r
+ ec.set(dev, "ip", ip1)\r
+ ec.set(dev, "prefix", prefix)\r
+ ec.register_connection(host, dev)\r
+\r
+ queue = ec.register_resource("ns3::DropTailQueue")\r
+ ec.register_connection(dev, queue)\r
+\r
+ return dev\r
+\r
+def add_edge(ec, nid1, nid2):\r
+ ### Add network interfaces to hosts\r
+ p2p1 = add_dce_net_iface(ec, nid1, nid2)\r
+ p2p2 = add_dce_net_iface(ec, nid2, nid1)\r
+\r
+ # Create point to point link between interfaces\r
+ chan = ec.register_resource("ns3::PointToPointChannel")\r
+ ec.set(chan, "Delay", "0ms")\r
+\r
+ ec.register_connection(chan, p2p1)\r
+ ec.register_connection(chan, p2p2)\r
+\r
+ #### Add routing between CCN nodes\r
+ add_dce_fib_entry(ec, nid1, nid2)\r
+ add_dce_fib_entry(ec, nid2, nid1)\r
+\r
+def add_node(ec, nid):\r
+ ### Add CCN nodes (ec.netgraph holds the topology graph)\r
+ add_dce_host(ec, nid)\r
+ add_dce_ccnd(ec, nid)\r
+ \r
+ if nid == ec.netgraph.targets()[0]:\r
+ add_dce_ccnr(ec, nid)\r
+\r
+ if nid == ec.netgraph.sources()[0]:\r
+ add_dce_ccncat(ec, nid)\r
+\r
+def wait_guids(ec):\r
+ return ec.filter_resources("linux::ns3::dce::CCNCat")\r
+\r
+if __name__ == '__main__':\r
+\r
+ metrics = []\r
+\r
+ # topology translation to NEPI model\r
+ ec = ExperimentController("dce_4n_linear",\r
+ topo_type = TopologyType.LINEAR, \r
+ node_count = 4,\r
+ assign_st = True,\r
+ assign_ips = True,\r
+ add_node_callback = add_node,\r
+ add_edge_callback = add_edge)\r
+\r
+ #### Run experiment until metric convergence\r
+ rnr = ExperimentRunner()\r
+ runs = rnr.run(ec,\r
+ min_runs = 10,\r
+ max_runs = 100, \r
+ compute_metric_callback = avg_interest_rtt,\r
+ evaluate_convergence_callback = normal_law,\r
+ wait_guids = wait_guids(ec))\r
+ \r
+ ### post processing\r
+ post_process(ec, runs)\r
+\r
+\r
--- /dev/null
+#!/usr/bin/env python
+
+###############################################################################
+#
+# NEPI, a framework to manage network experiments
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+#
+###############################################################################
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.runner import ExperimentRunner
+from nepi.util.netgraph import NetGraph, TopologyType
+import nepi.data.processing.ccn.parser as ccn_parser
+
+import networkx
+import socket
+import os
+
+PL_NODES = dict({
+ 0: "iraplab1.iralab.uni-karlsruhe.de",
+ 1: "planetvs2.informatik.uni-stuttgart.de",
+ 2: "dfn-ple1.x-win.dfn.de",
+ 3: "planetlab2.extern.kuleuven.be",
+ 4: "mars.planetlab.haw-hamburg.de",
+ 5: "planetlab-node3.it-sudparis.eu",
+ 6: "node2pl.planet-lab.telecom-lille1.eu",
+ 7: "planetlab1.informatik.uni-wuerzburg.de",
+ 8: "planet1.l3s.uni-hannover.de",
+ 9: "planetlab1.wiwi.hu-berlin.de",
+ 10: "pl2.uni-rostock.de",
+ 11: "planetlab1.u-strasbg.fr",
+ 12: "peeramidion.irisa.fr",
+ 13: "planetlab2.unineuchatel.ch",
+ })
+
+pl_slice = os.environ.get("PL_SLICE")
+pl_user = os.environ.get("PL_USER")
+pl_password = os.environ.get("PL_PASS")
+pl_ssh_key = os.environ.get("PL_SSHKEY")
+
+content_name = "ccnx:/test/bunny.ts"
+
+pipeline = 4 # Default value for ccncat
+
+operating_system = "f14"
+
+country = "germany"
+
+repofile = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), "repoFile1.0.8.2")
+
+def add_collector(ec, trace_name, subdir, newname = None):
+ collector = ec.register_resource("Collector")
+ ec.set(collector, "traceName", trace_name)
+ ec.set(collector, "subDir", subdir)
+ if newname:
+ ec.set(collector, "rename", newname)
+
+ return collector
+
+def add_pl_host(ec, nid):
+ hostname = PL_NODES[nid]
+
+ # Add a planetlab host to the experiment description
+ host = ec.register_resource("planetlab::Node")
+ ec.set(host, "hostname", hostname)
+ ec.set(host, "username", pl_slice)
+ ec.set(host, "identity", pl_ssh_key)
+ #ec.set(host, "pluser", pl_user)
+ #ec.set(host, "plpassword", pl_password)
+ #ec.set(host, "country", country)
+ #ec.set(host, "operatingSystem", operating_system)
+ ec.set(host, "cleanExperiment", True)
+ ec.set(host, "cleanProcesses", True)
+
+ # Annotate the graph
+ ec.netgraph.annotate_node(nid, "hostname", hostname)
+ ec.netgraph.annotate_node(nid, "host", host)
+
+ # Annotate the graph node with an ip address
+ ip = socket.gethostbyname(hostname)
+ ec.netgraph.annotate_node_ip(nid, ip)
+
+def add_pl_ccnd(ec, nid):
+ # Retrieve annotation from netgraph
+ host = ec.netgraph.node_annotation(nid, "host")
+
+ # Add a CCN daemon to the planetlab node
+ ccnd = ec.register_resource("linux::CCND")
+ ec.set(ccnd, "debug", 7)
+ ec.register_connection(ccnd, host)
+
+ # Collector to retrieve ccnd log
+ collector = add_collector(ec, "stderr", nid, "log")
+ ec.register_connection(collector, ccnd)
+
+ # Annotate the graph
+ ec.netgraph.annotate_node(nid, "ccnd", ccnd)
+
+def add_pl_ccnr(ec, nid):
+ # Retrieve annotation from netgraph
+ ccnd = ec.netgraph.node_annotation(nid, "ccnd")
+
+ # Add a CCN content repository to the planetlab node
+ ccnr = ec.register_resource("linux::CCNR")
+
+ ec.set(ccnr, "repoFile1", repofile)
+ ec.register_connection(ccnr, ccnd)
+
+def add_pl_ccncat(ec, nid):
+ # Retrieve annotation from netgraph
+ ccnd = ec.netgraph.node_annotation(nid, "ccnd")
+
+ # Add a CCN cat application to the planetlab node
+ ccncat = ec.register_resource("linux::CCNCat")
+ ec.set(ccncat, "pipeline", pipeline)
+ ec.set(ccncat, "contentName", content_name)
+ ec.register_connection(ccncat, ccnd)
+
+def add_pl_fib_entry(ec, nid1, nid2):
+ # Retrieve annotations from netgraph
+ ccnd1 = ec.netgraph.node_annotation(nid1, "ccnd")
+ hostname2 = ec.netgraph.node_annotation(nid2, "hostname")
+
+ # Add a FIB entry between one planetlab node and its peer
+ entry = ec.register_resource("linux::FIBEntry")
+ ec.set(entry, "host", hostname2)
+ ec.register_connection(entry, ccnd1)
+
+ # Collector to retrieve peering ping output (to measure neighbors delay)
+ ec.enable_trace(entry, "ping")
+ collector = add_collector(ec, "ping", nid1)
+ ec.register_connection(collector, entry)
+
+ return entry
+
+def avg_interests(ec, run):
+ ## Process logs
+ logs_dir = ec.run_dir
+
+ (graph,
+ content_names,
+ interest_expiry_count,
+ interest_dupnonce_count,
+ interest_count,
+ content_count) = ccn_parser.process_content_history_logs(
+ logs_dir,
+ ec.netgraph.topology,
+ parse_ping_logs = True)
+
+ shortest_path = networkx.shortest_path(graph,
+ source = ec.netgraph.sources()[0],
+ target = ec.netgraph.targets()[0])
+
+ ### Compute metric: Avg number of Interests seen per content name
+ ### normalized by the number of nodes in the shortest path
+ content_name_count = len(content_names.values())
+ nodes_in_shortest_path = len(shortest_path) - 1
+ metric = interest_count / (float(content_name_count) * float(nodes_in_shortest_path))
+
+ # TODO: DUMP RESULTS TO FILE
+ # TODO: DUMP GRAPH DELAYS!
+ f = open("/tmp/metric", "a+")
+ f.write("%.2f\n" % metric)
+ f.close()
+ print " METRIC", metric
+
+ return metric
+
+def add_pl_edge(ec, nid1, nid2):
+ #### Add connections between CCN nodes
+ add_pl_fib_entry(ec, nid1, nid2)
+ add_pl_fib_entry(ec, nid2, nid1)
+
+def add_pl_node(ec, nid):
+ ### Add CCN nodes (ec.netgraph holds the topology graph)
+ add_pl_host(ec, nid)
+ add_pl_ccnd(ec, nid)
+
+ if nid == ec.netgraph.targets()[0]:
+ add_pl_ccnr(ec, nid)
+
+ if nid == ec.netgraph.sources()[0]:
+ add_pl_ccncat(ec, nid)
+
+if __name__ == '__main__':
+
+ #### Create NEPI Experiment Description with LINEAR topology
+ ec = ExperimentController("pl_ccn",
+ topo_type = TopologyType.LINEAR,
+ node_count = 4,
+ #assign_ips = True,
+ assign_st = True,
+ add_node_callback = add_pl_node,
+ add_edge_callback = add_pl_edge)
+
+ print "Results stored at", ec.exp_dir
+
+ #### Retrieve the content producing resource to wait for ot to finish
+ ccncat = ec.filter_resources("linux::CCNCat")
+
+ #### Run experiment until metric convergences
+ rnr = ExperimentRunner()
+ runs = rnr.run(ec, min_runs = 10, max_runs = 300,
+ compute_metric_callback = avg_interests,
+ wait_guids = ccncat,
+ wait_time = 0)
+
--- /dev/null
+#!/usr/bin/env python\r
+\r
+###############################################################################\r
+#\r
+# NEPI, a framework to manage network experiments\r
+#\r
+# This program is free software: you can redistribute it and/or modify\r
+# it under the terms of the GNU General Public License as published by\r
+# the Free Software Foundation, either version 3 of the License, or\r
+# (at your option) any later version.\r
+#\r
+# This program is distributed in the hope that it will be useful,\r
+# but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
+# GNU General Public License for more details.\r
+#\r
+# You should have received a copy of the GNU General Public License\r
+# along with this program. If not, see <http://www.gnu.org/licenses/>.\r
+#\r
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>\r
+#\r
+###############################################################################\r
+\r
+from nepi.execution.ec import ExperimentController \r
+from nepi.execution.runner import ExperimentRunner\r
+from nepi.util.netgraph import TopologyType\r
+import nepi.data.processing.ccn.parser as ccn_parser\r
+\r
+import networkx\r
+import socket\r
+import os\r
+import numpy\r
+from scipy import stats\r
+from matplotlib import pyplot\r
+import math\r
+import random\r
+\r
+from optparse import OptionParser\r
+\r
+usage = ("usage: %prog -s <pl-slice> -u <pl-user> -p <pl-password> "\r
+ "-k <pl-ssh-key> -N <nodes>")\r
+\r
+parser = OptionParser(usage = usage)\r
+parser.add_option("-s", "--pl-slice", dest="pl_slice",\r
+ help="PlanetLab slicename", type="str")\r
+parser.add_option("-u", "--pl-user", dest="pl_user",\r
+ help="PlanetLab web username", type="str")\r
+parser.add_option("-p", "--pl-password", dest="pl_password",\r
+ help="PlanetLab web password", type="str")\r
+parser.add_option("-k", "--pl-ssh-key", dest="pl_ssh_key",\r
+ help="Path to private SSH key associated with the PL account",\r
+ type="str")\r
+parser.add_option("-N", "--nodes", dest="nodes",\r
+ help="Comma separated list of Planetlab nodes",\r
+ type="str")\r
+\r
+(options, args) = parser.parse_args()\r
+\r
+pl_slice = options.pl_slice\r
+pl_ssh_key = options.pl_ssh_key\r
+pl_user = options.pl_user\r
+pl_password = options.pl_password\r
+NODES = options.nodes.strip().split(",")\r
+\r
+def avg_interest_rtt(ec, run):\r
+ logs_dir = ec.run_dir\r
+ \r
+ # Parse downloaded CCND logs\r
+ (graph,\r
+ content_names,\r
+ interest_expiry_count,\r
+ interest_dupnonce_count,\r
+ interest_count,\r
+ content_count) = ccn_parser.process_content_history_logs(\r
+ logs_dir, ec.netgraph.topology)\r
+\r
+ # statistics on RTT\r
+ rtts = [content_names[content_name]["rtt"] \\r
+ for content_name in content_names.keys()]\r
+\r
+ # sample mean and standard deviation\r
+ sample = numpy.array(rtts)\r
+ n, min_max, mean, var, skew, kurt = stats.describe(sample)\r
+ std = math.sqrt(var)\r
+ ci = stats.t.interval(0.95, n-1, loc = mean, \r
+ scale = std/math.sqrt(n))\r
+\r
+ global metrics\r
+ metrics.append((mean, ci[0], ci[1]))\r
+ \r
+ return mean\r
+\r
+def normal_law(ec, run, sample):\r
+ print "SAMPLE", sample\r
+\r
+ x = numpy.array(sample)\r
+ n = len(sample)\r
+ std = x.std()\r
+ se = std / math.sqrt(n)\r
+ m = x.mean()\r
+ se95 = se * 2\r
+ \r
+ return m * 0.05 >= se95\r
+\r
+def post_process(ec, runs):\r
+ global metrics\r
+ \r
+ # plot convergence graph\r
+ y = numpy.array([float(m[0]) for m in metrics])\r
+ low = numpy.array([float(m[1]) for m in metrics])\r
+ high = numpy.array([float(m[2]) for m in metrics])\r
+ error = [y - low, high - y]\r
+ x = range(1,runs + 1)\r
+\r
+ # plot average RTT and confidence interval for each iteration\r
+ pyplot.errorbar(x, y, yerr = error, fmt='o')\r
+ pyplot.plot(x, y, 'r-')\r
+ pyplot.xlim([0.5, runs + 0.5])\r
+ pyplot.xticks(numpy.arange(1, len(y)+1, 1))\r
+ pyplot.xlabel('Iteration')\r
+ pyplot.ylabel('Average RTT')\r
+ pyplot.grid()\r
+ pyplot.savefig("plot.png")\r
+ pyplot.show()\r
+\r
+content_name = "ccnx:/test/bunny.ts"\r
+\r
+repofile = os.path.join(\r
+ os.path.dirname(os.path.realpath(__file__)), \r
+ "repoFile1.0.8.2")\r
+\r
+def get_simulator(ec):\r
+ simulator = ec.filter_resources("linux::ns3::Simulation")\r
+\r
+ if not simulator:\r
+ node = ec.register_resource("linux::Node")\r
+ ec.set(node, "hostname", "localhost")\r
+\r
+ simu = ec.register_resource("linux::ns3::Simulation")\r
+ ec.register_connection(simu, node)\r
+ return simu\r
+\r
+ return simulator[0]\r
+\r
+def add_collector(ec, trace_name, subdir, newname = None):\r
+ collector = ec.register_resource("Collector")\r
+ ec.set(collector, "traceName", trace_name)\r
+ ec.set(collector, "subDir", subdir)\r
+ if newname:\r
+ ec.set(collector, "rename", newname)\r
+\r
+ return collector\r
+\r
+def add_dce_host(ec, nid):\r
+ simu = get_simulator(ec)\r
+ \r
+ host = ec.register_resource("ns3::Node")\r
+ ec.set(host, "enableStack", True)\r
+ ec.register_connection(host, simu)\r
+\r
+ # Annotate the graph\r
+ ec.netgraph.annotate_node(nid, "host", host)\r
+ \r
+def add_dce_ccnd(ec, nid):\r
+ # Retrieve annotation from netgraph\r
+ host = ec.netgraph.node_annotation(nid, "host")\r
+ \r
+ # Add dce ccnd to the dce node\r
+ ccnd = ec.register_resource("linux::ns3::dce::CCND")\r
+ ec.set (ccnd, "stackSize", 1<<20)\r
+ ec.set (ccnd, "debug", 7)\r
+ ec.set (ccnd, "capacity", 50000)\r
+ ec.set (ccnd, "StartTime", "1s")\r
+ ec.set (ccnd, "StopTime", STOP_TIME)\r
+ ec.register_connection(ccnd, host)\r
+\r
+ # Collector to retrieve ccnd log\r
+ collector = add_collector(ec, "stderr", str(nid), "log")\r
+ ec.register_connection(collector, ccnd)\r
+\r
+ # Annotate the graph\r
+ ec.netgraph.annotate_node(nid, "ccnd", ccnd)\r
+\r
+def add_dce_ccnr(ec, nid):\r
+ # Retrieve annotation from netgraph\r
+ host = ec.netgraph.node_annotation(nid, "host")\r
+ \r
+ # Add a CCN content repository to the dce node\r
+ ccnr = ec.register_resource("linux::ns3::dce::CCNR")\r
+ ec.set (ccnr, "repoFile1", repofile) \r
+ ec.set (ccnr, "stackSize", 1<<20)\r
+ ec.set (ccnr, "StartTime", "2s")\r
+ ec.set (ccnr, "StopTime", STOP_TIME)\r
+ ec.register_connection(ccnr, host)\r
+\r
+def add_dce_ccncat(ec, nid):\r
+ # Retrieve annotation from netgraph\r
+ host = ec.netgraph.node_annotation(nid, "host")\r
+ \r
+ # Add a ccncat application to the dce host\r
+ ccncat = ec.register_resource("linux::ns3::dce::CCNCat")\r
+ ec.set (ccncat, "contentName", content_name)\r
+ ec.set (ccncat, "stackSize", 1<<20)\r
+ ec.set (ccncat, "StartTime", "8s")\r
+ ec.set (ccncat, "StopTime", STOP_TIME)\r
+ ec.register_connection(ccncat, host)\r
+\r
+def add_dce_fib_entry(ec, nid1, nid2):\r
+ # Retrieve annotations from netgraph\r
+ host1 = ec.netgraph.node_annotation(nid1, "host")\r
+ net = ec.netgraph.edge_net_annotation(nid1, nid2)\r
+ ip2 = net[nid2]\r
+\r
+ # Add FIB entry between peer hosts\r
+ ccndc = ec.register_resource("linux::ns3::dce::FIBEntry")\r
+ ec.set (ccndc, "protocol", "udp") \r
+ ec.set (ccndc, "uri", "ccnx:/") \r
+ ec.set (ccndc, "host", ip2)\r
+ ec.set (ccndc, "stackSize", 1<<20)\r
+ ec.set (ccndc, "StartTime", "2s")\r
+ ec.set (ccndc, "StopTime", STOP_TIME)\r
+ ec.register_connection(ccndc, host1)\r
+\r
+def add_dce_net_iface(ec, nid1, nid2):\r
+ # Retrieve annotations from netgraph\r
+ host = ec.netgraph.node_annotation(nid1, "host")\r
+ net = ec.netgraph.edge_net_annotation(nid1, nid2)\r
+ ip1 = net[nid1]\r
+ prefix = net["prefix"]\r
+\r
+ dev = ec.register_resource("ns3::PointToPointNetDevice")\r
+ ec.set(dev,"DataRate", "5Mbps")\r
+ ec.set(dev, "ip", ip1)\r
+ ec.set(dev, "prefix", prefix)\r
+ ec.register_connection(host, dev)\r
+\r
+ queue = ec.register_resource("ns3::DropTailQueue")\r
+ ec.register_connection(dev, queue)\r
+\r
+ return dev\r
+\r
+def add_pl_host(ec, nid):\r
+ hostname = NODES[nid]\r
+\r
+ # Add a planetlab host to the experiment description\r
+ host = ec.register_resource("planetlab::Node")\r
+ ec.set(host, "hostname", hostname)\r
+ ec.set(host, "username", pl_slice)\r
+ ec.set(host, "identity", pl_ssh_key)\r
+ ec.set(host, "cleanExperiment", True)\r
+ ec.set(host, "cleanProcesses", True)\r
+\r
+ # Annotate the graph\r
+ ec.netgraph.annotate_node(nid, "hostname", hostname)\r
+ ec.netgraph.annotate_node(nid, "host", host)\r
+ \r
+ # Annotate the graph node with an ip address\r
+ ip = socket.gethostbyname(hostname)\r
+ ec.netgraph.annotate_node_ip(nid, ip)\r
+\r
+def add_pl_ccnd(ec, nid):\r
+ # Retrieve annotation from netgraph\r
+ host = ec.netgraph.node_annotation(nid, "host")\r
+ \r
+ # Add a CCN daemon to the planetlab node\r
+ ccnd = ec.register_resource("linux::CCND")\r
+ ec.set(ccnd, "debug", 7)\r
+ ec.register_connection(ccnd, host)\r
+ \r
+ # Collector to retrieve ccnd log\r
+ collector = add_collector(ec, "stderr", str(nid), "log")\r
+ ec.register_connection(collector, ccnd)\r
+\r
+ # Annotate the graph\r
+ ec.netgraph.annotate_node(nid, "ccnd", ccnd)\r
+\r
+def add_pl_ccnr(ec, nid):\r
+ # Retrieve annotation from netgraph\r
+ ccnd = ec.netgraph.node_annotation(nid, "ccnd")\r
+ \r
+ # Add a CCN content repository to the planetlab node\r
+ ccnr = ec.register_resource("linux::CCNR")\r
+\r
+ ec.set(ccnr, "repoFile1", repofile)\r
+ ec.register_connection(ccnr, ccnd)\r
+\r
+def add_pl_ccncat(ec, nid):\r
+ # Retrieve annotation from netgraph\r
+ ccnd = ec.netgraph.node_annotation(nid, "ccnd")\r
+ \r
+ # Add a CCN cat application to the planetlab node\r
+ ccncat = ec.register_resource("linux::CCNCat")\r
+ ec.set(ccncat, "contentName", content_name)\r
+ ec.register_connection(ccncat, ccnd)\r
+\r
+def add_pl_fib_entry(ec, nid1, nid2):\r
+ # Retrieve annotations from netgraph\r
+ ccnd1 = ec.netgraph.node_annotation(nid1, "ccnd")\r
+ hostname2 = ec.netgraph.node_annotation(nid2, "hostname")\r
+ \r
+ # Add a FIB entry between one planetlab node and its peer\r
+ entry = ec.register_resource("linux::FIBEntry")\r
+ ec.set(entry, "host", hostname2)\r
+ ec.register_connection(entry, ccnd1)\r
+\r
+ # Collector to retrieve peering ping output (to measure neighbors delay)\r
+ ec.enable_trace(entry, "ping")\r
+ collector = add_collector(ec, "ping", str(nid1))\r
+ ec.register_connection(collector, entry)\r
+\r
+ return entry\r
+\r
+def add_node(ec, nid):\r
+ ### Add CCN nodes (ec.netgraph holds the topology graph)\r
+ add_dce_host(ec, nid)\r
+ add_dce_ccnd(ec, nid)\r
+ \r
+ if nid == ec.netgraph.targets()[0]:\r
+ add_dce_ccnr(ec, nid)\r
+\r
+ if nid == ec.netgraph.sources()[0]:\r
+ add_dce_ccncat(ec, nid)\r
+\r
+def add_edge(ec, nid1, nid2):\r
+ #### Add connections between CCN nodes\r
+ add_pl_fib_entry(ec, nid1, nid2)\r
+ add_pl_fib_entry(ec, nid2, nid1)\r
+\r
+def add_node(ec, nid):\r
+ ### Add CCN nodes (ec.netgraph holds the topology graph)\r
+ add_pl_host(ec, nid)\r
+ add_pl_ccnd(ec, nid)\r
+ \r
+ if nid == ec.netgraph.targets()[0]:\r
+ add_pl_ccnr(ec, nid)\r
+\r
+ if nid == ec.netgraph.sources()[0]:\r
+ add_pl_ccncat(ec, nid)\r
+\r
+def wait_guids(ec):\r
+ return ec.filter_resources("linux::CCNCat")\r
+\r
+if __name__ == '__main__':\r
+\r
+ metrics = []\r
+\r
+ # topology translation to NEPI model\r
+ ec = ExperimentController("pl_4n_linear",\r
+ topo_type = TopologyType.LINEAR, \r
+ node_count = 4,\r
+ assign_st = True,\r
+ assign_ips = True,\r
+ add_node_callback = add_node,\r
+ add_edge_callback = add_edge)\r
+\r
+ #### Run experiment until metric convergence\r
+ rnr = ExperimentRunner()\r
+ runs = rnr.run(ec,\r
+ min_runs = 10,\r
+ max_runs = 100, \r
+ compute_metric_callback = avg_interest_rtt,\r
+ evaluate_convergence_callback = normal_law,\r
+ wait_guids = wait_guids(ec))\r
+ \r
+ ### post processing\r
+ post_process(ec, runs)\r
+\r
+\r
return node
-def add_point2point_device(ec, node, ip, prefix):
+def add_device(ec, node, ip, prefix):
dev = ec.register_resource("ns3::PointToPointNetDevice")
ec.set(dev, "ip", ip)
ec.set(dev, "prefix", prefix)
return dev
-ec = ExperimentController(exp_id = "dce-custom-ping")
+ec = ExperimentController(exp_id = "dce-custom-dlm-ping")
-node = ec.register_resource("LinuxNode")
+node = ec.register_resource("linux::Node")
ec.set(node, "hostname", "localhost")
ec.set(node, "cleanProcesses", True)
-#ec.set(node, "cleanHome", True)
-simu = ec.register_resource("LinuxNS3Simulation")
+simu = ec.register_resource("linux::ns3:Simulation")
ec.set(simu, "verbose", True)
-ec.set(simu, "nsLog", "DceApplication")
-ec.set(simu, "enableDump", True)
+#ec.set(simu, "enableDump", True)
ec.register_connection(simu, node)
nsnode1 = add_ns3_node(ec, simu)
-p2p1 = add_point2point_device(ec, nsnode1, "10.0.0.1", "30")
-ec.set(p2p1, "DataRate", "5Mbps")
+dev1 = add_device(ec, nsnode1, "10.0.0.1", "30")
+ec.set(dev1, "DataRate", "5Mbps")
nsnode2 = add_ns3_node(ec, simu)
-p2p2 = add_point2point_device(ec, nsnode2, "10.0.0.2", "30")
-ec.set(p2p2, "DataRate", "5Mbps")
+dev2 = add_device(ec, nsnode2, "10.0.0.2", "30")
+ec.set(dev2, "DataRate", "5Mbps")
# Create channel
chan = ec.register_resource("ns3::PointToPointChannel")
ec.set(chan, "Delay", "2ms")
-ec.register_connection(chan, p2p1)
-ec.register_connection(chan, p2p2)
+ec.register_connection(chan, dev1)
+ec.register_connection(chan, dev2)
### create applications
-ping = ec.register_resource("ns3::LinuxDceApplication")
+ping = ec.register_resource("linux::ns3::dce::Application")
ec.set (ping, "sources", "http://www.skbuff.net/iputils/iputils-s20101006.tar.bz2")
ec.set (ping, "build", "tar xvjf ${SRC}/iputils-s20101006.tar.bz2 && "
"cd iputils-s20101006/ && "
ec.set (ping, "binary", "ping")
ec.set (ping, "stackSize", 1<<20)
ec.set (ping, "arguments", "-c 10;-s 1000;10.0.0.2")
+#ec.set (ping, "useDlmLoader", True)
ec.set (ping, "StartTime", "1s")
ec.set (ping, "StopTime", "20s")
ec.register_connection(ping, nsnode1)
ec.shutdown()
print "PING OUTPUT", stdout
+
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+
+def add_ns3_node(ec, simu):
+ node = ec.register_resource("ns3::Node")
+ ec.register_connection(node, simu)
+
+ ipv4 = ec.register_resource("ns3::Ipv4L3Protocol")
+ ec.register_connection(node, ipv4)
+
+ arp = ec.register_resource("ns3::ArpL3Protocol")
+ ec.register_connection(node, arp)
+
+ icmp = ec.register_resource("ns3::Icmpv4L4Protocol")
+ ec.register_connection(node, icmp)
+
+ udp = ec.register_resource("ns3::UdpL4Protocol")
+ ec.register_connection(node, udp)
+
+ tcp = ec.register_resource("ns3::TcpL4Protocol")
+ ec.register_connection(node, tcp)
+
+ return node
+
+def add_device(ec, node, ip, prefix):
+ dev = ec.register_resource("ns3::CsmaNetDevice")
+ ec.set(dev, "ip", ip)
+ ec.set(dev, "prefix", prefix)
+ ec.register_connection(node, dev)
+
+ queue = ec.register_resource("ns3::DropTailQueue")
+ ec.register_connection(dev, queue)
+
+ return dev
+
+ec = ExperimentController(exp_id = "dce-custom-csma-ping")
+
+node = ec.register_resource("linux::Node")
+ec.set(node, "hostname", "localhost")
+ec.set(node, "cleanProcesses", True)
+
+simu = ec.register_resource("linux::ns3::Simulation")
+ec.set(simu, "verbose", True)
+ec.register_connection(simu, node)
+
+nsnode1 = add_ns3_node(ec, simu)
+dev1 = add_device(ec, nsnode1, "10.0.0.1", "30")
+
+nsnode2 = add_ns3_node(ec, simu)
+dev2 = add_device(ec, nsnode2, "10.0.0.2", "30")
+
+# Create channel
+chan = ec.register_resource("ns3::CsmaChannel")
+ec.set(chan, "Delay", "2ms")
+
+ec.register_connection(chan, dev1)
+ec.register_connection(chan, dev2)
+
+### create applications
+ping = ec.register_resource("linux::ns3::dce::Application")
+ec.set (ping, "sources", "http://www.skbuff.net/iputils/iputils-s20101006.tar.bz2")
+ec.set (ping, "build", "tar xvjf ${SRC}/iputils-s20101006.tar.bz2 && "
+ "cd iputils-s20101006/ && "
+ "sed -i 's/CFLAGS=/CFLAGS+=/g' Makefile && "
+ "make CFLAGS=-fPIC LDFLAGS='-pie -rdynamic' ping && "
+ "cp ping ${BIN_DCE} && cd - ")
+ec.set (ping, "binary", "ping")
+ec.set (ping, "stackSize", 1<<20)
+ec.set (ping, "arguments", "-c 10;-s 1000;10.0.0.2")
+ec.set (ping, "StartTime", "1s")
+ec.set (ping, "StopTime", "20s")
+ec.register_connection(ping, nsnode1)
+
+ec.deploy()
+
+ec.wait_finished([ping])
+
+stdout = ec.trace(ping, "stdout")
+
+ec.shutdown()
+
+print "PING OUTPUT", stdout
+
return node
-def add_point2point_device(ec, node, ip, prefix):
+def add_device(ec, node, ip, prefix):
dev = ec.register_resource("ns3::PointToPointNetDevice")
ec.set(dev, "ip", ip)
ec.set(dev, "prefix", prefix)
ec = ExperimentController(exp_id = "dce-custom-ccn")
-node = ec.register_resource("LinuxNode")
+node = ec.register_resource("linux::Node")
ec.set(node, "hostname", "localhost")
ec.set(node, "cleanProcesses", True)
-#ec.set(node, "cleanHome", True)
-simu = ec.register_resource("LinuxNS3Simulation")
+simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "verbose", True)
-ec.set(simu, "nsLog", "DceApplication")
-ec.set(simu, "enableDump", True)
ec.register_connection(simu, node)
nsnode1 = add_ns3_node(ec, simu)
-p2p1 = add_point2point_device(ec, nsnode1, "10.0.0.1", "30")
-ec.set(p2p1, "DataRate", "5Mbps")
+dev1 = add_device(ec, nsnode1, "10.0.0.1", "30")
+ec.set(dev1, "DataRate", "5Mbps")
nsnode2 = add_ns3_node(ec, simu)
-p2p2 = add_point2point_device(ec, nsnode2, "10.0.0.2", "30")
-ec.set(p2p2, "DataRate", "5Mbps")
+dev2 = add_device(ec, nsnode2, "10.0.0.2", "30")
+ec.set(dev2, "DataRate", "5Mbps")
# Create channel
chan = ec.register_resource("ns3::PointToPointChannel")
ec.set(chan, "Delay", "2ms")
-ec.register_connection(chan, p2p1)
-ec.register_connection(chan, p2p2)
+ec.register_connection(chan, dev1)
+ec.register_connection(chan, dev2)
### create applications
# Add a LinuxCCNDceApplication to ns-3 node 1 to install custom ccnx sources
# and run a CCNx daemon
-ccnd1 = ec.register_resource("ns3::LinuxCCNDceApplication")
+ccnd1 = ec.register_resource("linux::ns3::dce::CCNApplication")
# NOTE THAT INSTALLATION MIGHT FAIL IF openjdk-6-jdk is not installed
ec.set(ccnd1, "depends", "libpcap0.8-dev openjdk-6-jdk ant1.8 autoconf "
"libssl-dev libexpat-dev libpcap-dev libecryptfs0 libxml2-utils auto"
# parameters
repofile = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
- "..", "..", "..",
- "test", "resources", "linux", "ns3", "ccn", "repoFile1")
+ "..", "..", "test", "resources", "linux",
+ "ns3", "ccn", "repoFile1")
-ccnr = ec.register_resource("ns3::LinuxCCNDceApplication")
+ccnr = ec.register_resource("linux::ns3::dce::CCNApplication")
ec.set (ccnr, "binary", "ccnr")
ec.set (ccnr, "environment", "CCNR_DIRECTORY=/REPO/")
ec.set (ccnr, "files", "%s=/REPO/repoFile1" % repofile)
# daemon. Note that the CCNx sources and build instructions
# do not need to be specified again (NEPI will take the
# instructions from the first definition).
-ccnd2 = ec.register_resource("ns3::LinuxCCNDceApplication")
+ccnd2 = ec.register_resource("linux::ns3::dce::CCNApplication")
ec.set (ccnd2, "binary", "ccnd")
ec.set (ccnd2, "stackSize", 1<<20)
ec.set (ccnd2, "environment", "CCND_CAP=50000; CCND_DEBUG=7")
# Add DCE application to configure peer CCN faces between
# nodes
-ccndc1 = ec.register_resource("ns3::LinuxCCNDceApplication")
+ccndc1 = ec.register_resource("linux::ns3::dce::CCNApplication")
ec.set (ccndc1, "binary", "ccndc")
ec.set (ccndc1, "arguments", "-v;add;ccnx:/;udp;10.0.0.2")
ec.set (ccndc1, "stackSize", 1<<20)
ec.set (ccndc1, "StopTime", "120s")
ec.register_connection(ccndc1, nsnode1)
-ccndc2 = ec.register_resource("ns3::LinuxCCNDceApplication")
+ccndc2 = ec.register_resource("linux::ns3::dce::CCNApplication")
ec.set (ccndc2, "binary", "ccndc")
ec.set (ccndc2, "arguments", "-v;add;ccnx:/;udp;10.0.0.1")
ec.set (ccndc2, "stackSize", 1<<20)
ec.register_connection(ccndc2, nsnode2)
# Add a DCE application to perform a ccncat and retrieve content
-ccncat = ec.register_resource("ns3::LinuxCCNDceApplication")
+ccncat = ec.register_resource("linux::ns3::dce::CCNApplication")
ec.set (ccncat, "binary", "ccncat")
ec.set (ccncat, "arguments", "ccnx:/test/bunny.ts")
ec.set (ccncat, "stdinFile", "")
ec.shutdown()
-
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+
+def add_ns3_node(ec, simu):
+ node = ec.register_resource("ns3::Node")
+ ec.register_connection(node, simu)
+
+ ipv4 = ec.register_resource("ns3::Ipv4L3Protocol")
+ ec.register_connection(node, ipv4)
+
+ arp = ec.register_resource("ns3::ArpL3Protocol")
+ ec.register_connection(node, arp)
+
+ icmp = ec.register_resource("ns3::Icmpv4L4Protocol")
+ ec.register_connection(node, icmp)
+
+ udp = ec.register_resource("ns3::UdpL4Protocol")
+ ec.register_connection(node, udp)
+
+ tcp = ec.register_resource("ns3::TcpL4Protocol")
+ ec.register_connection(node, tcp)
+
+ return node
+
+def add_device(ec, node, ip, prefix):
+ dev = ec.register_resource("ns3::PointToPointNetDevice")
+ ec.set(dev, "ip", ip)
+ ec.set(dev, "prefix", prefix)
+ ec.register_connection(node, dev)
+
+ queue = ec.register_resource("ns3::DropTailQueue")
+ ec.register_connection(dev, queue)
+
+ return dev
+
+ec = ExperimentController(exp_id = "dce-custom-p2p-ping")
+
+node = ec.register_resource("linux::Node")
+ec.set(node, "hostname", "localhost")
+ec.set(node, "cleanProcesses", True)
+
+simu = ec.register_resource("linux::ns3::Simulation")
+ec.set(simu, "verbose", True)
+ec.register_connection(simu, node)
+
+nsnode1 = add_ns3_node(ec, simu)
+dev1 = add_device(ec, nsnode1, "10.0.0.1", "30")
+ec.set(dev1, "DataRate", "5Mbps")
+
+nsnode2 = add_ns3_node(ec, simu)
+dev2 = add_device(ec, nsnode2, "10.0.0.2", "30")
+ec.set(dev2, "DataRate", "5Mbps")
+
+# Create channel
+chan = ec.register_resource("ns3::PointToPointChannel")
+ec.set(chan, "Delay", "2ms")
+
+ec.register_connection(chan, dev1)
+ec.register_connection(chan, dev2)
+
+### create applications
+ping = ec.register_resource("linux::ns3::dce::Application")
+ec.set (ping, "sources", "http://www.skbuff.net/iputils/iputils-s20101006.tar.bz2")
+ec.set (ping, "build", "tar xvjf ${SRC}/iputils-s20101006.tar.bz2 && "
+ "cd iputils-s20101006/ && "
+ "sed -i 's/CFLAGS=/CFLAGS+=/g' Makefile && "
+ "make CFLAGS=-fPIC LDFLAGS='-pie -rdynamic' ping && "
+ "cp ping ${BIN_DCE} && cd - ")
+ec.set (ping, "binary", "ping")
+ec.set (ping, "stackSize", 1<<20)
+ec.set (ping, "arguments", "-c 10;-s 1000;10.0.0.2")
+ec.set (ping, "StartTime", "1s")
+ec.set (ping, "StopTime", "20s")
+ec.register_connection(ping, nsnode1)
+
+ec.deploy()
+
+ec.wait_finished([ping])
+
+stdout = ec.trace(ping, "stdout")
+
+ec.shutdown()
+
+print "PING OUTPUT", stdout
+
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+
+def add_ns3_node(ec, simu):
+ node = ec.register_resource("ns3::Node")
+ ec.register_connection(node, simu)
+
+ ipv4 = ec.register_resource("ns3::Ipv4L3Protocol")
+ ec.register_connection(node, ipv4)
+
+ arp = ec.register_resource("ns3::ArpL3Protocol")
+ ec.register_connection(node, arp)
+
+ icmp = ec.register_resource("ns3::Icmpv4L4Protocol")
+ ec.register_connection(node, icmp)
+
+ udp = ec.register_resource("ns3::UdpL4Protocol")
+ ec.register_connection(node, udp)
+
+ tcp = ec.register_resource("ns3::TcpL4Protocol")
+ ec.register_connection(node, tcp)
+
+ return node
+
+def add_device(ec, node, ip, prefix, access_point = False):
+ dev = ec.register_resource("ns3::WifiNetDevice")
+ ec.set(dev, "ip", ip)
+ ec.set(dev, "prefix", prefix)
+ ec.register_connection(node, dev)
+
+ phy = ec.register_resource("ns3::YansWifiPhy")
+ ec.set(phy, "Standard", "WIFI_PHY_STANDARD_80211a")
+ ec.register_connection(dev, phy)
+
+ error = ec.register_resource("ns3::NistErrorRateModel")
+ ec.register_connection(phy, error)
+
+ manager = ec.register_resource("ns3::ArfWifiManager")
+ ec.register_connection(dev, manager)
+
+ if access_point:
+ mac = ec.register_resource("ns3::ApWifiMac")
+ else:
+ mac = ec.register_resource("ns3::StaWifiMac")
+
+ ec.set(mac, "Standard", "WIFI_PHY_STANDARD_80211a")
+ ec.register_connection(dev, mac)
+
+ return dev, phy
+
+def add_constant_mobility(ec, node, x, y, z):
+ mobility = ec.register_resource("ns3::ConstantPositionMobilityModel")
+ position = "%d:%d:%d" % (x, y, z)
+ ec.set(mobility, "Position", position)
+ ec.register_connection(node, mobility)
+ return mobility
+
+def add_wifi_channel(ec):
+ channel = ec.register_resource("ns3::YansWifiChannel")
+ delay = ec.register_resource("ns3::ConstantSpeedPropagationDelayModel")
+ ec.register_connection(channel, delay)
+
+ loss = ec.register_resource("ns3::LogDistancePropagationLossModel")
+ ec.register_connection(channel, loss)
+
+ return channel
+
+ec = ExperimentController(exp_id = "dce-custom-wifi-ping")
+
+node = ec.register_resource("linux::Node")
+ec.set(node, "hostname", "localhost")
+ec.set(node, "cleanProcesses", True)
+
+simu = ec.register_resource("linux::ns3::Simulation")
+ec.set(simu, "verbose", True)
+ec.register_connection(simu, node)
+
+nsnode1 = add_ns3_node(ec, simu)
+add_constant_mobility(ec, nsnode1, 0, 0, 0)
+dev1, phy1 = add_device(ec, nsnode1, "10.0.0.1", "30")
+
+nsnode2 = add_ns3_node(ec, simu)
+add_constant_mobility(ec, nsnode2, 50, 50, 0)
+dev2, phy2 = add_device(ec, nsnode2, "10.0.0.2", "30", access_point = True)
+
+# Create channel
+chan = add_wifi_channel(ec)
+ec.register_connection(chan, phy1)
+ec.register_connection(chan, phy2)
+
+### create applications
+ping = ec.register_resource("linux::ns3::dce::Application")
+ec.set (ping, "sources", "http://www.skbuff.net/iputils/iputils-s20101006.tar.bz2")
+ec.set (ping, "build", "tar xvjf ${SRC}/iputils-s20101006.tar.bz2 && "
+ "cd iputils-s20101006/ && "
+ "sed -i 's/CFLAGS=/CFLAGS+=/g' Makefile && "
+ "make CFLAGS=-fPIC LDFLAGS='-pie -rdynamic' ping && "
+ "cp ping ${BIN_DCE} && cd - ")
+ec.set (ping, "binary", "ping")
+ec.set (ping, "stackSize", 1<<20)
+ec.set (ping, "arguments", "-c 10;-s 1000;10.0.0.2")
+ec.set (ping, "StartTime", "1s")
+ec.set (ping, "StopTime", "20s")
+ec.register_connection(ping, nsnode1)
+
+ec.deploy()
+
+ec.wait_finished([ping])
+
+stdout = ec.trace(ping, "stdout")
+
+ec.shutdown()
+
+print "PING OUTPUT", stdout
+
return node
-def add_point2point_device(ec, node, ip, prefix):
+def add_device(ec, node, ip, prefix):
dev = ec.register_resource("ns3::PointToPointNetDevice")
ec.set(dev, "ip", ip)
ec.set(dev, "prefix", prefix)
return dev
-ec = ExperimentController(exp_id = "dce-ccn-app")
+ec = ExperimentController(exp_id = "dce-ccncat")
-node = ec.register_resource("LinuxNode")
+node = ec.register_resource("linux::Node")
ec.set(node, "hostname", "localhost")
ec.set(node, "cleanProcesses", True)
-#ec.set(node, "cleanHome", True)
-simu = ec.register_resource("LinuxNS3Simulation")
+simu = ec.register_resource("linux::ns3::Simulation")
ec.register_connection(simu, node)
nsnode1 = add_ns3_node(ec, simu)
-p2p1 = add_point2point_device(ec, nsnode1, "10.0.0.1", "30")
-ec.set(p2p1, "DataRate", "5Mbps")
+dev1 = add_device(ec, nsnode1, "10.0.0.1", "30")
+ec.set(dev1, "DataRate", "5Mbps")
nsnode2 = add_ns3_node(ec, simu)
-p2p2 = add_point2point_device(ec, nsnode2, "10.0.0.2", "30")
-ec.set(p2p2, "DataRate", "5Mbps")
+dev2 = add_device(ec, nsnode2, "10.0.0.2", "30")
+ec.set(dev2, "DataRate", "5Mbps")
# Create channel
chan = ec.register_resource("ns3::PointToPointChannel")
ec.set(chan, "Delay", "2ms")
-ec.register_connection(chan, p2p1)
-ec.register_connection(chan, p2p2)
+ec.register_connection(chan, dev1)
+ec.register_connection(chan, dev2)
### create applications
# Add ccnd to ns-3 node1
-ccnd1 = ec.register_resource("ns3::LinuxDceCCND")
+ccnd1 = ec.register_resource("linux::ns3::dce::CCND")
ec.set (ccnd1, "stackSize", 1<<20)
ec.set (ccnd1, "debug", 7)
ec.set (ccnd1, "capacity", 50000)
# Add CCN repository with content to ns-3 node1
repofile = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
- "..", "..", "..",
- "test", "resources", "linux", "ns3", "ccn", "repoFile1")
+ "..", "..", "test", "resources", "linux",
+ "ns3", "ccn", "repoFile1")
-ccnr = ec.register_resource("ns3::LinuxDceCCNR")
+ccnr = ec.register_resource("linux::ns3::dce::CCNR")
ec.set (ccnr, "repoFile1", repofile)
ec.set (ccnr, "stackSize", 1<<20)
ec.set (ccnr, "StartTime", "2s")
ec.register_connection(ccnr, nsnode1)
# Add CCN repository with content to ns-3 node2
-ccnd2 = ec.register_resource("ns3::LinuxDceCCND")
+ccnd2 = ec.register_resource("linux::ns3::dce::CCND")
ec.set (ccnd2, "stackSize", 1<<20)
ec.set (ccnd2, "debug", 7)
ec.set (ccnd2, "capacity", 50000)
ec.register_connection(ccnd2, nsnode2)
# Add face from ns-3 node1 to ns-3 node2
-ccndc1 = ec.register_resource("ns3::LinuxDceFIBEntry")
+ccndc1 = ec.register_resource("linux::ns3::dce::FIBEntry")
ec.set (ccndc1, "protocol", "udp")
ec.set (ccndc1, "uri", "ccnx:/")
ec.set (ccndc1, "host", "10.0.0.2")
ec.register_connection(ccndc1, nsnode1)
# Add face from ns-3 node2 to ns-3 node1
-ccndc2 = ec.register_resource("ns3::LinuxDceFIBEntry")
+ccndc2 = ec.register_resource("linux::ns3::dce::FIBEntry")
ec.set (ccndc2, "protocol", "udp")
ec.set (ccndc2, "uri", "ccnx:/")
ec.set (ccndc2, "host", "10.0.0.1")
ec.register_connection(ccndc2, nsnode2)
# Add a ccncat to node2 to retrieve content
-ccncat = ec.register_resource("ns3::LinuxDceCCNCat")
+ccncat = ec.register_resource("linux::ns3::dce::CCNCat")
ec.set (ccncat, "contentName", "ccnx:/test/bunny.ts")
ec.set (ccncat, "stackSize", 1<<20)
ec.set (ccncat, "StartTime", "4s")
return node
-ec = ExperimentController(exp_id = "dce-ccnpeek-app")
+ec = ExperimentController(exp_id = "dce-local-ccnpeek")
-node = ec.register_resource("LinuxNode")
+node = ec.register_resource("linux::Node")
ec.set(node, "hostname", "localhost")
ec.set(node, "cleanProcesses", True)
-#ec.set(node, "cleanHome", True)
-simu = ec.register_resource("LinuxNS3Simulation")
+simu = ec.register_resource("linux::ns3::Simulation")
ec.register_connection(simu, node)
nsnode = add_ns3_node(ec, simu)
### create applications
-ccnd = ec.register_resource("ns3::LinuxDceCCND")
+ccnd = ec.register_resource("linux::ns3::dce::CCND")
ec.set (ccnd, "stackSize", 1<<20)
ec.set (ccnd, "StartTime", "1s")
ec.register_connection(ccnd, nsnode)
-ccnpoke = ec.register_resource("ns3::LinuxDceCCNPoke")
+ccnpoke = ec.register_resource("linux::ns3::dce::CCNPoke")
ec.set (ccnpoke, "contentName", "ccnx:/chunk0")
ec.set (ccnpoke, "content", "DATA")
ec.set (ccnpoke, "stackSize", 1<<20)
ec.set (ccnpoke, "StartTime", "2s")
ec.register_connection(ccnpoke, nsnode)
-ccnpeek = ec.register_resource("ns3::LinuxDceCCNPeek")
+ccnpeek = ec.register_resource("linux::ns3::dce::CCNPeek")
ec.set (ccnpeek, "contentName", "ccnx:/chunk0")
ec.set (ccnpeek, "stackSize", 1<<20)
ec.set (ccnpeek, "StartTime", "4s")
ec.shutdown()
print "PEEK received", stdout
+
return node
-def add_point2point_device(ec, node, ip, prefix):
+def add_device(ec, node, ip, prefix):
dev = ec.register_resource("ns3::PointToPointNetDevice")
ec.set(dev, "ip", ip)
ec.set(dev, "prefix", prefix)
ec = ExperimentController(exp_id = "dce-ping-app")
-node = ec.register_resource("LinuxNode")
+node = ec.register_resource("linux::Node")
ec.set(node, "hostname", "localhost")
ec.set(node, "cleanProcesses", True)
-#ec.set(node, "cleanHome", True)
-simu = ec.register_resource("LinuxNS3Simulation")
+simu = ec.register_resource("linux::ns3::Simulation")
ec.register_connection(simu, node)
nsnode1 = add_ns3_node(ec, simu)
-p2p1 = add_point2point_device(ec, nsnode1, "10.0.0.1", "30")
-ec.set(p2p1, "DataRate", "5Mbps")
+dev1 = add_device(ec, nsnode1, "10.0.0.1", "30")
+ec.set(dev1, "DataRate", "5Mbps")
nsnode2 = add_ns3_node(ec, simu)
-p2p2 = add_point2point_device(ec, nsnode2, "10.0.0.2", "30")
-ec.set(p2p2, "DataRate", "5Mbps")
+dev2 = add_device(ec, nsnode2, "10.0.0.2", "30")
+ec.set(dev2, "DataRate", "5Mbps")
# Create channel
chan = ec.register_resource("ns3::PointToPointChannel")
ec.set(chan, "Delay", "2ms")
-ec.register_connection(chan, p2p1)
-ec.register_connection(chan, p2p2)
+ec.register_connection(chan, dev1)
+ec.register_connection(chan, dev2)
### create applications
-ping = ec.register_resource("ns3::LinuxDcePing")
+ping = ec.register_resource("linux::ns3::dce::Ping")
ec.set (ping, "stackSize", 1<<20)
ec.set (ping, "target", "10.0.0.2")
ec.set (ping, "count", "10")
ec.shutdown()
print "PING OUTPUT", stdout
+
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
#
-# Instructions to run this example:
-#
-# 1. First edit the script file where required (See ASSING messages)
-#
-# 2. Then, run the script:
+
+# Example of how to run this experiment (replace with your information):
#
# $ cd <path-to-nepi>
-# $ PYTHONPATH=$PYTHONPATHS:src python examples/linux/ccn/two_nodes_file_retrieval.py
+# python examples/linux/ccn_simple_transfer.py -a <hostname1> -b <hostname2> -u <username> -i <ssh-key>
+
+# CCN topology:
+#
+#
+#
+# content ccncat
+# Linux host Linux host
+# 0 ------- network -------- 1
#
from nepi.execution.ec import ExperimentController
+from optparse import OptionParser
import os
-ssh_key = ####### <<< ASSING the absolute path to the private SSH key to login into the remote host >>>
-ssh_user = ####### <<< ASSING the SSH username >>>
+usage = ("usage: %prog -a <hostname1> -b <hostname2> -u <username> -i <ssh-key>")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-a", "--hostname1", dest="hostname1",
+ help="Remote host 1", type="str")
+parser.add_option("-b", "--hostname2", dest="hostname2",
+ help="Remote host 2", type="str")
+parser.add_option("-u", "--username", dest="username",
+ help="Username to SSH to remote host", type="str")
+parser.add_option("-i", "--ssh-key", dest="ssh_key",
+ help="Path to private SSH key to be used for connection",
+ type="str")
+(options, args) = parser.parse_args()
+
+hostname1 = options.hostname1
+hostname2 = options.hostname2
+username = options.username
+ssh_key = options.ssh_key
## Create the experiment controller
-ec = ExperimentController(exp_id = "demo_CCN")
+ec = ExperimentController(exp_id = "ccn_simple_transfer")
+
+##### CONFIGURING NODE 1
## Register node 1
-node1 = ec.register_resource("LinuxNode")
+node1 = ec.register_resource("linux::Node")
# Set the hostname of the first node to use for the experiment
-hostname1 = "peeramidion.irisa.fr" ##### <<< ASSIGN the hostname of a host you have SSSH access to >>>
ec.set(node1, "hostname", hostname1)
# username should be your SSH user
-ec.set(node1, "username", ssh_user)
+ec.set(node1, "username", username)
# Absolute path to the SSH private key
ec.set(node1, "identity", ssh_key)
# Clean all files, results, etc, from previous experiments wit the same exp_id
# Kill all running processes in the node before running the experiment
ec.set(node1, "cleanProcesses", True)
+## Register a CCN daemon in node 1
+ccnd1 = ec.register_resource("linux::CCND")
+# Set ccnd log level to 7
+ec.set(ccnd1, "debug", 7)
+ec.register_connection(ccnd1, node1)
+
+## Register a repository in node 1
+ccnr1 = ec.register_resource("linux::CCNR")
+ec.register_connection(ccnr1, ccnd1)
+
+## Push the file into the repository
+local_path_to_content = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)),
+ "..", "big_buck_bunny_240p_mpeg4_lq.ts")
+
+content_name = "ccnx:/test/FILE"
+
+# Add a content to the repository
+co = ec.register_resource("linux::CCNContent")
+ec.set(co, "contentName", content_name)
+# NEPI will upload the specified file to the remote node and write it
+# into the CCN repository
+ec.set(co, "content", local_path_to_content)
+ec.register_connection(co, ccnr1)
+
+##### CONFIGURING NODE 2
+
## Register node 2
-node2 = ec.register_resource("LinuxNode")
+node2 = ec.register_resource("linux::Node")
# Set the hostname of the first node to use for the experiment
-hostname2 = "planetlab2.upc.es" ##### <<< ASSIGN the hostname of a host you have SSSH access to >>>
ec.set(node2, "hostname", hostname2)
# username should be your SSH user
-ec.set(node2, "username", ssh_user)
+ec.set(node2, "username", username)
# Absolute path to the SSH private key
ec.set(node2, "identity", ssh_key)
# Clean all files, results, etc, from previous experiments wit the same exp_id
# Kill all running processes in the node before running the experiment
ec.set(node2, "cleanProcesses", True)
-## Register a CCN daemon in node 1
-ccnd1 = ec.register_resource("LinuxCCND")
-# Set ccnd log level to 7
-ec.set(ccnd1, "debug", 7)
-ec.register_connection(ccnd1, node1)
-
## Register a CCN daemon in node 2
-ccnd2 = ec.register_resource("LinuxCCND")
+ccnd2 = ec.register_resource("linux::CCND")
# Set ccnd log level to 7
ec.set(ccnd2, "debug", 7)
ec.register_connection(ccnd2, node2)
-## Register a repository in node 1
-ccnr1 = ec.register_resource("LinuxCCNR")
-ec.register_connection(ccnr1, ccnd1)
-
-## Push the file into the repository
-local_path_to_content = os.path.join(
- os.path.dirname(os.path.realpath(__file__)),
- "..", "..",
- "big_buck_bunny_240p_mpeg4_lq.ts")
+## Retrieve the file stored in node 1 from node 2
+ccncat = ec.register_resource("linux::CCNCat")
+ec.set(ccncat, "contentName", content_name)
+ec.register_connection(ccncat, ccnd2)
-# Register a FIB entry from node 1 to node 2
-co = ec.register_resource("LinuxCCNContent")
-ec.set(co, "contentName", "ccnx:/test/FILE1")
-# NEPI will upload the specified file to the remote node and write it
-# into the CCN repository
-ec.set(co, "content", local_path_to_content)
-ec.register_connection(co, ccnr1)
+##### INTERCONNECTING CCN NODES ...
# Register a FIB entry from node 1 to node 2
-entry1 = ec.register_resource("LinuxFIBEntry")
+entry1 = ec.register_resource("linux::FIBEntry")
ec.set(entry1, "host", hostname2)
ec.register_connection(entry1, ccnd1)
# Register a FIB entry from node 2 to node 1
-entry2 = ec.register_resource("LinuxFIBEntry")
+entry2 = ec.register_resource("linux::FIBEntry")
ec.set(entry2, "host", hostname1)
ec.register_connection(entry2, ccnd2)
-## Retrieve the file stored in node 1 from node 2
-command = "ccncat ccnx:/test/FILE1"
-app = ec.register_resource("LinuxCCNApplication")
-ec.set(app, "command", command)
-ec.register_connection(app, ccnd2)
-
-# Register a collector to automatically collect the ccnd logs
-# to a local directory
-results_dir = "/tmp/demo_CCN_results"
-col1 = ec.register_resource("Collector")
-ec.set(col1, "traceName", "stderr")
-ec.set(col1, "storeDir", results_dir)
-ec.set(col1, "subDir", hostname1)
-ec.register_connection(col1, ccnd1)
-
-col2 = ec.register_resource("Collector")
-ec.set(col2, "traceName", "stderr")
-ec.set(col2, "storeDir", results_dir)
-ec.set(col2, "subDir", hostname2)
-ec.register_connection(col2, ccnd2)
+##### STARTING THE EXPERIMENT
## Deploy all resources
ec.deploy()
# Wait until the ccncat is finished
-ec.wait_finished([app])
+ec.wait_finished([ccncat])
-## CCND logs will be collected to the results_dir upon shutdown.
-## We can aldo get the content of the logs now:
-#print "LOG2", ec.trace(ccnd1, "stderr")
-#print "LOG 1", ec.trace(ccnd2, "stderr")
+stdout = ec.trace(ccncat, "stdout")
+f = open("video.ts", "w")
+f.write(stdout)
+f.close()
ec.shutdown()
+
+print "Transferred FILE stored locally at video.ts"
--- /dev/null
+#!/usr/bin/env python
+
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+# NOTE: This experiment example uses the generic linux::Application
+# ResourceManager to do the CCN set up in the hosts.
+# Alternatively, CCN specific ResourceManagers can be used
+# (i.e. linux::CCND, linux::CCNR, etc...), and those require less
+# manual configuration.
+#
+#
+
+# CCN topology:
+#
+#
+#
+# content ccncat
+# Linux host Linux host
+# 0 ------- Internet ------ 0
+#
+
+# Example of how to run this experiment (replace with your information):
+#
+# $ cd <path-to-nepi>
+# python examples/linux/ccn_advanced_transfer.py -a <hostname1> -b <hostname2> -u <username> -i <ssh-key>
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.resource import ResourceAction, ResourceState
+
+from optparse import OptionParser
+import os
+
+usage = ("usage: %prog -a <hostname1> -b <hostname2> -u <username> -i <ssh-key>")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-a", "--hostname1", dest="hostname1",
+ help="Remote host 1", type="str")
+parser.add_option("-b", "--hostname2", dest="hostname2",
+ help="Remote host 2", type="str")
+parser.add_option("-u", "--username", dest="username",
+ help="Username to SSH to remote host", type="str")
+parser.add_option("-i", "--ssh-key", dest="ssh_key",
+ help="Path to private SSH key to be used for connection",
+ type="str")
+(options, args) = parser.parse_args()
+
+hostname1 = options.hostname1
+hostname2 = options.hostname2
+username = options.username
+ssh_key = options.ssh_key
+
+def add_node(ec, host, user, ssh_key = None):
+ node = ec.register_resource("linux::Node")
+ ec.set(node, "hostname", host)
+ ec.set(node, "username", user)
+ ec.set(node, "identity", ssh_key)
+ ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanProcesses", True)
+ return node
+
+def add_ccnd(ec, peers):
+ # Dependencies for Fedora
+ depends = ( " autoconf openssl-devel expat-devel libpcap-devel "
+ " ecryptfs-utils-devel libxml2-devel automake gawk "
+ " gcc gcc-c++ git pcre-devel make ")
+
+ # UBUNTU / DEBIAN
+ # depends = ( " autoconf libssl-dev libexpat-dev libpcap-dev "
+ # " libecryptfs0 libxml2-utils automake gawk gcc g++ "
+ # " git-core pkg-config libpcre3-dev make ")
+
+ sources = "http://www.ccnx.org/releases/ccnx-0.8.2.tar.gz"
+
+ build = (
+ # Evaluate if ccnx binaries are already installed
+ " ( "
+ " test -f ${BIN}/ccnx-0.8.2/bin/ccnd"
+ " ) || ( "
+ # If not, untar and build
+ " ( "
+ " mkdir -p ${SRC}/ccnx-0.8.2 && "
+ " tar xf ${SRC}/ccnx-0.8.2.tar.gz --strip-components=1 -C ${SRC}/ccnx-0.8.2 "
+ " ) && "
+ "cd ${SRC}/ccnx-0.8.2 && "
+ # Just execute and silence warnings...
+ "( ./configure && make ) "
+ " )")
+
+ install = (
+ # Evaluate if ccnx binaries are already installed
+ " ( "
+ " test -f ${BIN}/ccnx-0.8.2/bin/ccnd"
+ " ) || ( "
+ " mkdir -p ${BIN}/ccnx-0.8.2/bin && "
+ " cp -r ${SRC}/ccnx-0.8.2/bin ${BIN}/ccnx-0.8.2"
+ " )"
+ )
+
+ env = "PATH=$PATH:${BIN}/ccnx-0.8.2/bin"
+
+ # BASH command -> ' ccndstart ; ccndc add ccnx:/ udp host ; ccnr '
+ command = "ccndstart && "
+ peers = map(lambda peer: "ccndc add ccnx:/ udp %s" % peer, peers)
+ command += " ; ".join(peers) + " && "
+ command += " ccnr & "
+
+ app = ec.register_resource("linux::Application")
+ ec.set(app, "depends", depends)
+ ec.set(app, "sources", sources)
+ ec.set(app, "install", install)
+ ec.set(app, "build", build)
+ ec.set(app, "env", env)
+ ec.set(app, "command", command)
+
+ return app
+
+def add_publish(ec, movie, content_name):
+ env = "PATH=$PATH:${BIN}/ccnx-0.8.2/bin"
+ command = "ccnseqwriter -r %s" % content_name
+
+ app = ec.register_resource("linux::Application")
+ ec.set(app, "stdin", movie)
+ ec.set(app, "env", env)
+ ec.set(app, "command", command)
+
+ return app
+
+def add_ccncat(ec, content_name):
+ env = "PATH=$PATH:${BIN}/ccnx-0.8.2/bin"
+ command = "ccncat %s" % content_name
+
+ app = ec.register_resource("linux::Application")
+ ec.set(app, "env", env)
+ ec.set(app, "command", command)
+
+ return app
+
+## Create the experiment controller
+ec = ExperimentController(exp_id = "ccn_advanced_transfer")
+
+# Register first PlanetLab host
+node1 = add_node(ec, hostname1, username, ssh_key)
+
+# Register CCN setup for host
+peers = [hostname2]
+ccnd1 = add_ccnd(ec, peers)
+ec.register_connection(ccnd1, node1)
+
+# Register content producer application (ccnseqwriter)
+## Push the file into the repository
+local_path_to_content = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)),
+ "..", "big_buck_bunny_240p_mpeg4_lq.ts")
+
+content_name = "ccnx:/test/FILE"
+
+pub = add_publish(ec, local_path_to_content, content_name)
+ec.register_connection(pub, node1)
+
+# The movie can only be published after ccnd is running
+ec.register_condition(pub, ResourceAction.START,
+ ccnd1, ResourceState.STARTED)
+
+# Register Linux host
+node2 = add_node(ec, hostname2, username, ssh_key)
+
+# Register CCN setup for Linux host
+peers = [hostname1]
+ccnd2 = add_ccnd(ec, peers)
+ec.register_connection(ccnd2, node2)
+
+# Register consumer application (ccncat)
+ccncat = add_ccncat(ec, content_name)
+ec.register_connection(ccncat, node2)
+
+# The file can only be retrieved after ccnd is running
+ec.register_condition(ccncat, ResourceAction.START,
+ ccnd2, ResourceState.STARTED)
+
+# And also, the file can only be retrieved after it was published
+ec.register_condition(ccncat, ResourceAction.START,
+ pub, ResourceState.STARTED)
+
+# Deploy all ResourceManagers
+ec.deploy()
+
+# Wait until the applications are finished
+apps = [ccncat]
+ec.wait_finished(apps)
+
+stdout = ec.trace(ccncat, "stdout")
+f = open("video.ts", "w")
+f.write(stdout)
+f.close()
+
+# Shutdown the experiment controller
+ec.shutdown()
+
+print "Transferred FILE stored locally at video.ts"
+
--- /dev/null
+#include <stdio.h>
+
+int
+main(int argc, char** argv){
+
+ printf("Hello world\n");
+ return 0;
+}
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+# Example of how to run this experiment (replace with your information):
+#
+# $ cd <path-to-nepi>
+# python examples/linux/hello_world.py -a <hostname> -u <username> -i <ssh-key>
+
+from nepi.execution.ec import ExperimentController
+
+from optparse import OptionParser, SUPPRESS_HELP
+import os
+
+usage = ("usage: %prog -a <hostname> -u <username> -i <ssh-key>")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-a", "--hostname", dest="hostname",
+ help="Remote host", type="str")
+parser.add_option("-u", "--username", dest="username",
+ help="Username to SSH to remote host", type="str")
+parser.add_option("-i", "--ssh-key", dest="ssh_key",
+ help="Path to private SSH key to be used for connection",
+ type="str")
+(options, args) = parser.parse_args()
+
+hostname = options.hostname
+username = options.username
+ssh_key = options.ssh_key
+
+ec = ExperimentController(exp_id = "src-up-exp")
+
+node = ec.register_resource("linux::Node")
+ec.set(node, "hostname", hostname)
+ec.set(node, "username", username)
+ec.set(node, "identity", ssh_key)
+ec.set(node, "cleanExperiment", True)
+ec.set(node, "cleanProcesses", True)
+
+path_to_sources = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)),
+ "hello.c")
+
+app = ec.register_resource("linux::Application")
+ec.set(app, "sources", path_to_sources)
+ec.set(app, "build", "gcc ${SRC}/hello.c -o ${BIN}/hello")
+ec.set(app, "command", "${BIN}/hello")
+ec.register_connection(node, app)
+
+ec.deploy()
+
+ec.wait_finished(app)
+
+print ec.trace(app, "stdout")
+
+ec.shutdown()
+
# Alina Quereilhac <alina.quereilhac@inria.fr>
#
#
-# Example of how to run this experiment (replace with your credentials):
+# Example of how to run this experiment (replace with your information):
#
# $ cd <path-to-nepi>
-# $ PYTHONPATH=$PYTHONPATH:~/repos/nepi/src python examples/linux/file_transfer.py -u inria_nepi -i ~/.ssh/id_rsa_planetlab -a planetlab1.u-strasbg.fr -b planetlab1.utt.fr
-
+# python examples/linux/netcat_file_transfer.py -a <hostname1> -b <hostname2> -u <username> -i <ssh-key>
from nepi.execution.ec import ExperimentController
from nepi.execution.resource import ResourceAction, ResourceState
ssh_key = options.ssh_key
## Create the experiment controller
-ec = ExperimentController(exp_id = "file_transfer")
+ec = ExperimentController(exp_id = "nc_file_transfer")
## Register node 1
-node1 = ec.register_resource("LinuxNode")
+node1 = ec.register_resource("linux::Node")
# Set the hostname of the first node to use for the experiment
ec.set(node1, "hostname", hostname1)
# username should be your SSH user
ec.set(node1, "cleanProcesses", True)
## Register node 2
-node2 = ec.register_resource("LinuxNode")
+node2 = ec.register_resource("linux::Node")
# Set the hostname of the first node to use for the experiment
ec.set(node2, "hostname", hostname2)
# username should be your SSH user
command = "cat ${SHARE}/%s | pv -fbt 2> bw.txt | nc %s 1234" % (
video, hostname2 )
-server = ec.register_resource("LinuxApplication")
+server = ec.register_resource("linux::Application")
ec.set(server, "depends", "pv nc tcpdump")
ec.set(server, "files", local_path_to_video)
ec.set(server, "command", command)
# if not nc in the client side close the socket suddently if runned in background
command = "nc -dl 1234 > %s" % video
-client = ec.register_resource("LinuxApplication")
+client = ec.register_resource("linux::Application")
ec.set(client, "depends", "nc")
ec.set(client, "command", command)
ec.register_connection(client, node2)
# Register a tcpdump in the server node to monitor the file transfer
command = "tcpdump -ni eth0 -w file_transfer.pcap -s0 port 1234 2>&1"
-capture = ec.register_resource("LinuxApplication")
+capture = ec.register_resource("linux::Application")
ec.set(capture, "depends", "tcpdump")
ec.set(capture, "command", command)
ec.set(capture, "sudo", True)
ec.shutdown()
+print "Total bytes transferred saved to bw.txt..."
+
--- /dev/null
+#\r
+# NEPI, a framework to manage network experiments\r
+# Copyright (C) 2014 INRIA\r
+#\r
+# This program is free software: you can redistribute it and/or modify\r
+# it under the terms of the GNU General Public License as published by\r
+# the Free Software Foundation, either version 3 of the License, or\r
+# (at your option) any later version.\r
+#\r
+# This program is distributed in the hope that it will be useful,\r
+# but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
+# GNU General Public License for more details.\r
+#\r
+# You should have received a copy of the GNU General Public License\r
+# along with this program. If not, see <http://www.gnu.org/licenses/>.\r
+#\r
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>\r
+# Maksym Gabielkov <maksym.gabielkovc@inria.fr>\r
+#\r
+\r
+## This is a maintenance script used to bootstrap the nodes from\r
+## Nitos testbed (NITLab) before running a OMF experiment using\r
+## Nitos nodes. This fixes the problem of Resource Controller \r
+## misbehaving by restarting it and it also loads the ath5k driver.\r
+\r
+# Example of how to run this experiment (replace with your information):\r
+#\r
+# $ cd <path-to-nepi>\r
+# python examples/linux/nitos_testbed_bootstrap.py -H <list-of-nitos-hosts> -u <nitos-username> -i <ssh-key> -g <nitos-gateway> -U <nitos-gateway-username>\r
+#\r
+\r
+from nepi.execution.ec import ExperimentController\r
+from optparse import OptionParser, SUPPRESS_HELP\r
+import os\r
+\r
+usage = ("usage: %prog -H <list-of-nitos-hosts> -u <nitos-username> -i <ssh-key> -g <nitos-gateway> -U <nitos-gateway-username>")\r
+\r
+parser = OptionParser(usage = usage)\r
+parser.add_option("-H", "--hosts", dest="hosts", \r
+ help="Space separated list of hosts", type="str")\r
+parser.add_option("-u", "--username", dest="username", \r
+ help="Username for the nitos hosts (usually root)", \r
+ type="str", default="root" )\r
+parser.add_option("-g", "--gateway", dest="gateway", \r
+ help="Nitos gateway hostname", \r
+ type="str", default="nitlab.inf.uth.gr")\r
+parser.add_option("-U", "--gateway-user", dest="gateway_username", \r
+ help="Nitos gateway username", \r
+                  type="str")\r
+parser.add_option("-i", "--ssh-key", dest="ssh_key", \r
+ help="Path to private SSH key to be used for connection", \r
+ type="str")\r
+(options, args) = parser.parse_args()\r
+\r
+hosts = options.hosts.split(" ")\r
+username = options.username\r
+gateway = options.gateway\r
+gateway_username = options.gateway_username\r
+ssh_key = options.ssh_key\r
+\r
+apps = []\r
+\r
+ec = ExperimentController(exp_id="ath5k")\r
+\r
+for hostname in hosts:\r
+ node = ec.register_resource("linux::Node")\r
+ ec.set(node, "username", username)\r
+ ec.set(node, "hostname", hostname)\r
+ ec.set(node, "gateway", gateway)\r
+ ec.set(node, "gatewayUser", gateway_username)\r
+ ec.set(node, "cleanExperiment", True)\r
+\r
+ app = ec.register_resource("linux::Application")\r
+ ec.set(app, "command", "modprobe ath5k && ip a | grep wlan0 && service omf_rc restart")\r
+ ec.register_connection(app, node)\r
+ \r
+ apps.append(app)\r
+\r
+ec.deploy()\r
+ec.wait_finished(apps)\r
+\r
+for app in apps:\r
+ print ec.trace(app, "stdout") \r
+\r
+\r
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+# Example of how to run this experiment (replace with your information):
+#
+# $ cd <path-to-nepi>
+# python examples/linux/ping.py -a <hostname> -u <username> -i <ssh-key>
+
+
from nepi.execution.ec import ExperimentController
+from optparse import OptionParser, SUPPRESS_HELP
+import os
+
+usage = ("usage: %prog -a <hostname> -u <username> -i <ssh-key>")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-a", "--hostname", dest="hostname",
+ help="Remote host", type="str")
+parser.add_option("-u", "--username", dest="username",
+ help="Username to SSH to remote host", type="str")
+parser.add_option("-i", "--ssh-key", dest="ssh_key",
+ help="Path to private SSH key to be used for connection",
+ type="str")
+(options, args) = parser.parse_args()
+
+hostname = options.hostname
+username = options.username
+ssh_key = options.ssh_key
+
ec = ExperimentController(exp_id = "ping-exp")
-hostname = ## Add a string with the target hostname
-username = ## Add a string with the username to SSH hostname
-
-node = ec.register_resource("LinuxNode")
+node = ec.register_resource("linux::Node")
ec.set(node, "hostname", hostname)
ec.set(node, "username", username)
-ec.set(node, "cleanHome", True)
+ec.set(node, "identity", ssh_key)
+ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
-app = ec.register_resource("LinuxApplication")
-ec.set(app, "command", "ping -c3 www.google.com")
+app = ec.register_resource("linux::Application")
+ec.set(app, "command", "ping -c3 nepi.inria.fr")
ec.register_connection(app, node)
ec.deploy()
print ec.trace(app, "stdout")
ec.shutdown()
+
#
#
# content ccncat
-# PL host Linux host
+# Linux host Linux host
# 0 ------- Internet ------ 0
#
import time
def add_node(ec, host, user, ssh_key = None):
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
ec.set(node, "identity", ssh_key)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
return node
command += " ; ".join(peers) + " && "
command += " ccnr & "
- app = ec.register_resource("LinuxApplication")
+ app = ec.register_resource("linux::Application")
ec.set(app, "depends", depends)
ec.set(app, "sources", sources)
ec.set(app, "install", install)
env = "PATH=$PATH:${BIN}/ccnx-0.7.1/bin"
command = "ccnseqwriter -r ccnx:/VIDEO"
- app = ec.register_resource("LinuxApplication")
+ app = ec.register_resource("linux::Application")
ec.set(app, "stdin", movie)
ec.set(app, "env", env)
ec.set(app, "command", command)
env = "PATH=$PATH:${BIN}/ccnx-0.7.1/bin"
command = "sudo -S dbus-uuidgen --ensure ; ( ccncat ccnx:/VIDEO | vlc - ) "
- app = ec.register_resource("LinuxApplication")
+ app = ec.register_resource("linux::Application")
ec.set(app, "depends", "vlc")
ec.set(app, "forwardX11", True)
ec.set(app, "env", env)
import time
def add_node(ec, host, user, ssh_key = None):
- node = ec.register_resource("LinuxNode")
+    node = ec.register_resource("linux::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
ec.set(node, "identity", ssh_key)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
return node
def add_ccnd(ec, node):
- ccnd = ec.register_resource("LinuxCCND")
+ ccnd = ec.register_resource("linux::CCND")
ec.set(ccnd, "debug", 7)
ec.register_connection(ccnd, node)
return ccnd
def add_ccnr(ec, ccnd):
- ccnr = ec.register_resource("LinuxCCNR")
+ ccnr = ec.register_resource("linux::CCNR")
ec.register_connection(ccnr, ccnd)
return ccnr
def add_fib_entry(ec, ccnd, peer_host):
- entry = ec.register_resource("LinuxFIBEntry")
+ entry = ec.register_resource("linux::FIBEntry")
ec.set(entry, "host", peer_host)
ec.register_connection(entry, ccnd)
return entry
def add_content(ec, ccnr, content_name, content):
- co = ec.register_resource("LinuxCCNContent")
+ co = ec.register_resource("linux::CCNContent")
ec.set(co, "contentName", content_name)
ec.set(co, "content", content)
ec.register_connection(co, ccnr)
command = "ccnpeek %(content_name)s; ccncat %(content_name)s" % {
"content_name" : content_name}
- app = ec.register_resource("LinuxCCNApplication")
+ app = ec.register_resource("linux::CCNApplication")
ec.set(app, "command", command)
ec.register_connection(app, ccnd)
return app
-def add_collector(ec, trace_name, store_dir):
+def add_collector(ec, trace_name):
collector = ec.register_resource("Collector")
ec.set(collector, "traceName", trace_name)
- ec.set(collector, "storeDir", store_dir)
return collector
( pl_user, movie, exp_id, pl_ssh_key, results_dir ) = get_options()
- ec = ExperimentController(exp_id = exp_id)
+ ec = ExperimentController(exp_id = exp_id, local_dir = results_dir)
# hosts in the US
#host1 = "planetlab4.wail.wisc.edu"
app, ResourceState.STARTED, time = "10s")
# Register a collector to automatically collect traces
- collector = add_collector(ec, "stderr", results_dir)
+ collector = add_collector(ec, "stderr")
for ccnd in ccnds.values():
ec.register_connection(collector, ccnd)
ec = ExperimentController(exp_id)
# server
-node1 = ec.register_resource("LinuxNode")
+node1 = ec.register_resource("linux::Node")
ec.set(node1, "hostname", "wlab29.")
ec.set(node1, "username", "root")
ec.set(node1, "gatewayUser", "etourdi")
ec.set(node1, "gateway", "etourdi.pl.sophia.inria.fr")
-ec.set(node1, "cleanHome", True)
+ec.set(node1, "cleanExperiment", True)
ec.set(node1, "cleanProcesses", True)
# client
-node2 = ec.register_resource("LinuxNode")
+node2 = ec.register_resource("linux::Node")
ec.set(node2, "hostname", "wlab5.")
ec.set(node2, "username", "root")
ec.set(node2, "gatewayUser", "etourdi")
ec.set(node2, "gateway", "etourdi.pl.sophia.inria.fr")
-ec.set(node2, "cleanHome", True)
+ec.set(node2, "cleanExperiment", True)
ec.set(node2, "cleanProcesses", True)
-app1 = ec.register_resource("LinuxApplication")
+app1 = ec.register_resource("linux::Application")
video= "../big_buck_bunny_240p_mpeg4_lq.ts"
ec.set(app1, "sources", video)
command = "cat ${SRC}/big_buck_bunny_240p_mpeg4_lq.ts| nc wlab5. 1234"
ec.set(app1, "command", command)
ec.register_connection(app1, node1)
-app2 = ec.register_resource("LinuxApplication")
+app2 = ec.register_resource("linux::Application")
command = "nc -dl 1234 > big_buck_copied_movie.ts"
ec.set(app2, "command", command)
ec.register_connection(app2, node2)
import time
def add_node(ec, host, user):
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
return node
def add_app(ec):
- app = ec.register_resource("LinuxApplication")
+ app = ec.register_resource("linux::Application")
ec.set(app, "command", "sleep 30; echo 'HOLA'")
return app
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
#
-# Example of how to run this experiment (replace with your credentials):
+# Example of how to run this experiment (replace with your information):
#
# $ cd <path-to-nepi>
-# $ PYTHONPATH=$PYTHONPATH:~/repos/nepi/src python examples/linux/vlc_streaming.py -u inria_nepi -i ~/.ssh/id_rsa_planetlab -a planetlab1.u-strasbg.fr -b planetlab1.utt.fr | vlc -
-
+# python examples/linux/vlc_streaming.py -a <hostname1> -b <hostname2> -u <username> -i <ssh-key>
from nepi.execution.ec import ExperimentController
from nepi.execution.resource import ResourceState, ResourceAction
ec = ExperimentController(exp_id = "vlc_streamming")
## Register node 1
-node1 = ec.register_resource("LinuxNode")
+node1 = ec.register_resource("linux::Node")
# Set the hostname of the first node to use for the experiment
ec.set(node1, "hostname", hostname1)
# username should be your SSH user
ec.set(node1, "cleanProcesses", True)
## Register node 2
-node2 = ec.register_resource("LinuxNode")
+node2 = ec.register_resource("linux::Node")
# Set the hostname of the first node to use for the experiment
ec.set(node2, "hostname", hostname2)
# username should be your SSH user
"--sout '#rtp{dst=%s,port=5004,mux=ts}' vlc://quit") % \
(video, hostname2)
-server = ec.register_resource("LinuxApplication")
+server = ec.register_resource("linux::Application")
ec.set(server, "depends", "vlc")
ec.set(server, "files", local_path_to_video)
ec.set(server, "command", command)
"--sout '#std{access=file,mux=ts,dst=VIDEO}'") % \
(hostname2, video)
-client = ec.register_resource("LinuxApplication")
+client = ec.register_resource("linux::Application")
ec.set(client, "depends", "vlc")
ec.set(client, "command", command)
ec.register_connection(client, node2)
# Wait until the ccncat is finished
ec.wait_finished([server])
-print ec.trace(client, "VIDEO")
+video = ec.trace(client, "VIDEO")
+f = open("video.ts", "w")
+f.write(video)
+f.close()
ec.shutdown()
+print "Streamed VIDEO stored localy at video.ts"
ec = ExperimentController(exp_id = "ns3-csma-p2p-ping")
# Simulation will run in a remote machine
-node = ec.register_resource("LinuxNode")
+node = ec.register_resource("linux::Node")
ec.set(node, "hostname", "localhost")
# Add a simulation resource
-simu = ec.register_resource("LinuxNS3Simulation")
+simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "verbose", True)
ec.set(simu, "enableDump", True)
ec.register_connection(simu, node)
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+
+ec = ExperimentController(exp_id = "ns3-local-csma-ping")
+
+# Simulation will be executed on the local machine
+node = ec.register_resource("linux::Node")
+ec.set(node, "hostname", "localhost")
+
+# Add a simulation resource
+simu = ec.register_resource("linux::ns3::Simulation")
+ec.set(simu, "verbose", True)
+ec.register_connection(simu, node)
+
+## Add a ns-3 node with its protocol stack
+nsnode1 = ec.register_resource("ns3::Node")
+ec.register_connection(nsnode1, simu)
+
+ipv4 = ec.register_resource("ns3::Ipv4L3Protocol")
+ec.register_connection(nsnode1, ipv4)
+arp = ec.register_resource("ns3::ArpL3Protocol")
+ec.register_connection(nsnode1, arp)
+icmp = ec.register_resource("ns3::Icmpv4L4Protocol")
+ec.register_connection(nsnode1, icmp)
+
+# Add a csma net device to the node
+dev1 = ec.register_resource("ns3::CsmaNetDevice")
+ec.set(dev1, "ip", "10.0.0.1")
+ec.set(dev1, "prefix", "30")
+ec.register_connection(nsnode1, dev1)
+queue1 = ec.register_resource("ns3::DropTailQueue")
+ec.register_connection(dev1, queue1)
+
+## Add another ns-3 node with its protocol stack
+nsnode2 = ec.register_resource("ns3::Node")
+ec.register_connection(nsnode2, simu)
+
+ipv4 = ec.register_resource("ns3::Ipv4L3Protocol")
+ec.register_connection(nsnode2, ipv4)
+arp = ec.register_resource("ns3::ArpL3Protocol")
+ec.register_connection(nsnode2, arp)
+icmp = ec.register_resource("ns3::Icmpv4L4Protocol")
+ec.register_connection(nsnode2, icmp)
+
+# Add a csma net device to the node
+dev2 = ec.register_resource("ns3::CsmaNetDevice")
+ec.set(dev2, "ip", "10.0.0.2")
+ec.set(dev2, "prefix", "30")
+ec.register_connection(nsnode2, dev2)
+queue2 = ec.register_resource("ns3::DropTailQueue")
+ec.register_connection(dev2, queue2)
+
+# Add a csma channel
+chan = ec.register_resource("ns3::CsmaChannel")
+ec.set(chan, "Delay", "0s")
+ec.register_connection(chan, dev1)
+ec.register_connection(chan, dev2)
+
+### create pinger
+ping = ec.register_resource("ns3::V4Ping")
+ec.set (ping, "Remote", "10.0.0.2")
+ec.set (ping, "Interval", "1s")
+ec.set (ping, "Verbose", True)
+ec.set (ping, "StartTime", "0s")
+ec.set (ping, "StopTime", "20s")
+ec.register_connection(ping, nsnode1)
+
+ec.deploy()
+
+ec.wait_finished([ping])
+
+stdout = ec.trace(simu, "stdout")
+
+ec.shutdown()
+
+print "PING OUTPUT", stdout
ec = ExperimentController(exp_id = "ns3-wifi-ping")
# Simulation will run in a remote machine
-node = ec.register_resource("LinuxNode")
+node = ec.register_resource("linux::Node")
ec.set(node, "hostname", "localhost")
# Add a simulation resource
-simu = ec.register_resource("LinuxNS3Simulation")
+simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "verbose", True)
ec.set(simu, "enableDump", True)
-ec.set (simu, "stopTime", "22s")
+ec.set (simu, "StopTime", "22s")
ec.register_connection(simu, node)
x = 30
from nepi.execution.ec import ExperimentController
-ec = ExperimentController(exp_id = "ns3-local-ping")
+ec = ExperimentController(exp_id = "ns3-local-p2p-ping")
# Simulation will executed in the local machine
-node = ec.register_resource("LinuxNode")
+node = ec.register_resource("linux::Node")
ec.set(node, "hostname", "localhost")
# Add a simulation resource
-simu = ec.register_resource("LinuxNS3Simulation")
+simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "verbose", True)
ec.register_connection(simu, node)
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+
+ec = ExperimentController(exp_id = "ns3-local-wifi-ping")
+
+# Simulation will be executed on the local machine
+node = ec.register_resource("linux::Node")
+ec.set(node, "hostname", "localhost")
+
+# Add a simulation resource
+simu = ec.register_resource("linux::ns3::Simulation")
+ec.set(simu, "verbose", True)
+ec.register_connection(simu, node)
+
+## Add a ns-3 node with its protocol stack
+nsnode1 = ec.register_resource("ns3::Node")
+ec.register_connection(nsnode1, simu)
+
+ipv4 = ec.register_resource("ns3::Ipv4L3Protocol")
+ec.register_connection(nsnode1, ipv4)
+arp = ec.register_resource("ns3::ArpL3Protocol")
+ec.register_connection(nsnode1, arp)
+icmp = ec.register_resource("ns3::Icmpv4L4Protocol")
+ec.register_connection(nsnode1, icmp)
+
+# Adding constant mobility to the ns-3 node
+mobility1 = ec.register_resource("ns3::ConstantPositionMobilityModel")
+position1 = "%d:%d:%d" % (0, 0, 0)
+ec.set(mobility1, "Position", position1)
+ec.register_connection(nsnode1, mobility1)
+
+# Add a wifi access point net device to the node
+dev1 = ec.register_resource("ns3::WifiNetDevice")
+ec.set(dev1, "ip", "10.0.0.1")
+ec.set(dev1, "prefix", "30")
+ec.register_connection(nsnode1, dev1)
+
+phy1 = ec.register_resource("ns3::YansWifiPhy")
+ec.set(phy1, "Standard", "WIFI_PHY_STANDARD_80211a")
+ec.register_connection(dev1, phy1)
+
+error1 = ec.register_resource("ns3::NistErrorRateModel")
+ec.register_connection(phy1, error1)
+
+manager1 = ec.register_resource("ns3::ArfWifiManager")
+ec.register_connection(dev1, manager1)
+
+# Mark the node as a wireless access point
+mac1 = ec.register_resource("ns3::ApWifiMac")
+ec.set(mac1, "Standard", "WIFI_PHY_STANDARD_80211a")
+ec.register_connection(dev1, mac1)
+
+## Add another ns-3 node with its protocol stack
+nsnode2 = ec.register_resource("ns3::Node")
+ec.register_connection(nsnode2, simu)
+
+ipv4 = ec.register_resource("ns3::Ipv4L3Protocol")
+ec.register_connection(nsnode2, ipv4)
+arp = ec.register_resource("ns3::ArpL3Protocol")
+ec.register_connection(nsnode2, arp)
+icmp = ec.register_resource("ns3::Icmpv4L4Protocol")
+ec.register_connection(nsnode2, icmp)
+
+# Adding constant mobility to the ns-3 node
+mobility2 = ec.register_resource("ns3::ConstantPositionMobilityModel")
+position2 = "%d:%d:%d" % (50, 50, 0)
+ec.set(mobility2, "Position", position2)
+ec.register_connection(nsnode2, mobility2)
+
+# Add a wifi station net device to the node
+dev2 = ec.register_resource("ns3::WifiNetDevice")
+ec.set(dev2, "ip", "10.0.0.2")
+ec.set(dev2, "prefix", "30")
+ec.register_connection(nsnode2, dev2)
+
+phy2 = ec.register_resource("ns3::YansWifiPhy")
+ec.set(phy2, "Standard", "WIFI_PHY_STANDARD_80211a")
+ec.register_connection(dev2, phy2)
+
+error2 = ec.register_resource("ns3::NistErrorRateModel")
+ec.register_connection(phy2, error2)
+
+manager2 = ec.register_resource("ns3::ArfWifiManager")
+ec.register_connection(dev2, manager2)
+
+# Mark the node as a wireless station
+mac2 = ec.register_resource("ns3::StaWifiMac")
+ec.set(mac2, "Standard", "WIFI_PHY_STANDARD_80211a")
+ec.register_connection(dev2, mac2)
+
+# Add a wifi channel
+chan = ec.register_resource("ns3::YansWifiChannel")
+delay = ec.register_resource("ns3::ConstantSpeedPropagationDelayModel")
+ec.register_connection(chan, delay)
+loss = ec.register_resource("ns3::LogDistancePropagationLossModel")
+ec.register_connection(chan, loss)
+ec.register_connection(chan, phy1)
+ec.register_connection(chan, phy2)
+
+### create pinger
+ping = ec.register_resource("ns3::V4Ping")
+ec.set (ping, "Remote", "10.0.0.2")
+ec.set (ping, "Interval", "1s")
+ec.set (ping, "Verbose", True)
+ec.set (ping, "StartTime", "0s")
+ec.set (ping, "StopTime", "20s")
+ec.register_connection(ping, nsnode1)
+
+ec.deploy()
+
+ec.wait_finished([ping])
+
+stdout = ec.trace(simu, "stdout")
+
+ec.shutdown()
+
+print "PING OUTPUT", stdout
from nepi.execution.ec import ExperimentController
-from optparse import OptionParser, SUPPRESS_HELP
+from optparse import OptionParser
usage = ("usage: %prog -H <hostanme> -u <username> -i <ssh-key>")
username = options.username
identity = options.ssh_key
-ec = ExperimentController(exp_id = "ns3-remote-ping")
+ec = ExperimentController(exp_id = "ns3-remote-p2p-ping")
# Simulation will run in a remote machine
-node = ec.register_resource("LinuxNode")
+node = ec.register_resource("linux::Node")
ec.set(node, "hostname", hostname)
ec.set(node, "username", username)
ec.set(node, "identity", identity)
ec.set(node, "cleanExperiment", True)
# Add a simulation resource
-simu = ec.register_resource("LinuxNS3Simulation")
+simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "verbose", True)
ec.register_connection(simu, node)
--- /dev/null
+# Generated by NetlabClient
+
+set ns [new Simulator]
+source tb_compat.tcl
+
+# Nodes
+set node0 [$ns node]
+tb-set-node-os $node0 NepiVlcOMF6Baseline
+set node1 [$ns node]
+tb-set-node-os $node1 NepiVlcOMF6Baseline
+
+$ns rtproto Static
+$ns run
+
+# NetlabClient generated file ends here.
+# Finished at: 6/19/14 5:38 PM
+
--- /dev/null
+#!/usr/bin/env python
+
+###############################################################################
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors: Alina Quereilhac <alina.quereilhac@inria.fr>
+# Julien Tribino <julien.tribino@inria.fr>
+#
+###############################################################################
+
+# Topology
+#
+#
+# Testbed : iMinds
+#
+# Node
+# node0ZZ
+# 0
+# |
+# |
+# 0
+# Node
+# node0ZZ
+# PING
+#
+# - Experiment:
+# - t0 : Deployment
+# - t1 : Ping Start
+# - t2 (t1 + 10s) : Ping stop
+# - t3 (t2 + 2s) : Kill the application
+#
+
+from nepi.execution.resource import ResourceAction, ResourceState
+from nepi.execution.ec import ExperimentController
+
+from optparse import OptionParser
+import os
+
+usage = ("usage: %prog -x <nodex> -z <nodez> -s <slice-name> -c <channel>")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-x", "--nodex", dest="nodex",
+ help="w-iLab.t first reserved node "
+ "(must be of form: "
+ " nodex.<experiment_id>.<project_id>.wilab2.ilabt.iminds.be"
+ " all letters in lowercase )",
+ type="str")
+parser.add_option("-z", "--nodez", dest="nodez",
+ help="w-iLab.t first reserved node "
+ "(must be of form: "
+ " nodex.<experiment_id>.<project_id>.wilab2.ilabt.iminds.be"
+ " all letters in lowercase )",
+ type="str")
+parser.add_option("-s", "--slice-name", dest="slicename",
+ help="Nitos slice name", type="str")
+(options, args) = parser.parse_args()
+
+nodex = options.nodex
+nodez = options.nodez
+slicename = options.slicename
+
+# Create the EC
+ec = ExperimentController(exp_id="iminds_omf6_ping")
+
+# Create and Configure the Nodes
+
+node1 = ec.register_resource("omf::Node")
+ec.set(node1, "hostname", nodex)
+ec.set(node1, "xmppUser", slicename)
+ec.set(node1, "xmppServer", "xmpp.ilabt.iminds.be")
+ec.set(node1, "xmppPort", "5222")
+ec.set(node1, "xmppPassword", "1234")
+
+iface1 = ec.register_resource("omf::WifiInterface")
+ec.set(iface1, "name", "wlan0")
+ec.set(iface1, "mode", "adhoc")
+ec.set(iface1, "hw_mode", "g")
+ec.set(iface1, "essid", "ping")
+ec.set(iface1, "ip", "192.168.0.1/24")
+ec.register_connection(iface1, node1)
+
+node2 = ec.register_resource("omf::Node")
+ec.set(node2, "hostname", nodez)
+ec.set(node2, "xmppUser", slicename)
+ec.set(node2, "xmppServer", "xmpp.ilabt.iminds.be")
+ec.set(node2, "xmppPort", "5222")
+ec.set(node2, "xmppPassword", "1234")
+
+iface2 = ec.register_resource("omf::WifiInterface")
+ec.set(iface2, "name", "wlan0")
+ec.set(iface2, "mode", "adhoc")
+ec.set(iface2, "hw_mode", "g")
+ec.set(iface2, "essid", "ping")
+ec.set(iface2, "ip", "192.168.0.2/24")
+ec.register_connection(iface2, node2)
+
+channel = ec.register_resource("omf::Channel")
+ec.set(channel, "channel", "6")
+ec.register_connection(iface1, channel)
+ec.register_connection(iface2, channel)
+
+# Create and Configure the Application
+app1 = ec.register_resource("omf::Application")
+ec.set(app1, "command", "ping -c3 192.168.0.2")
+ec.register_connection(app1, node1)
+
+## Make sure the ping stops after 30 seconds
+ec.register_condition(app1, ResourceAction.STOP, app1,
+ ResourceState.STARTED , "30s")
+
+# Deploy
+ec.deploy()
+
+# Wait until the VLC client is finished
+ec.wait_finished([app1])
+
+# Retrieve the output of the ping command
+ping_output = ec.trace(app1, "stdout")
+print "\n PING OUTPUT\n", ping_output, "\n"
+
+# Stop Experiment
+ec.shutdown()
+
--- /dev/null
+# Generated by NetlabClient
+
+set ns [new Simulator]
+source tb_compat.tcl
+
+# Nodes
+set node0 [$ns node]
+tb-set-node-os $node0 NepiVlcOMF6Baseline
+set node1 [$ns node]
+tb-set-node-os $node1 NepiVlcOMF6Baseline
+
+$ns rtproto Static
+$ns run
+
+# NetlabClient generated file ends here.
+# Finished at: 6/19/14 5:38 PM
+
--- /dev/null
+#!/usr/bin/env python
+
+###############################################################################
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors: Alina Quereilhac <alina.quereilhac@inria.fr>
+# Julien Tribino <julien.tribino@inria.fr>
+#
+###############################################################################
+
+# Topology
+#
+#
+# Testbed : iMinds
+#
+# Node
+# node0ZZ
+# 0
+# |
+# |
+# 0
+# Node
+# node0ZZ
+# PING
+#
+#
+# - Experiment:
+# - t0 : Deployment
+# - t1 : Ping Start
+# - t2 (t1 + 10s) : Ping stop
+# - t3 (t2 + 2s) : Kill the application
+#
+
+from nepi.execution.resource import ResourceAction, ResourceState
+from nepi.execution.ec import ExperimentController
+
+from optparse import OptionParser
+import os
+
+usage = ("usage: %prog -x <nodex> -z <nodez> -s <slice-name> -c <channel>")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-x", "--nodex", dest="nodex",
+ help="w-iLab.t first reserved node "
+ "(must be of form: "
+ " nodex.<experiment_id>.<project_id>.wilab2.ilabt.iminds.be"
+ " all letters in lowercase )",
+ type="str")
+parser.add_option("-z", "--nodez", dest="nodez",
+ help="w-iLab.t first reserved node "
+ "(must be of form: "
+ " nodex.<experiment_id>.<project_id>.wilab2.ilabt.iminds.be"
+ " all letters in lowercase )",
+ type="str")
+parser.add_option("-c", "--channel", dest="channel",
+ help="Nitos reserved channel",
+ type="str")
+parser.add_option("-s", "--slice-name", dest="slicename",
+ help="Nitos slice name", type="str")
+(options, args) = parser.parse_args()
+
+nodex = options.nodex
+nodez = options.nodez
+slicename = options.slicename
+chan = options.channel
+
+# Create the EC
+ec = ExperimentController(exp_id="iminds_omf6_ping")
+
+# Create and Configure the Nodes
+
+node1 = ec.register_resource("omf::Node")
+ec.set(node1, "hostname", nodex)
+ec.set(node1, "xmppUser", slicename)
+ec.set(node1, "xmppServer", "xmpp.ilabt.iminds.be")
+ec.set(node1, "xmppPort", "5222")
+ec.set(node1, "xmppPassword", "1234")
+
+iface1 = ec.register_resource("omf::WifiInterface")
+ec.set(iface1, "name", "wlan0")
+ec.set(iface1, "mode", "adhoc")
+ec.set(iface1, "hw_mode", "g")
+ec.set(iface1, "essid", "vlc")
+ec.set(iface1, "ip", "192.168.0.1/24")
+ec.register_connection(iface1, node1)
+
+node2 = ec.register_resource("omf::Node")
+ec.set(node2, "hostname", nodez)
+ec.set(node2, "xmppUser", slicename)
+ec.set(node2, "xmppServer", "xmpp.ilabt.iminds.be")
+ec.set(node2, "xmppPort", "5222")
+ec.set(node2, "xmppPassword", "1234")
+
+iface2 = ec.register_resource("omf::WifiInterface")
+ec.set(iface2, "name", "wlan0")
+ec.set(iface2, "mode", "adhoc")
+ec.set(iface2, "hw_mode", "g")
+ec.set(iface2, "essid", "vlc")
+ec.set(iface2, "ip", "192.168.0.2/24")
+ec.register_connection(iface2, node2)
+
+channel = ec.register_resource("omf::Channel")
+ec.set(channel, "channel", "6")
+ec.register_connection(iface1, channel)
+ec.register_connection(iface2, channel)
+
+client_ip = "192.168.0.2"
+
+# Create and Configure the Application
+app1 = ec.register_resource("omf::Application")
+ec.set(app1, "command",
+ "/root/vlc/vlc-1.1.13/cvlc /root/10-by-p0d.avi --sout '#rtp{dst=%s,port=5004,mux=ts}'" % client_ip)
+ec.register_connection(app1, node1)
+
+## Add an OMFApplication to run the client VLC
+app2 = ec.register_resource("omf::Application")
+## Send the transmitted video to a file.
+ec.set(app2, "command", "/root/vlc/vlc-1.1.13/cvlc rtp://%s:5004 --sout '#standard{access=file,mux=ts,dst=/root/video.ts}'" % client_ip)
+ec.register_connection(app2, node2)
+
+## Add an OMFApplication to count the number of bytes in the transmitted video
+app3 = ec.register_resource("omf::Application")
+## List the received video file to verify its size.
+ec.set(app3, "command", "ls -lah /root/video.ts")
+ec.register_connection(app3, node2)
+
+app4 = ec.register_resource("omf::Application")
+ec.set(app4, "command", "/usr/bin/killall vlc_app")
+ec.register_connection(app4, node1)
+
+app5 = ec.register_resource("omf::Application")
+ec.set(app5, "command", "/usr/bin/killall vlc_app")
+ec.register_connection(app5, node2)
+
+## start app2 5s after app1
+ec.register_condition(app2, ResourceAction.START, app1, ResourceState.STARTED , "5s")
+# start app3 after app2 stopped
+ec.register_condition(app3, ResourceAction.START, app2, ResourceState.STOPPED , "5s")
+# start the kill of vlc processes after they stopped
+ec.register_condition(app4, ResourceAction.START, app1, ResourceState.STOPPED , "5s")
+ec.register_condition(app5, ResourceAction.START, app2, ResourceState.STOPPED , "5s")
+
+## We need to explicitly STOP all applications
+## stop app1 65s after it started
+ec.register_condition(app1, ResourceAction.STOP, app1, ResourceState.STARTED , "65s")
+## stop app2 5 seconds after app1 stops
+ec.register_condition(app2, ResourceAction.STOP, app1, ResourceState.STOPPED , "5s")
+# stop app3 after 5s
+ec.register_condition(app3, ResourceAction.STOP, app3, ResourceState.STOPPED , "5s")
+# stop app4
+ec.register_condition(app4, ResourceAction.STOP, app4, ResourceState.STARTED , "5s")
+# stop app5
+ec.register_condition(app5, ResourceAction.STOP, app5, ResourceState.STARTED , "5s")
+
+# Deploy
+ec.deploy()
+
+# DO NOT WAIT FOR THE VLC applications or it will never stop
+ec.wait_finished([app4, app5])
+
+# Retrieve the bytes transmitted output and print it
+byte_count = ec.trace(app3, "stdout")
+print "BYTES transmitted", byte_count
+
+# Stop Experiment
+ec.shutdown()
+
--- /dev/null
+#!/usr/bin/env python
+
+###############################################################################
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors: Alina Quereilhac <alina.quereilhac@inria.fr>
+# Julien Tribino <julien.tribino@inria.fr>
+#
+###############################################################################
+
+# Topology
+#
+#
+# Testbed : Nitos
+#
+# Node
+# node0ZZ
+# 0
+# |
+# |
+# 0
+# Node
+# node0ZZ
+# PING
+#
+#
+# - Experiment:
+# - t0 : Deployment
+# - t1 : Ping Start
+# - t2 (t1 + 10s) : Ping stop
+# - t3 (t2 + 2s) : Kill the application
+#
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.resource import ResourceAction, ResourceState
+
+from optparse import OptionParser
+import os
+
+usage = ("usage: %prog -x <nodex> -z <nodez> -s <slice-name> -c <channel>")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-x", "--nodex", dest="nodex",
+ help="Nitos first reserved node "
+ "(e.g. hostname must be of form: node0XX)",
+ type="str")
+parser.add_option("-z", "--nodez", dest="nodez",
+ help="Nitos second reserved node "
+ "(e.g. hostname must be of form: node0ZZ)",
+ type="str")
+parser.add_option("-c", "--channel", dest="channel",
+ help="Nitos reserved channel",
+ type="str")
+parser.add_option("-s", "--slice-name", dest="slicename",
+ help="Nitos slice name", type="str")
+(options, args) = parser.parse_args()
+
+nodex = options.nodex
+nodez = options.nodez
+slicename = options.slicename
+chan = options.channel
+
+# Create the EC
+ec = ExperimentController(exp_id="nitos_omf6_ping")
+
+# Create and Configure the Nodes
+node1 = ec.register_resource("omf::Node")
+ec.set(node1, "hostname", nodex)
+ec.set(node1, "xmppUser", slicename)
+ec.set(node1, "xmppServer", "nitlab.inf.uth.gr")
+ec.set(node1, "xmppPort", "5222")
+ec.set(node1, "xmppPassword", "1234")
+
+# Create and Configure the Interfaces
+iface1 = ec.register_resource("omf::WifiInterface")
+ec.set(iface1, "name", "wlan0")
+ec.set(iface1, "mode", "adhoc")
+ec.set(iface1, "hw_mode", "g")
+ec.set(iface1, "essid", "ping")
+ec.set(iface1, "ip", "192.168.0.%s/24" % nodex[-2:])
+ec.register_connection(node1, iface1)
+
+# Create and Configure the Nodes
+node2 = ec.register_resource("omf::Node")
+ec.set(node2, "hostname", nodez)
+ec.set(node2, "xmppUser", slicename)
+ec.set(node2, "xmppServer", "nitlab.inf.uth.gr")
+ec.set(node2, "xmppPort", "5222")
+ec.set(node2, "xmppPassword", "1234")
+
+# Create and Configure the Interfaces
+iface2 = ec.register_resource("omf::WifiInterface")
+ec.set(iface2, "name", "wlan0")
+ec.set(iface2, "mode", "adhoc")
+ec.set(iface2, "hw_mode", "g")
+ec.set(iface2, "essid", "ping")
+ec.set(iface2, "ip", "192.168.0.%s/24" % nodez[-2:])
+ec.register_connection(node2, iface2)
+
+# Create and Configure the Channel
+channel = ec.register_resource("omf::Channel")
+ec.set(channel, "channel", chan)
+ec.set(channel, "xmppUser", slicename)
+ec.set(channel, "xmppServer", "nitlab.inf.uth.gr")
+ec.set(channel, "xmppPort", "5222")
+ec.set(channel, "xmppPassword", "1234")
+ec.register_connection(iface1, channel)
+ec.register_connection(iface2, channel)
+
+# Create and Configure the PING Application
+app1 = ec.register_resource("omf::Application")
+ec.set(app1, "appid", "Ping#1")
+ec.set(app1, "command", "/bin/ping -c3 192.168.0.%s" % nodez[-2:])
+ec.register_connection(app1, node1)
+
+## Make sure the ping stops after 30 seconds
+ec.register_condition(app1, ResourceAction.STOP, app1,
+ ResourceState.STARTED , "30s")
+
+# Deploy
+ec.deploy()
+
+ec.wait_finished([app1])
+
+# Retrieve the output of the ping command
+ping_output = ec.trace(app1, "stdout")
+print "\n PING OUTPUT\n", ping_output, "\n"
+
+# Stop Experiment
+ec.shutdown()
+
--- /dev/null
+#!/usr/bin/env python
+
+###############################################################################
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors: Alina Quereilhac <alina.quereilhac@inria.fr>
+# Julien Tribino <julien.tribino@inria.fr>
+#
+###############################################################################
+
+# Topology
+#
+#
+# Testbed : Nitos
+#
+# Node
+# node0XX
+# VLC client
+# 0
+# |
+# |
+# 0
+# Node
+# node0ZZ
+# VLC server
+#
+#
+# - Experiment:
+# - t0 : Deployment
+# - t1 : Ping Start
+# - t2 (t1 + 10s) : Ping stop
+# - t3 (t2 + 2s) : Kill the application
+#
+
+from nepi.execution.resource import ResourceAction, ResourceState
+from nepi.execution.ec import ExperimentController
+
+from optparse import OptionParser
+import os
+
+usage = ("usage: %prog -x <nodex> -z <nodez> -s <slice-name> -c <channel>")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-x", "--nodex", dest="nodex",
+ help="Nitos first reserved node "
+ "(e.g. hostname must be of form: node0XX)",
+ type="str")
+parser.add_option("-z", "--nodez", dest="nodez",
+ help="Nitos second reserved node "
+ "(e.g. hostname must be of form: node0ZZ)",
+ type="str")
+parser.add_option("-c", "--channel", dest="channel",
+ help="Nitos reserved channel",
+ type="str")
+parser.add_option("-s", "--slice-name", dest="slicename",
+ help="Nitos slice name", type="str")
+(options, args) = parser.parse_args()
+
+nodex = options.nodex
+nodez = options.nodez
+slicename = options.slicename
+chan = options.channel
+
+# Create the EC
+ec = ExperimentController(exp_id="nitos_omf6_vlc")
+
+# Create and Configure the Nodes
+node1 = ec.register_resource("omf::Node")
+ec.set(node1, "hostname", nodex)
+ec.set(node1, "xmppUser", slicename)
+ec.set(node1, "xmppServer", "nitlab.inf.uth.gr")
+ec.set(node1, "xmppPort", "5222")
+ec.set(node1, "xmppPassword", "1234")
+
+# Create and Configure the Interfaces
+iface1 = ec.register_resource("omf::WifiInterface")
+ec.set(iface1, "name", "wlan0")
+ec.set(iface1, "mode", "adhoc")
+ec.set(iface1, "hw_mode", "g")
+ec.set(iface1, "essid", "vlc")
+ec.set(iface1, "ip", "192.168.0.%s/24" % nodex[-2:])
+ec.register_connection(node1, iface1)
+
+# Create and Configure the Nodes
+node2 = ec.register_resource("omf::Node")
+ec.set(node2, "hostname", nodez)
+ec.set(node2, "xmppUser", slicename)
+ec.set(node2, "xmppServer", "nitlab.inf.uth.gr")
+ec.set(node2, "xmppPort", "5222")
+ec.set(node2, "xmppPassword", "1234")
+
+# Create and Configure the Interfaces
+iface2 = ec.register_resource("omf::WifiInterface")
+ec.set(iface2, "name", "wlan0")
+ec.set(iface2, "mode", "adhoc")
+ec.set(iface2, "hw_mode", "g")
+ec.set(iface2, "essid", "vlc")
+ec.set(iface2, "ip", "192.168.0.%s/24" % nodez[-2:])
+ec.register_connection(node2, iface2)
+
+# Create and Configure the Channel
+channel = ec.register_resource("omf::Channel")
+ec.set(channel, "channel", chan)
+ec.set(channel, "xmppUser", slicename)
+ec.set(channel, "xmppServer", "nitlab.inf.uth.gr")
+ec.set(channel, "xmppPort", "5222")
+ec.set(channel, "xmppPassword", "1234")
+ec.register_connection(iface1, channel)
+ec.register_connection(iface2, channel)
+
+client_ip = "192.168.0.%s" % nodez[-2:]
+
+# Create and Configure the Application
+app1 = ec.register_resource("omf::Application")
+ec.set(app1, "command",
+ "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority /root/vlc/vlc-1.1.13/cvlc /root/10-by-p0d.avi --sout '#rtp{dst=%s,port=5004,mux=ts}'" % client_ip)
+ec.register_connection(app1, node1)
+
+## Add an OMFApplication to run the client VLC and count the number of bytes
+## transmitted, using wc.
+app2 = ec.register_resource("omf::Application")
+ec.set(app2, "command",
+ "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority /root/vlc/vlc-1.1.13/cvlc rtp://%s:5004 | wc -c "% client_ip)
+
+## Alternatively, you can try to send the video to standard output and
+## recover it using the stdout trace. However, it seems that sending
+## binary messages back to the client is not well supported by the OMF 6 RC
+#ec.set(app2, "command", "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority /root/vlc/vlc-1.1.13/cvlc rtp://%s:5004 --sout '#standard{access=file,mux=ts,dst=-}'" % client_ip)
+ec.register_connection(app2, node2)
+
+## stop app1 65s after it started
+ec.register_condition(app1, ResourceAction.STOP, app1, ResourceState.STARTED , "65s")
+## start app2 5s after app1
+ec.register_condition(app2, ResourceAction.START, app1, ResourceState.STARTED , "5s")
+## stop app2 5 seconds after app1 stops
+ec.register_condition(app2, ResourceAction.STOP, app1, ResourceState.STOPPED , "5s")
+
+# Deploy
+ec.deploy()
+
+ec.wait_finished([app2])
+
+# Retrieve the bytes transmitted count and print it
+byte_count = ec.trace(app2, "stdout")
+print "BYTES transmitted", byte_count
+
+## If you redirected the video to standard output, you can try to
+## retrieve the stdout of the VLC client
+## video = ec.trace(app2, "stdout")
+#f = open("video.ts", "w")
+#f.write(video)
+#f.close()
+
+# Stop Experiment
+ec.shutdown()
+
ec = ExperimentController()
# Create and Configure the Node
-node1 = ec.register_resource("OMFNode")
+node1 = ec.register_resource("omf::Node")
# If the hostname is not declared, Nepi will take SFA to provision one.
ec.set(node1, 'hostname', 'node0.nepi-robot.nepi.wilab2.ilabt.iminds.be')
# XMPP credentials
ec.set(node1, 'version', "5")
# Create and Configure the Application
-app1 = ec.register_resource("OMFRobotApplication")
+app1 = ec.register_resource("omf::RobotApplication")
ec.set(app1, 'appid', "robot")
ec.set(app1, 'version', "5")
ec.set(app1, 'command', "/users/jtribino/RobotCTRLComm.rb /users/jtribino/coordinate.csv")
### Define OMF Method to simplify definition of resources ###
def add_node(ec, hostname, xmppServer, xmppUser, xmppPort = "5222", xmppPassword = "1234"):
- node = ec.register_resource("OMFNode")
+ node = ec.register_resource("omf::Node")
ec.set(node, 'hostname', hostname)
ec.set(node, 'xmppServer', xmppServer)
ec.set(node, 'xmppUser', xmppUser)
def add_interface(ec, ip, xmppServer, xmppUser, essid = "ccn", name = "wlan0", mode = "adhoc",
typ = "g", xmppPort = "5222", xmppPassword = "1234"):
- iface = ec.register_resource("OMFWifiInterface")
+ iface = ec.register_resource("omf::WifiInterface")
ec.set(iface, 'name', name)
ec.set(iface, 'mode', mode)
ec.set(iface, 'hw_mode', typ)
return iface
def add_channel(ec, channel, xmppServer, xmppUser, xmppPort = "5222", xmppPassword = "1234"):
- chan = ec.register_resource("OMFChannel")
+ chan = ec.register_resource("omf::Channel")
ec.set(chan, 'channel', channel)
ec.set(chan, 'xmppServer', xmppServer)
ec.set(chan, 'xmppUser', xmppUser)
def add_app(ec, appid, command, env, xmppServer, xmppUser,
xmppPort = "5222", xmppPassword = "1234"):
- app = ec.register_resource("OMFApplication")
+ app = ec.register_resource("omf::Application")
ec.set(app, 'appid', appid)
ec.set(app, 'command', command)
ec.set(app, 'env', env)
### Define OMF Method to simplify definition of resources ###
def add_node(ec, hostname, xmppServer, xmppUser, xmppPort = "5222", xmppPassword = "1234"):
- node = ec.register_resource("OMFNode")
+ node = ec.register_resource("omf::Node")
ec.set(node, 'hostname', hostname)
ec.set(node, 'xmppServer', xmppServer)
ec.set(node, 'xmppUser', xmppUser)
def add_interface(ec, ip, xmppServer, xmppUser, essid = "ccn", name = "wlan0", mode = "adhoc",
typ = "g", xmppPort = "5222", xmppPassword = "1234"):
- iface = ec.register_resource("OMFWifiInterface")
+ iface = ec.register_resource("omf::WifiInterface")
ec.set(iface, 'name', name)
ec.set(iface, 'mode', mode)
ec.set(iface, 'hw_mode', typ)
return iface
def add_channel(ec, channel, xmppServer, xmppUser, xmppPort = "5222", xmppPassword = "1234"):
- chan = ec.register_resource("OMFChannel")
+ chan = ec.register_resource("omf::Channel")
ec.set(chan, 'channel', channel)
ec.set(chan, 'xmppServer', xmppServer)
ec.set(chan, 'xmppUser', xmppUser)
def add_app(ec, host, appid, command, env, xmppServer, xmppUser,
xmppPort = "5222", xmppPassword = "1234"):
- app = ec.register_resource("OMFApplication")
+ app = ec.register_resource("omf::Application")
ec.set(app, 'appid', appid)
ec.set(app, 'command', command)
ec.set(app, 'env', env)
ec = ExperimentController()
# Create and Configure the Nodes
-node1 = ec.register_resource("OMFNode")
+node1 = ec.register_resource("omf::Node")
ec.set(node1, 'hostname', 'omf.nitos.node0XX')
ec.set(node1, 'xmppServer', "ZZZ")
ec.set(node1, 'xmppUser', "nitlab.inf.uth.gr")
ec.set(node1, 'xmppPassword', "1234")
ec.set(node1, 'version', "5")
-node2 = ec.register_resource("OMFNode")
+node2 = ec.register_resource("omf::Node")
ec.set(node2, 'hostname', "omf.nitos.node0YY")
ec.set(node2, 'xmppServer', "ZZZ")
ec.set(node2, 'xmppUser', "nitlab.inf.uth.gr")
ec.set(node2, 'version', "5")
# Create and Configure the Interfaces
-iface1 = ec.register_resource("OMFWifiInterface")
+iface1 = ec.register_resource("omf::WifiInterface")
ec.set(iface1, 'name', "wlan0")
ec.set(iface1, 'mode', "adhoc")
ec.set(iface1, 'hw_mode', "g")
ec.set(iface1, 'ip', "192.168.0.XX/24")
ec.set(iface1, 'version', "5")
-iface2 = ec.register_resource("OMFWifiInterface")
+iface2 = ec.register_resource("omf::WifiInterface")
ec.set(iface2, 'name', "wlan0")
ec.set(iface2, 'mode', "adhoc")
ec.set(iface2, 'hw_mode', 'g')
ec.set(iface2, 'version', "5")
# Create and Configure the Channel
-channel = ec.register_resource("OMFChannel")
+channel = ec.register_resource("omf::Channel")
ec.set(channel, 'channel', "6")
ec.set(channel, 'xmppServer', "ZZZ")
ec.set(channel, 'xmppUser', "nitlab.inf.uth.gr")
ec.set(channel, 'version', "5")
# Create and Configure the Application
-app1 = ec.register_resource("OMFApplication")
+app1 = ec.register_resource("omf::Application")
ec.set(app1, 'appid', 'Vlc#1')
ec.set(app1, 'command', "/root/vlc/vlc-1.1.13/cvlc /root/10-by-p0d.avi --sout '#rtp{dst=192.168.0.YY,port=1234,mux=ts}'")
ec.set(app1, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
ec.set(app1, 'version', "5")
-app2 = ec.register_resource("OMFApplication")
+app2 = ec.register_resource("omf::Application")
ec.set(app2, 'appid', 'Vlc#2')
ec.set(app2, 'command', "/root/vlc/vlc-1.1.13/cvlc rtp://192.168.0.YY:1234")
ec.set(app2, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
ec.set(app2, 'version', "5")
-app3 = ec.register_resource("OMFApplication")
+app3 = ec.register_resource("omf::Application")
ec.set(app3, 'appid', 'Kill#2')
ec.set(app3, 'command', "/usr/bin/killall vlc_app")
ec.set(app3, 'env', " ")
ec.set(app3, 'version', "5")
-app4 = ec.register_resource("OMFApplication")
+app4 = ec.register_resource("omf::Application")
ec.set(app4, 'appid', 'Kill#1')
ec.set(app4, 'command', "/usr/bin/killall vlc_app")
ec.set(app4, 'env', " ")
ec = ExperimentController()
# Create and Configure the Nodes
-node1 = ec.register_resource("OMFNode")
+node1 = ec.register_resource("omf::Node")
ec.set(node1, 'hostname', 'omf.nitos.node025')
ec.set(node1, 'xmppServer', "tribino")
ec.set(node1, 'xmppUser', "nitlab.inf.uth.gr")
ec.set(node1, 'xmppPassword', "1234")
ec.set(node1, 'version', "5")
-node2 = ec.register_resource("OMFNode")
+node2 = ec.register_resource("omf::Node")
ec.set(node2, 'hostname', "omf.nitos.node028")
ec.set(node2, 'xmppServer', "tribino")
ec.set(node2, 'xmppUser', "nitlab.inf.uth.gr")
ec.set(node2, 'version', "5")
# Create and Configure the Interfaces
-iface1 = ec.register_resource("OMFWifiInterface")
+iface1 = ec.register_resource("omf::WifiInterface")
ec.set(iface1, 'name', "wlan0")
ec.set(iface1, 'mode', "adhoc")
ec.set(iface1, 'hw_mode', "g")
ec.set(iface1, 'ip', "192.168.0.25/24")
ec.set(iface1, 'version', "5")
-iface2 = ec.register_resource("OMFWifiInterface")
+iface2 = ec.register_resource("omf::WifiInterface")
ec.set(iface2, 'name', "wlan0")
ec.set(iface2, 'mode', "adhoc")
ec.set(iface2, 'hw_mode', 'g')
ec.set(iface2, 'version', "5")
# Create and Configure the Channel
-channel = ec.register_resource("OMFChannel")
+channel = ec.register_resource("omf::Channel")
ec.set(channel, 'channel', "6")
ec.set(channel, 'xmppServer', "tribino")
ec.set(channel, 'xmppUser', "nitlab.inf.uth.gr")
ec.set(channel, 'version', "5")
# Create and Configure the Application
-app1 = ec.register_resource("OMFApplication")
+app1 = ec.register_resource("omf::Application")
ec.set(app1, 'appid', 'Vlc#1')
ec.set(app1, 'command', "/root/vlc/vlc-1.1.13/cvlc /root/10-by-p0d.avi --sout '#rtp{dst=192.168.0.28,port=1234,mux=ts}'")
ec.set(app1, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
ec.set(app1, 'version', "5")
-app2 = ec.register_resource("OMFApplication")
+app2 = ec.register_resource("omf::Application")
ec.set(app2, 'appid', 'Vlc#2')
ec.set(app2, 'command', "/root/vlc/vlc-1.1.13/cvlc rtp://192.168.0.28:1234")
ec.set(app2, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
ec.set(app2, 'version', "5")
-app3 = ec.register_resource("OMFApplication")
+app3 = ec.register_resource("omf::Application")
ec.set(app3, 'appid', 'Kill#2')
ec.set(app3, 'command', "/usr/bin/killall vlc_app")
ec.set(app3, 'env', " ")
ec.set(app3, 'version', "5")
-app4 = ec.register_resource("OMFApplication")
+app4 = ec.register_resource("omf::Application")
ec.set(app4, 'appid', 'Kill#1')
ec.set(app4, 'command', "/usr/bin/killall vlc_app")
ec.set(app4, 'env', " ")
-"""
- NEPI, a framework to manage network experiments
- Copyright (C) 2013 INRIA
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-
- Author: Alina Quereilhac <alina.quereilhac@inria.fr>
- Julien Tribino <julien.tribino@inria.fr>
-
- Example :
- - Testbed : Nitos
- - Explanation :
-
- VLC Streaming on VLC
-
- Node
- omf.nitos.node0xx
- 0
- |
- |
- 0
- xEyes
-
- - Experiment:
- - t0 : Deployment
- - t1 : xEeyes Start
- - t2 (t1 + 10s) : xEyes stop
- - t3 (t2 + 2s) : Kill the application
-"""
-
#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors: Alina Quereilhac <alina.quereilhac@inria.fr>
+# Julien Tribino <julien.tribino@inria.fr>
+
+# Topology
+#
+#
+# Testbed : Nitos
+#
+# Node
+# omf.nitos.node0xx
+# 0
+# |
+# |
+# 0
+# xEyes
+#
+# - Experiment:
+# - t0 : Deployment
+#     - t1 : xEyes Start
+# - t2 (t1 + 10s) : xEyes stop
+# - t3 (t2 + 2s) : Kill the application
+#
+#
+
from nepi.execution.resource import ResourceFactory, ResourceAction, ResourceState
from nepi.execution.ec import ExperimentController
ec = ExperimentController()
# Create and Configure the Nodes
-node1 = ec.register_resource("OMFNode")
+node1 = ec.register_resource("omf::Node")
ec.set(node1, 'hostname', 'omf.nitos.node0XX')
-ec.set(node1, 'xmppServer', "ZZZ")
-ec.set(node1, 'xmppUser', "nitlab.inf.uth.gr")
+ec.set(node1, 'xmppServer', "nitlab.inf.uth.gr")
+ec.set(node1, 'xmppUser', "<YOUR-SLICE>")
ec.set(node1, 'xmppPort', "5222")
ec.set(node1, 'xmppPassword', "1234")
ec.set(node1, 'version', "5")
# Create and Configure the Interfaces
-iface1 = ec.register_resource("OMFWifiInterface")
+iface1 = ec.register_resource("omf::WifiInterface")
ec.set(iface1, 'name', "wlan0")
ec.set(iface1, 'mode', "adhoc")
ec.set(iface1, 'hw_mode', "g")
ec.set(iface1, 'version', "5")
# Create and Configure the Channel
-channel = ec.register_resource("OMFChannel")
+channel = ec.register_resource("omf::Channel")
ec.set(channel, 'channel', "6")
ec.set(channel, 'xmppServer', "ZZZ")
ec.set(channel, 'xmppUser', "nitlab.inf.uth.gr")
ec.set(channel, 'version', "5")
# Create and Configure the Application
-app1 = ec.register_resource("OMFApplication")
+app1 = ec.register_resource("omf::Application")
ec.set(app1, 'appid', 'XEyes#1')
ec.set(app1, 'command', "/usr/bin/xeyes")
ec.set(app1, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
ec.set(app1, 'version', "5")
-app2 = ec.register_resource("OMFApplication")
+app2 = ec.register_resource("omf::Application")
ec.set(app2, 'appid', 'Kill#1')
ec.set(app2, 'path', "/usr/bin/kill")
ec.set(app2, 'args', "xeyes")
### Define OMF Method to simplify definition of resources ###
def add_node(ec, hostname, xmppServer, xmppUser, xmppPort = "5222", xmppPassword = "1234"):
- node = ec.register_resource("OMFNode")
+ node = ec.register_resource("omf::Node")
ec.set(node, 'hostname', hostname)
ec.set(node, 'xmppServer', xmppServer)
ec.set(node, 'xmppUser', xmppUser)
def add_interface(ec, ip, xmppServer, xmppUser, essid = "ccn", name = "wlan0", mode = "adhoc",
typ = "g", xmppPort = "5222", xmppPassword = "1234"):
- iface = ec.register_resource("OMFWifiInterface")
+ iface = ec.register_resource("omf::WifiInterface")
ec.set(iface, 'name', name)
ec.set(iface, 'mode', mode)
ec.set(iface, 'hw_mode', typ)
return iface
def add_channel(ec, channel, xmppServer, xmppUser, xmppPort = "5222", xmppPassword = "1234"):
- chan = ec.register_resource("OMFChannel")
+ chan = ec.register_resource("omf::Channel")
ec.set(chan, 'channel', channel)
ec.set(chan, 'xmppServer', xmppServer)
ec.set(chan, 'xmppUser', xmppUser)
def add_app(ec, host, appid, command, env, xmppServer, xmppUser,
xmppPort = "5222", xmppPassword = "1234"):
- app = ec.register_resource("OMFApplication")
+ app = ec.register_resource("omf::Application")
ec.set(app, 'appid', appid)
ec.set(app, 'command', command)
ec.set(app, 'env', env)
ec = ExperimentController()
# Create and Configure the Nodes
-node1 = ec.register_resource("OMFNode")
+node1 = ec.register_resource("omf::Node")
ec.set(node1, 'hostname', 'omf.plexus.wlab17')
ec.set(node1, 'xmppServer', "nepi")
ec.set(node1, 'xmppUser', "xmpp-plexus.onelab.eu")
ec.set(node1, 'version', "5")
# Create and Configure the Application
-app1 = ec.register_resource("OMFApplication")
+app1 = ec.register_resource("omf::Application")
ec.set(app1, 'appid', "robot")
ec.set(app1, 'command', "/root/CTRL_test.rb coord.csv")
ec.set(app1, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
ec = ExperimentController()
# Create and Configure the Nodes
-node1 = ec.register_resource("OMFNode")
+node1 = ec.register_resource("omf::Node")
ec.set(node1, 'hostname', 'omf.plexus.wlab17')
ec.set(node1, 'xmppUser', "nepi")
ec.set(node1, 'xmppServer', "xmpp-plexus.onelab.eu")
ec.set(node1, 'xmppPassword', "1234")
ec.set(node1, 'version', "5")
-node2 = ec.register_resource("OMFNode")
+node2 = ec.register_resource("omf::Node")
ec.set(node2, 'hostname', "omf.plexus.wlab37")
ec.set(node2, 'xmppUser', "nepi")
ec.set(node2, 'xmppServer', "xmpp-plexus.onelab.eu")
ec.set(node2, 'version', "5")
# Create and Configure the Interfaces
-iface1 = ec.register_resource("OMFWifiInterface")
+iface1 = ec.register_resource("omf::WifiInterface")
ec.set(iface1, 'name', "wlan0")
ec.set(iface1, 'mode', "adhoc")
ec.set(iface1, 'hw_mode', "g")
ec.set(iface1, 'ip', "10.0.0.17")
ec.set(iface1, 'version', "5")
-iface2 = ec.register_resource("OMFWifiInterface")
+iface2 = ec.register_resource("omf::WifiInterface")
ec.set(iface2, 'name', "wlan0")
ec.set(iface2, 'mode', "adhoc")
ec.set(iface2, 'hw_mode', 'g')
ec.set(iface2, 'version', "5")
# Create and Configure the Channel
-channel = ec.register_resource("OMFChannel")
+channel = ec.register_resource("omf::Channel")
ec.set(channel, 'channel', "6")
ec.set(channel, 'xmppUser', "nepi")
ec.set(channel, 'xmppServer', "xmpp-plexus.onelab.eu")
ec.set(channel, 'version', "5")
# Create and Configure the Application
-app1 = ec.register_resource("OMFApplication")
+app1 = ec.register_resource("omf::Application")
ec.set(app1, 'appid', 'Vlc#1')
ec.set(app1, 'command', "/opt/vlc-1.1.13/cvlc --quiet /opt/10-by-p0d.avi --sout '#rtp{dst=10.0.0.37,port=1234,mux=ts}'")
#ec.set(app1, 'command', "/opt/vlc-1.1.13/cvlc --quiet /opt/big_buck_bunny_240p_mpeg4.ts --sout '#rtp{dst=10.0.0.XX,port=1234,mux=ts} '")
ec.set(app1, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
ec.set(app1, 'version', "5")
-app2 = ec.register_resource("OMFApplication")
+app2 = ec.register_resource("omf::Application")
ec.set(app2, 'appid', 'Vlc#2')
ec.set(app2, 'command', "/opt/vlc-1.1.13/cvlc --quiet rtp://10.0.0.37:1234")
ec.set(app2, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
ec.set(app2, 'version', "5")
-app3 = ec.register_resource("OMFApplication")
+app3 = ec.register_resource("omf::Application")
ec.set(app3, 'appid', 'Kill#1')
ec.set(app3, 'command', "/usr/bin/killall vlc")
ec.set(app3, 'env', " ")
ec.set(app3, 'version', "5")
-app4 = ec.register_resource("OMFApplication")
+app4 = ec.register_resource("omf::Application")
ec.set(app4, 'appid', 'Kill#2')
ec.set(app4, 'command', "/usr/bin/killall vlc")
ec.set(app4, 'env', " ")
# Create and Configure the Nodes
-node1 = ec.register_resource("OMFNode")
+node1 = ec.register_resource("omf::Node")
ec.set(node1, 'hostname', 'servernode.nepivlcexperiment.nepi.wilab2.ilabt.iminds.be')
ec.set(node1, 'xmppServer', "xmpp.ilabt.iminds.be")
ec.set(node1, 'xmppUser', "nepi")
ec.set(node1, 'xmppPort', "5222")
ec.set(node1, 'xmppPassword', "1234")
-iface1 = ec.register_resource("OMFWifiInterface")
+iface1 = ec.register_resource("omf::WifiInterface")
ec.set(iface1, 'name', 'wlan0')
ec.set(iface1, 'mode', "adhoc")
ec.set(iface1, 'hw_mode', "g")
ec.set(iface1, 'essid', "vlc")
ec.set(iface1, 'ip', "192.168.0.1/24")
-node2 = ec.register_resource("OMFNode")
+node2 = ec.register_resource("omf::Node")
ec.set(node2, 'hostname', 'client1node.nepivlcexperiment.nepi.wilab2.ilabt.iminds.be')
ec.set(node2, 'xmppServer', "xmpp.ilabt.iminds.be")
ec.set(node2, 'xmppUser', "nepi")
ec.set(node2, 'xmppPort', "5222")
ec.set(node2, 'xmppPassword', "1234")
-iface2 = ec.register_resource("OMFWifiInterface")
+iface2 = ec.register_resource("omf::WifiInterface")
ec.set(iface2, 'name', 'wlan0')
ec.set(iface2, 'mode', "adhoc")
ec.set(iface2, 'hw_mode', "g")
ec.set(iface2, 'essid', "vlc")
ec.set(iface2, 'ip', "192.168.0.2/24")
-node3 = ec.register_resource("OMFNode")
+node3 = ec.register_resource("omf::Node")
ec.set(node3, 'hostname', 'client2node.nepivlcexperiment.nepi.wilab2.ilabt.iminds.be')
ec.set(node3, 'xmppServer', "xmpp.ilabt.iminds.be")
ec.set(node3, 'xmppUser', "nepi")
ec.set(node3, 'xmppPort', "5222")
ec.set(node3, 'xmppPassword', "1234")
-iface3 = ec.register_resource("OMFWifiInterface")
+iface3 = ec.register_resource("omf::WifiInterface")
ec.set(iface3, 'name', 'wlan0')
ec.set(iface3, 'mode', "adhoc")
ec.set(iface3, 'hw_mode', "g")
ec.set(iface3, 'essid', "vlc")
ec.set(iface3, 'ip', "192.168.0.3/24")
-chan = ec.register_resource("OMFChannel")
+chan = ec.register_resource("omf::Channel")
ec.set(chan, 'channel', "6")
# Create and Configure the Application
-app1 = ec.register_resource("OMFApplication")
+app1 = ec.register_resource("omf::Application")
ec.set(app1, 'command', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority /root/vlc/vlc-1.1.13/cvlc /root/10-by-p0d.avi --sout '#duplicate{dst=rtp{dst=192.168.0.2,port=1234,mux=ts},dst=rtp{dst=192.168.0.3,port=1234,mux=ts}}'")
-app2 = ec.register_resource("OMFApplication")
+app2 = ec.register_resource("omf::Application")
ec.set(app2, 'command', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority /root/vlc/vlc-1.1.13/cvlc rtp://192.168.0.2:1234")
-app3 = ec.register_resource("OMFApplication")
+app3 = ec.register_resource("omf::Application")
ec.set(app3, 'command', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority /root/vlc/vlc-1.1.13/cvlc rtp://192.168.0.3:1234")
# Create and Configure the Nodes
-node1 = ec.register_resource("OMFNode")
+node1 = ec.register_resource("omf::Node")
ec.set(node1, 'hostname', 'node025')
ec.set(node1, 'xmppServer', "nitlab.inf.uth.gr")
ec.set(node1, 'xmppUser', "nepi")
ec.set(node1, 'xmppPort', "5222")
ec.set(node1, 'xmppPassword', "1234")
-iface1 = ec.register_resource("OMFWifiInterface")
+iface1 = ec.register_resource("omf::WifiInterface")
ec.set(iface1, 'name', 'wlan0')
ec.set(iface1, 'mode', "adhoc")
ec.set(iface1, 'hw_mode', "g")
ec.set(iface1, 'essid', "vlc")
ec.set(iface1, 'ip', "192.168.0.25/24")
-node2 = ec.register_resource("OMFNode")
+node2 = ec.register_resource("omf::Node")
ec.set(node2, 'hostname', 'node027')
ec.set(node2, 'xmppServer', "nitlab.inf.uth.gr")
ec.set(node2, 'xmppUser', "nepi")
ec.set(node2, 'xmppPort', "5222")
ec.set(node2, 'xmppPassword', "1234")
-iface2 = ec.register_resource("OMFWifiInterface")
+iface2 = ec.register_resource("omf::WifiInterface")
ec.set(iface2, 'name', 'wlan0')
ec.set(iface2, 'mode', "adhoc")
ec.set(iface2, 'hw_mode', "g")
ec.set(iface2, 'essid', "vlc")
ec.set(iface2, 'ip', "192.168.0.27/24")
-chan = ec.register_resource("OMFChannel")
+chan = ec.register_resource("omf::Channel")
ec.set(chan, 'channel', "6")
# Create and Configure the Application
-app1 = ec.register_resource("OMFApplication")
+app1 = ec.register_resource("omf::Application")
ec.set(app1, 'command', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority /root/vlc/vlc-1.1.13/cvlc /root/10-by-p0d.avi --sout '#rtp{dst=192.168.0.27,port=1234,mux=ts}'")
-app2 = ec.register_resource("OMFApplication")
+app2 = ec.register_resource("omf::Application")
ec.set(app2, 'command', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority /root/vlc/vlc-1.1.13/cvlc rtp://192.168.0.27:1234")
# Create and Configure the Nodes
-node1 = ec.register_resource("OMFNode")
+node1 = ec.register_resource("omf::Node")
ec.set(node1, 'hostname', 'wlab12')
ec.set(node1, 'xmppServer', "xmpp-plexus.onelab.eu")
ec.set(node1, 'xmppUser', "nepi")
ec.set(node1, 'xmppPassword', "1234")
# Create and Configure the Application
-app1 = ec.register_resource("OMFApplication")
+app1 = ec.register_resource("omf::Application")
ec.set(app1, 'command', '/bin/hostname -f')
ec.set(app1, 'env', "")
-app2 = ec.register_resource("OMFApplication")
+app2 = ec.register_resource("omf::Application")
ec.set(app2, 'command', '/bin/date')
ec.set(app2, 'env', "")
-app3 = ec.register_resource("OMFApplication")
+app3 = ec.register_resource("omf::Application")
ec.set(app3, 'command', '/bin/hostname -f')
ec.set(app3, 'env', "")
# Create and Configure the Nodes
-node1 = ec.register_resource("OMFNode")
+node1 = ec.register_resource("omf::Node")
ec.set(node1, 'hostname', 'wlab12')
ec.set(node1, 'xmppServer', "xmpp-plexus.onelab.eu")
ec.set(node1, 'xmppUser', "nepi")
ec.set(node1, 'xmppPort', "5222")
ec.set(node1, 'xmppPassword', "1234")
-iface1 = ec.register_resource("OMFWifiInterface")
+iface1 = ec.register_resource("omf::WifiInterface")
ec.set(iface1, 'name', 'wlan0')
ec.set(iface1, 'mode', "adhoc")
ec.set(iface1, 'hw_mode', "g")
ec.set(iface1, 'essid', "ping")
ec.set(iface1, 'ip', "192.168.0.12/24")
-node2 = ec.register_resource("OMFNode")
+node2 = ec.register_resource("omf::Node")
ec.set(node2, 'hostname', 'wlab49')
ec.set(node2, 'xmppServer', "xmpp-plexus.onelab.eu")
ec.set(node2, 'xmppUser', "nepi")
ec.set(node2, 'xmppPort', "5222")
ec.set(node2, 'xmppPassword', "1234")
-iface2 = ec.register_resource("OMFWifiInterface")
+iface2 = ec.register_resource("omf::WifiInterface")
ec.set(iface2, 'name', 'wlan0')
ec.set(iface2, 'mode', "adhoc")
ec.set(iface2, 'hw_mode', "g")
ec.set(iface2, 'essid', "ping")
ec.set(iface2, 'ip', "192.168.0.49/24")
-chan = ec.register_resource("OMFChannel")
+chan = ec.register_resource("omf::Channel")
ec.set(chan, 'channel', "6")
# Create and Configure the Application
-app1 = ec.register_resource("OMFApplication")
-ec.set(app1, 'command', '/bin/ping 192.168.0.49')
+app1 = ec.register_resource("omf::Application")
+ec.set(app1, 'command', '/bin/ping -c5 192.168.0.49')
ec.set(app1, 'env', "")
-app2 = ec.register_resource("OMFApplication")
-ec.set(app2, 'command', '/bin/ping 192.168.0.12')
+app2 = ec.register_resource("omf::Application")
+ec.set(app2, 'command', '/bin/ping -c5 192.168.0.12')
ec.set(app2, 'env', "")
--- /dev/null
+"""
+ NEPI, a framework to manage network experiments
+ Copyright (C) 2013 INRIA
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+ Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+ Julien Tribino <julien.tribino@inria.fr>
+
+
+"""
+
+#!/usr/bin/env python
+from nepi.execution.resource import ResourceFactory, ResourceAction, ResourceState
+from nepi.execution.ec import ExperimentController
+
+# Create the EC
+ec = ExperimentController()
+
+# Create and Configure the Nodes
+
+node1 = ec.register_resource("omf::Node")
+ec.set(node1, 'hostname', 'wlab12')
+ec.set(node1, 'xmppServer', "xmpp-plexus.onelab.eu")
+ec.set(node1, 'xmppUser', "nepi")
+ec.set(node1, 'xmppPort', "5222")
+ec.set(node1, 'xmppPassword', "1234")
+
+iface1 = ec.register_resource("omf::WifiInterface")
+ec.set(iface1, 'name', 'wlan0')
+ec.set(iface1, 'mode', "adhoc")
+ec.set(iface1, 'hw_mode', "g")
+ec.set(iface1, 'essid', "ping")
+ec.set(iface1, 'ip', "192.168.0.12/24")
+
+node2 = ec.register_resource("omf::Node")
+ec.set(node2, 'hostname', 'wlab49')
+ec.set(node2, 'xmppServer', "xmpp-plexus.onelab.eu")
+ec.set(node2, 'xmppUser', "nepi")
+ec.set(node2, 'xmppPort', "5222")
+ec.set(node2, 'xmppPassword', "1234")
+
+iface2 = ec.register_resource("omf::WifiInterface")
+ec.set(iface2, 'name', 'wlan0')
+ec.set(iface2, 'mode', "adhoc")
+ec.set(iface2, 'hw_mode', "g")
+ec.set(iface2, 'essid', "ping")
+ec.set(iface2, 'ip', "192.168.0.49/24")
+
+chan = ec.register_resource("omf::Channel")
+ec.set(chan, 'channel', "6")
+
+# Create and Configure the Application
+app1 = ec.register_resource("omf::Application")
+ec.set(app1, 'command', '/bin/ping -c5 192.168.0.49')
+ec.set(app1, 'env', "")
+
+app2 = ec.register_resource("omf::Application")
+ec.set(app2, 'command', '/bin/ping -c5 192.168.0.12')
+ec.set(app2, 'env', "")
+
+
+# Connection
+ec.register_connection(iface1, node1)
+ec.register_connection(iface2, node2)
+ec.register_connection(iface1, chan)
+ec.register_connection(iface2, chan)
+ec.register_connection(app1, node1)
+ec.register_connection(app2, node2)
+
+ec.register_condition([app2], ResourceAction.START, app1, ResourceState.STARTED , "2s")
+ec.register_condition([app1,app2], ResourceAction.STOP, app2, ResourceState.STARTED , "10s")
+
+
+# Deploy
+ec.deploy()
+
+ec.wait_finished([app1,app2])
+
+stdout_1 = ec.trace(app1, "stdout")
+stdout_2 = ec.trace(app2, "stdout")
+
+# Choose a directory to store the traces; by default
+# it is the folder where you run NEPI.
+
+f = open("app1.txt", "w")
+f.write(stdout_1)
+f.close()
+
+g = open("app2.txt", "w")
+g.write(stdout_2)
+g.close()
+
+# Stop Experiment
+ec.shutdown()
+
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors: Alina Quereilhac <alina.quereilhac@inria.fr>
+# Julien Tribino <julien.tribino@inria.fr>
+
+# Topology
+#
+#
+# Testbed : Nitos
+#
+# Node
+# omf.nitos.node0ZZ
+# 0
+# |
+# |
+# 0
+# Node
+# omf.nitos.node0ZZ
+# PING
+#
+#
+# - Experiment:
+# - t0 : Deployment
+# - t1 : Ping Start
+# - t2 (t1 + 10s) : Ping stop
+# - t3 (t2 + 2s) : Kill the application
+#
+#
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.resource import ResourceAction, ResourceState
+
+from optparse import OptionParser
+import os
+
+usage = ("usage: %prog -x <nodex> -z <nodez> -s <slice-name> -c <channel>")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-x", "--nodex", dest="nodex",
+ help="Nitos first reserved node "
+ "(e.g. hostname must be of form: omf.nitos.node0XX)",
+ type="str")
+parser.add_option("-z", "--nodez", dest="nodez",
+ help="Nitos second reserved node "
+ "(e.g. hostname must be of form: omf.nitos.node0ZZ)",
+ type="str")
+parser.add_option("-c", "--channel", dest="channel",
+ help="Nitos reserved channel",
+ type="str")
+parser.add_option("-s", "--slice-name", dest="slicename",
+ help="Nitos slice name", type="str")
+(options, args) = parser.parse_args()
+
+nodex = options.nodex
+nodez = options.nodez
+slicename = options.slicename
+chan = options.channel
+
+# Create the EC
+ec = ExperimentController(exp_id="nitos_omf5_ping")
+
+# Create and Configure the Nodes
+node1 = ec.register_resource("omf::Node")
+ec.set(node1, "hostname", nodex)
+ec.set(node1, "xmppUser", slicename)
+ec.set(node1, "xmppServer", "nitlab.inf.uth.gr")
+ec.set(node1, "xmppPort", "5222")
+ec.set(node1, "xmppPassword", "1234")
+ec.set(node1, "version", "5")
+
+# Create and Configure the Interfaces
+iface1 = ec.register_resource("omf::WifiInterface")
+ec.set(iface1, "name", "wlan0")
+ec.set(iface1, "mode", "adhoc")
+ec.set(iface1, "hw_mode", "g")
+ec.set(iface1, "essid", "ping")
+ec.set(iface1, "ip", "192.168.0.%s/24" % nodex[-2:])
+ec.set(iface1, "version", "5")
+ec.register_connection(node1, iface1)
+
+# Create and Configure the Channel
+channel = ec.register_resource("omf::Channel")
+ec.set(channel, "channel", chan)
+ec.set(channel, "xmppUser", slicename)
+ec.set(channel, "xmppServer", "nitlab.inf.uth.gr")
+ec.set(channel, "xmppPort", "5222")
+ec.set(channel, "xmppPassword", "1234")
+ec.set(channel, "version", "5")
+ec.register_connection(iface1, channel)
+
+# Create and Configure the PING Application
+app1 = ec.register_resource("omf::Application")
+ec.set(app1, "appid", "Ping#1")
+ec.set(app1, "command", "/bin/ping -c3 192.168.0.%s" % nodex[-2:])
+ec.set(app1, "version", "5")
+ec.register_connection(app1, node1)
+
+app2 = ec.register_resource("omf::Application")
+ec.set(app2, "appid", "Kill#1")
+ec.set(app2, "command", "/usr/bin/killall ping")
+ec.set(app2, "version", "5")
+ec.register_connection(app2, node1)
+
+# User Behaviour
+ec.register_condition(app1, ResourceAction.STOP, app1, ResourceState.STARTED , "10s")
+ec.register_condition(app2, ResourceAction.START, app1, ResourceState.STARTED , "12s")
+ec.register_condition(app2, ResourceAction.STOP, app2, ResourceState.STARTED , "1s")
+
+# Deploy
+ec.deploy()
+
+ec.wait_finished([app1, app2])
+
+print ec.trace(app1, "stdout")
+
+# Stop Experiment
+ec.shutdown()
+
--- /dev/null
+new BUNNY broadcast enabled loop
+setup BUNNY input /home/inria_lguevgeo/.nepi/nepi-usr/src/big_buck_bunny_240p_mpeg4_lq.ts
+setup BUNNY output #rtp{access=udp,mux=ts,sdp=rtsp://0.0.0.0:8554/BUNNY}
+new test_sched schedule enabled
+setup test_sched append control BUNNY play
--- /dev/null
+new BUNNY vod enabled
+setup BUNNY input /home/inria_lguevgeo/.nepi/nepi-usr/src/big_buck_bunny_240p_mpeg4_lq.ts
+
--- /dev/null
+import matplotlib
+matplotlib.use('GTK')
+import matplotlib.pyplot as plt
+import numpy as np
+import os
+import time
+
+import subprocess
+
+##### Parsing Argument to Plot #####
+from optparse import OptionParser
+
+usage = ("usage: %prog -p <type-of-plot> -d <type-of-packets> -f <folder-with-stats>")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-p", "--plot", dest="plot",
+ help="Type of Plot : vod_broad_cli | vod_broad_wlan | vod_broad_eth | broad_all | vod_all", type="string")
+parser.add_option("-d", "--packet", dest="packet",
+ help="Packet to use for the plot : frames | bytes", type="string")
+parser.add_option("-f", "--folder", dest="folder",
+ help="Folder with the statistics ", type="string")
+
+(options, args) = parser.parse_args()
+plot = options.plot
+packet = options.packet
+folder = options.folder
+
+##### Initialize the data #####
+
+overall_stats_broad = {}
+overall_stats_vod = {}
+
+for i in [1, 3, 5]:
+ overall_stats_broad[i] = {}
+ overall_stats_broad[i]['eth'] = []
+ overall_stats_broad[i]['wlan'] = []
+ overall_stats_broad[i]['cli'] = []
+
+ overall_stats_vod[i] = {}
+ overall_stats_vod[i]['eth'] = []
+ overall_stats_vod[i]['wlan'] = []
+ overall_stats_vod[i]['cli'] = []
+
+all_broad_folders = os.listdir(folder + 'demo_openlab_traces/broadcast')
+all_vod_folders = os.listdir(folder + 'demo_openlab_traces/vod')
+
+data_broad_folders = list()
+data_vod_folders = list()
+
+# Keep only the result folders (names starting with 's_')
+for f in all_broad_folders :
+ if f.startswith('s_'):
+ data_broad_folders.append(f)
+
+for f in all_vod_folders :
+ if f.startswith('s_'):
+ data_vod_folders.append(f)
+
+##### For Broadcast #####
+
+stats_broad_wlan = list()
+stats_broad_eth = list()
+stats_broad_cli = list()
+
+# Write the wanted statistics into a file
+for exp in data_broad_folders :
+ broad_file = os.listdir(folder + 'demo_openlab_traces/broadcast/'+exp)
+ for f in broad_file :
+ dest = folder + "demo_openlab_traces/broadcast/" + exp + "/stats_" + f + ".txt"
+ command = "tshark -r " + folder + "demo_openlab_traces/broadcast/" + exp + "/" + f + " -z io,phs > " + dest
+ if f.startswith('capwificen_wlan'):
+ p = subprocess.Popen(command , shell=True)
+ p.wait()
+ stats_broad_wlan.append(dest)
+ if f.startswith('capwificen_eth'):
+ p = subprocess.Popen(command , shell=True)
+ p.wait()
+ stats_broad_eth.append(dest)
+ if f.startswith('capcli'):
+ p = subprocess.Popen(command , shell=True)
+ p.wait()
+ stats_broad_cli.append(dest)
+
+# Number of clients used in the run (parsed from the file name)
+def nb_client(s):
+ elt = s.split('_')
+ if elt[-2] == '1':
+ return 1
+ if elt[-2] == '3':
+ return 3
+ if elt[-2] == '5':
+ return 5
+
+# Extract the UDP frame/byte counts from each broadcast stats file
+def get_broad_values(list_files, type_file):
+ for s in list_files:
+ nb = nb_client(s)
+ o = open(s, 'r')
+ for l in o:
+ if 'udp' in l:
+ row = l.split(':')
+ f = row[1].split(' ')
+ frame = int(f[0])
+ byte = int(row[2])
+
+ res = {}
+ res['frames'] = frame
+ res['bytes'] = byte
+ if frame < 20 :
+ continue
+ overall_stats_broad[nb][type_file].append(res)
+ o.close()
+
+get_broad_values(stats_broad_wlan, 'wlan')
+get_broad_values(stats_broad_eth, 'eth')
+get_broad_values(stats_broad_cli, 'cli')
+
+#print overall_stats_broad
+
+##### For VOD #####
+
+stats_vod_wlan = list()
+stats_vod_eth = list()
+stats_vod_cli = list()
+
+# Write the wanted statistics into a file
+for exp in data_vod_folders :
+ vod_file = os.listdir(folder + 'demo_openlab_traces/vod/'+exp)
+ for f in vod_file :
+ dest = folder + "/demo_openlab_traces/vod/" + exp + "/stats_" + f + ".txt"
+ command = "tshark -r " + folder + "demo_openlab_traces/vod/" + exp + "/" + f + " -z io,phs > " + dest
+ if f.startswith('capwificen_wlan'):
+ p = subprocess.Popen(command , shell=True)
+ p.wait()
+ stats_vod_wlan.append(dest)
+ if f.startswith('capwificen_eth'):
+ p = subprocess.Popen(command , shell=True)
+ p.wait()
+ stats_vod_eth.append(dest)
+ if f.startswith('capcli'):
+ p = subprocess.Popen(command , shell=True)
+ p.wait()
+ stats_vod_cli.append(dest)
+
+# Extract the UDP frame/byte counts from each VoD stats file
+def get_vod_values(list_files, type_file):
+ for s in list_files:
+ nb = nb_client(s)
+ o = open(s, 'r')
+ for l in o:
+ if 'udp' in l:
+ row = l.split(':')
+ f = row[1].split(' ')
+ frame = int(f[0])
+ byte = int(row[2])
+
+ res = {}
+ res['frames'] = frame
+ res['bytes'] = byte
+ if frame < 100 :
+ continue
+ overall_stats_vod[nb][type_file].append(res)
+ o.close()
+
+get_vod_values(stats_vod_wlan, 'wlan')
+get_vod_values(stats_vod_eth, 'eth')
+get_vod_values(stats_vod_cli, 'cli')
+
+#print overall_stats_vod
+
+##### For Plotting #####
+
+if plot != "vod_all":
+ means_broad_cli = list()
+ std_broad_cli = list()
+
+ means_broad_wlan = list()
+ std_broad_wlan = list()
+
+ means_broad_eth = list()
+ std_broad_eth = list()
+
+ for i in [1, 3, 5]:
+ data_cli = list()
+ for elt in overall_stats_broad[i]['cli']:
+ data_cli.append(elt['frames'])
+ samples = np.array(data_cli)
+
+ m = samples.mean()
+ std = np.std(data_cli)
+ means_broad_cli.append(m)
+ std_broad_cli.append(std)
+
+ data_wlan = list()
+ for elt in overall_stats_broad[i]['wlan']:
+ data_wlan.append(elt['frames'])
+ samples = np.array(data_wlan)
+
+ m = samples.mean()
+ std = np.std(data_wlan)
+ means_broad_wlan.append(m)
+ std_broad_wlan.append(std)
+
+ data_eth = list()
+ for elt in overall_stats_broad[i]['eth']:
+ data_eth.append(elt['frames'])
+ samples = np.array(data_eth)
+
+ m = samples.mean()
+ std = np.std(data_eth)
+ means_broad_eth.append(m)
+ std_broad_eth.append(std)
+
+if plot != "broad_all":
+ means_vod_cli = list()
+ std_vod_cli = list()
+
+ means_vod_wlan = list()
+ std_vod_wlan = list()
+
+ means_vod_eth = list()
+ std_vod_eth = list()
+
+ for i in [1, 3, 5]:
+ data_cli = list()
+ for elt in overall_stats_vod[i]['cli']:
+ data_cli.append(elt['frames'])
+ samples = np.array(data_cli)
+
+ m = samples.mean()
+ std = np.std(data_cli)
+ means_vod_cli.append(m)
+ std_vod_cli.append(std)
+
+ data_wlan = list()
+ for elt in overall_stats_vod[i]['wlan']:
+ data_wlan.append(elt['frames'])
+ samples = np.array(data_wlan)
+
+ m = samples.mean()
+ std = np.std(data_wlan)
+ means_vod_wlan.append(m)
+ std_vod_wlan.append(std)
+
+ data_eth = list()
+ for elt in overall_stats_vod[i]['eth']:
+ data_eth.append(elt['frames'])
+ samples = np.array(data_eth)
+
+ m = samples.mean()
+ std = np.std(data_eth)
+ means_vod_eth.append(m)
+ std_vod_eth.append(std)
+
+### To plot ###
+n_groups = 3
+
+# Select the data series matching the requested plot type
+if plot == "broad_all":
+ means_bars1 = tuple(means_broad_cli)
+ std_bars1 = tuple(std_broad_cli)
+
+ means_bars2 = tuple(means_broad_wlan)
+ std_bars2 = tuple(std_broad_wlan)
+
+ means_bars3 = tuple(means_broad_eth)
+ std_bars3 = tuple(std_broad_eth)
+
+if plot == "vod_all":
+ means_bars1 = tuple(means_vod_cli)
+ std_bars1 = tuple(std_vod_cli)
+
+ means_bars2 = tuple(means_vod_wlan)
+ std_bars2 = tuple(std_vod_wlan)
+
+ means_bars3 = tuple(means_vod_eth)
+ std_bars3 = tuple(std_vod_eth)
+
+if plot == "vod_broad_cli":
+ means_bars1 = tuple(means_broad_cli)
+ std_bars1 = tuple(std_broad_cli)
+
+ means_bars2 = tuple(means_vod_cli)
+ std_bars2 = tuple(std_vod_cli)
+
+if plot == "vod_broad_wlan":
+ means_bars1 = tuple(means_broad_wlan)
+ std_bars1 = tuple(std_broad_wlan)
+
+ means_bars2 = tuple(means_vod_wlan)
+ std_bars2 = tuple(std_vod_wlan)
+
+if plot == "vod_broad_eth":
+ means_bars1 = tuple(means_broad_eth)
+ std_bars1 = tuple(std_broad_eth)
+
+ means_bars2 = tuple(means_vod_eth)
+ std_bars2 = tuple(std_vod_eth)
+
+
+fig, ax = plt.subplots()
+
+index = np.arange(n_groups)
+bar_width = 0.3
+
+opacity = 0.4
+error_config = {'ecolor': '0.3'}
+
+if plot == "vod_all" or plot == "broad_all" :
+ rects1 = plt.bar(index, means_bars1, bar_width,
+ alpha=opacity,
+ color='y',
+ yerr=std_bars1,
+ error_kw=error_config,
+ label='Client')
+
+ rects2 = plt.bar(index + bar_width, means_bars2, bar_width,
+ alpha=opacity,
+ color='g',
+ yerr=std_bars2,
+ error_kw=error_config,
+ label='Wlan')
+
+ rects3 = plt.bar(index + 2*bar_width, means_bars3, bar_width,
+ alpha=opacity,
+ color='r',
+ yerr=std_bars3,
+ error_kw=error_config,
+ label='Eth')
+
+else :
+ rects1 = plt.bar(index, means_bars1, bar_width,
+ alpha=opacity,
+ color='y',
+ yerr=std_bars1,
+ error_kw=error_config,
+ label='Broadcast')
+
+ rects2 = plt.bar(index + bar_width, means_bars2, bar_width,
+ alpha=opacity,
+ color='g',
+ yerr=std_bars2,
+ error_kw=error_config,
+ label='VOD')
+
+plt.xlabel('Number of Client')
+
+if packet == "frames" :
+ plt.ylabel('Frames sent over UDP')
+if packet == "bytes" :
+ plt.ylabel('Bytes sent over UDP')
+
+if plot == "broad_all":
+ plt.title('Packet sent by number of client in broadcast mode')
+if plot == "vod_all":
+ plt.title('Packet sent by number of client in VOD mode')
+if plot == "vod_broad_cli":
+ plt.title('Packet received in average by client in broadcast and vod mode')
+if plot == "vod_broad_wlan":
+ plt.title('Packet sent in average to the clients in broadcast and vod mode')
+if plot == "vod_broad_eth":
+ plt.title('Packet received in average by the wifi center in broadcast and vod mode')
+
+plt.xticks(index + bar_width, ('1', '3', '5'))
+plt.legend()
+
+#plt.tight_layout()
+plt.show()
+
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.resource import ResourceAction, ResourceState
+
+import os
+import time
+import argparse
+
+# Set experiment for broadcast or vod mode
+
+parser = argparse.ArgumentParser(description='NEPI VoD/Broadcast experiment')
+parser.add_argument('-m', '--mode', help='Set vlc mode, possible values <vod> or <broadcast>', required=True)
+args = parser.parse_args()
+
+mode = args.mode
+
+# Create the entity Experiment Controller
+
+exp_id = "vod_exp"
+ec = ExperimentController(exp_id)
+
+# Define SFA credentials
+
+slicename = 'ple.inria.nepi'
+sfauser = 'ple.inria.aquereilhac'
+sfaPrivateKey = '/home/alina/.sfi/aquereilhac.pkey'
+
+# Functions for nodes and ifaces registration
+
+def create_planetlab_node(ec, host):
+ node = ec.register_resource("planetlab::sfa::Node")
+ ec.set(node, "hostname", host)
+ ec.set(node, "username", "inria_nepi")
+ ec.set(node, "sfauser", sfauser)
+ ec.set(node, "sfaPrivateKey", sfaPrivateKey)
+ ec.set(node, 'cleanExperiment', True)
+ return node
+
+def create_omf_node(ec, host):
+ node = ec.register_resource("wilabt::sfa::Node")
+ ec.set(node, "host", host)
+ ec.set(node, "slicename", slicename)
+ ec.set(node, "sfauser", sfauser)
+ ec.set(node, "sfaPrivateKey", sfaPrivateKey)
+ ec.set(node, "gatewayUser", "nepi")
+ ec.set(node, "gateway", "bastion.test.iminds.be")
+ ec.set(node, "disk_image", 'NepiVlcOMF6Baseline')
+ ec.set(node, 'xmppServer', "xmpp.ilabt.iminds.be")
+ ec.set(node, 'xmppUser', "nepi")
+ ec.set(node, 'xmppPort', "5222")
+ ec.set(node, 'xmppPassword', "1234")
+ return node
+
+def create_omf_iface(ec, ip, node):
+ iface = ec.register_resource("omf::WifiInterface")
+ ec.set(iface, 'name', 'wlan0')
+ ec.set(iface, 'mode', "adhoc")
+ ec.set(iface, 'hw_mode', "g")
+ ec.set(iface, 'essid', "vlc")
+ ec.set(iface, 'ip', ip)
+ ec.register_connection(iface, node)
+ return iface
+
+# Register Internet VLC server
+
+video_server = create_planetlab_node(ec, 'planetlab3.xeno.cl.cam.ac.uk')
+
+# Register wifi media center and client nodes
+
+wifi_center = create_omf_node(ec, 'zotacB1')
+client1 = create_omf_node(ec, 'zotacB3')
+client2 = create_omf_node(ec, 'zotacB5')
+client3 = create_omf_node(ec, 'zotacC1')
+client4 = create_omf_node(ec, 'zotacC3')
+client5 = create_omf_node(ec, 'zotacB2')
+
+omf_nodes = [wifi_center, client1, client2, client3, client4, client5]
+
+# Register ifaces in wireless nodes
+
+iface_center = create_omf_iface(ec, "192.168.0.1/24", wifi_center)
+iface_client1 = create_omf_iface(ec, "192.168.0.2/24", client1)
+iface_client2 = create_omf_iface(ec, "192.168.0.3/24", client2)
+iface_client3 = create_omf_iface(ec, "192.168.0.4/24", client3)
+iface_client4 = create_omf_iface(ec, "192.168.0.5/24", client4)
+iface_client5 = create_omf_iface(ec, "192.168.0.6/24", client5)
+
+omf_ifaces = [iface_center, iface_client1, iface_client2, iface_client3, iface_client4, iface_client5]
+
+# Register channel
+
+chan = ec.register_resource("omf::Channel")
+ec.set(chan, 'channel', "6")
+
+# Register connection ifaces - channel
+
+ec.register_connection(iface_center, chan)
+ec.register_connection(iface_client1, chan)
+ec.register_connection(iface_client2, chan)
+ec.register_connection(iface_client3, chan)
+ec.register_connection(iface_client4, chan)
+ec.register_connection(iface_client5, chan)
+
+resources = [video_server] + omf_nodes + omf_ifaces + [chan]
+
+# Deploy physical resources and wait until they become provisioned
+
+ec.deploy(resources)
+
+ec.wait_deployed(resources)
+
+time.sleep(3)
+
+# Functions for applications registration in the nodes
+
+def create_vlc_server(ec, video_server, mode):
+ vlc_server = ec.register_resource("linux::Application")
+ ec.set(vlc_server, "depends", "vlc")
+ ec.set(vlc_server, "sources", "examples/omf/demo_openlab/big_buck_bunny_240p_mpeg4_lq.ts")
+ # Depending on the mode selected to run the experiment,
+    # different configuration files and commands to run are
+ # uploaded to the server
+ if mode == 'vod':
+ ec.set(vlc_server, "files", "examples/omf/demo_openlab/conf_VoD.vlm")
+ ec.set(vlc_server, "command", "sudo -S dbus-uuidgen --ensure ; cvlc --vlm-conf ${SHARE}/conf_VoD.vlm --rtsp-host 128.232.103.203:5554 2>/tmp/logpl.txt")
+ elif mode == 'broadcast':
+ ec.set(vlc_server, "files", "examples/omf/demo_openlab/conf_Broadcast.vlm")
+ ec.set(vlc_server, "command", "sudo -S dbus-uuidgen --ensure ; cvlc --vlm-conf ${SHARE}/conf_Broadcast.vlm --rtsp-host 128.232.103.203:5554 2>/tmp/logpl.txt")
+ ec.register_connection(video_server, vlc_server)
+ return vlc_server
+
+def create_omf_app(ec, command, node):
+ app = ec.register_resource("omf::Application")
+ ec.set(app, 'command', command)
+ ec.register_connection(app, node)
+ return app
+
+
+# Run the VLC server in the Planetlab node
+
+vlc_server = create_vlc_server(ec, video_server, mode)
+
+# Upload configuration to the wifi media center and run VLC
+
+if mode == 'vod':
+ update_file_wificenter = "echo -e 'new BUNNY vod enabled\\n"\
+ "setup BUNNY input rtsp://128.232.103.203:5554/BUNNY' > /root/wificenter.vlm"
+ command_wificenter = "/root/vlc/vlc-1.1.13/cvlc --vlm-conf /root/wificenter.vlm --rtsp-host 192.168.0.1:5554"
+elif mode == 'broadcast':
+ update_file_wificenter = "echo -e 'new BUNNY broadcast enabled loop\\n"\
+ "setup BUNNY input rtsp://128.232.103.203:8554/BUNNY\\n"\
+ "setup BUNNY output #rtp{access=udp,mux=ts,sdp=rtsp://0.0.0.0:8554/BUNNY}\\n\\n"\
+ "new test_sched schedule enabled\\n"\
+ "setup test_sched append control BUNNY play' > /root/wificenter.vlm"
+ command_wificenter = "/root/vlc/vlc-1.1.13/cvlc --vlm-conf /root/wificenter.vlm --rtsp-host 192.168.0.1:8554"
+
+upload_conf = create_omf_app(ec, update_file_wificenter , wifi_center)
+vlc_wificenter = create_omf_app(ec, command_wificenter , wifi_center)
+
+ec.register_condition(upload_conf, ResourceAction.START, vlc_server, ResourceState.STARTED , "2s")
+ec.register_condition(vlc_wificenter, ResourceAction.START, upload_conf, ResourceState.STARTED , "2s")
+
+# measurements in video server (PL node)
+measure_videoserver = ec.register_resource("linux::Application")
+ec.set(measure_videoserver, "depends", "tcpdump")
+ec.set(measure_videoserver, "sudo", True)
+command = "tcpdump -i eth0 not arp -n -w /tmp/capplserver_%s.pcap" % ("$(date +'%Y%m%d%H%M%S')")
+ec.set(measure_videoserver, "command", command)
+ec.register_connection(measure_videoserver, video_server)
+
+# Deploy servers
+ec.deploy([vlc_server, upload_conf, vlc_wificenter, measure_videoserver])
+
+ec.wait_started([vlc_server, upload_conf, vlc_wificenter, measure_videoserver])
+
+time.sleep(3)
+
+def deploy_experiment(ec, clients, wifi_center):
+
+ # measurements in transmitter eth0
+ command_measure_wificentereth0 = "/usr/sbin/tcpdump -i eth0 not arp -n -w /tmp/capwificen_eth0_%s_%s.pcap" % (len(clients), "$(date +'%Y%m%d%H%M%S')")
+ measure_wificentereth0 = create_omf_app(ec, command_measure_wificentereth0, wifi_center)
+ ec.register_condition(measure_wificentereth0, ResourceAction.STOP, measure_wificentereth0, ResourceState.STARTED , "65s")
+
+ # measurements in transmitter wlan0
+ command_measure_wificenterwlan0 = "/usr/sbin/tcpdump -i wlan0 not arp -n -w /tmp/capwificen_wlan0_%s_%s.pcap" % (len(clients), "$(date +'%Y%m%d%H%M%S')")
+ measure_wificenterwlan0 = create_omf_app(ec, command_measure_wificenterwlan0, wifi_center)
+ ec.register_condition(measure_wificenterwlan0, ResourceAction.STOP, measure_wificenterwlan0, ResourceState.STARTED , "65s")
+
+ # kill tcpdumps in wificenter
+ command_kill_measure_wificentereth0 = "killall /usr/sbin/tcpdump"
+ kill_measure_wificentereth0 = create_omf_app(ec, command_kill_measure_wificentereth0, wifi_center)
+ ec.register_condition(kill_measure_wificentereth0, ResourceAction.START, measure_wificentereth0, ResourceState.STARTED , "65s")
+ ec.register_condition(kill_measure_wificentereth0, ResourceAction.STOP, kill_measure_wificentereth0, ResourceState.STARTED , "2s")
+
+
+ apps = [measure_wificentereth0, measure_wificenterwlan0, kill_measure_wificentereth0]
+ delay = '2s'
+ for client in clients:
+ client_host = ec.get(client, 'host').split('.')[0]
+ # measurements in clients
+ command_measure_client = "/usr/sbin/tcpdump -i wlan0 not arp -n -w /tmp/capcli_%s_%s_%s.pcap" % (client_host, len(clients), "$(date +'%Y%m%d%H%M%S')")
+ # run vlc client
+ if mode == 'broadcast':
+ command_client = "/root/vlc/vlc-1.1.13/cvlc rtsp://192.168.0.1:8554/BUNNY --sout=file/ts:%s_%s_%s.ts 2>/tmp/logcli.txt" % (client_host, len(clients), "$(date +'%Y%m%d%H%M%S')")
+ elif mode == 'vod':
+ command_client = "/root/vlc/vlc-1.1.13/cvlc rtsp://192.168.0.1:5554/BUNNY --sout=file/ts:%s_%s_%s.ts 2>/tmp/logcli.txt" % (client_host, len(clients), "$(date +'%Y%m%d%H%M%S')")
+
+ # kill vlc client and tcpdump
+ command_client_killvlc = "killall vlc vlc_app"
+ command_client_killtcp = "killall /usr/sbin/tcpdump"
+
+ run_client = create_omf_app(ec, command_client, client)
+ measure_client = create_omf_app(ec, command_measure_client, client)
+ kill_clientvlc = create_omf_app(ec, command_client_killvlc, client)
+ kill_clienttcp = create_omf_app(ec, command_client_killtcp, client)
+ ec.register_condition(run_client, ResourceAction.START, measure_client, ResourceState.STARTED , delay)
+ ec.register_condition([run_client, measure_client], ResourceAction.STOP, run_client, ResourceState.STARTED , "60s")
+ ec.register_condition(kill_clientvlc, ResourceAction.START, run_client, ResourceState.STARTED , "60s")
+ ec.register_condition(kill_clienttcp, ResourceAction.START, measure_client, ResourceState.STARTED , "60s")
+ ec.register_condition(kill_clientvlc, ResourceAction.STOP, kill_clientvlc, ResourceState.STARTED , "2s")
+ ec.register_condition(kill_clienttcp, ResourceAction.STOP, kill_clienttcp, ResourceState.STARTED , "2s")
+ apps.append(run_client)
+ apps.append(measure_client)
+ apps.append(kill_clientvlc)
+ apps.append(kill_clienttcp)
+
+ return apps
+
+#################
+## 1 client run #
+#################
+
+apps1 = deploy_experiment(ec, [client1], wifi_center)
+
+ec.deploy(apps1)
+ec.wait_finished(apps1)
+
+################
+# 3 client run #
+################
+
+#apps3 = deploy_experiment(ec, [client1, client2, client3], wifi_center)
+#
+#ec.deploy(apps3)
+#ec.wait_finished(apps3)
+
+################
+# 5 client run #
+################
+#
+#apps5 = deploy_experiment(ec, [client1, client2, client3, client4, client5], wifi_center)
+
+#ec.deploy(apps5)
+#ec.wait_finished(apps5)
+
+ec.shutdown()
+
+# End
import sys
def add_node(ec, host, user, pl_user, pl_password):
- node = ec.register_resource("PlanetlabNode")
+ node = ec.register_resource("planetlab::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
if pl_user:
ec.set(node, "pluser", pl_user)
if pl_password:
ec.set(node, "plpassword", pl_password)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
return node
def add_ovs(ec, bridge_name, virtual_ip_pref, controller_ip, controller_port, node):
- ovs = ec.register_resource("OVSSwitch")
+ ovs = ec.register_resource("planetlab::OVSSwitch")
ec.set(ovs, "bridge_name", bridge_name)
ec.set(ovs, "virtual_ip_pref", virtual_ip_pref)
ec.set(ovs, "controller_ip", controller_ip)
ec.register_connection(ovs, node)
return ovs
-def add_port(ec, port_name, ovs):
- port = ec.register_resource("OVSPort")
+def add_port(ec, port_name, network, ovs):
+ port = ec.register_resource("planetlab::OVSPort")
ec.set(port, "port_name", port_name)
+ ec.set(port, "network", network)
ec.register_connection(port, ovs)
return port
-def add_tap(ec, ip4, prefix4, pointopoint, node):
- tap = ec.register_resource("PlanetlabTap")
- ec.set(tap, "ip4", ip4)
- ec.set(tap, "prefix4", prefix4)
+def add_tap(ec, ip, prefix, pointopoint, node):
+ tap = ec.register_resource("planetlab::Tap")
+ ec.set(tap, "ip", ip)
+ ec.set(tap, "prefix", prefix)
ec.set(tap, "pointopoint", pointopoint)
ec.set(tap, "up", True)
ec.register_connection(tap, node)
return tap
-def add_tunnel(ec, network, port0, tap):
- tunnel = ec.register_resource("OVSTunnel")
- ec.set(tunnel, "network", network)
+def add_tunnel(ec, port0, tap):
+ tunnel = ec.register_resource("linux::UdpTunnel")
ec.register_connection(port0, tunnel)
ec.register_connection(tunnel, tap)
return tunnel
def add_app(ec, command, node):
- app = ec.register_resource("LinuxApplication")
+ app = ec.register_resource("linux::Application")
ec.set(app, "command", command)
ec.register_connection(app, node)
return app
host1 = "planetlab2.ionio.gr"
host2 = "iraplab2.iralab.uni-karlsruhe.de"
-ip_controller = "xxx.yyy.zzz.ttt"
+ip_controller = "1.1.1.1"
#XXX : Depends on the Vsys_tag of your slice
network = "192.168.3.0"
ovs2 = add_ovs(ec, "nepi_bridge_2", "192.168.3.2/24", ip_controller, "6633", s2_node)
# Add ports on ovs
-port1 = add_port(ec, "nepi_port1", ovs1)
-port3 = add_port(ec, "nepi_port3", ovs1)
-port2 = add_port(ec, "nepi_port2", ovs2)
-port4 = add_port(ec, "nepi_port4", ovs2)
+port1 = add_port(ec, "nepi_port1", network, ovs1)
+port3 = add_port(ec, "nepi_port3", network, ovs1)
+port2 = add_port(ec, "nepi_port2", network, ovs2)
+port4 = add_port(ec, "nepi_port4", network, ovs2)
h1_node = add_node(ec, host1, slicename, pl_user, pl_password)
h2_node = add_node(ec, host2, slicename, pl_user, pl_password)
# Add tap devices
-tap1 = add_tap(ec, "192.168.3.3", 24, "192.168.3.1", h1_node)
-tap2 = add_tap(ec, "192.168.3.4", 24, "192.168.3.2", h2_node)
+tap1 = add_tap(ec, "192.168.3.3", "24", "192.168.3.1", h1_node)
+tap2 = add_tap(ec, "192.168.3.4", "24", "192.168.3.2", h2_node)
# Connect the nodes
-tunnel1 = add_tunnel(ec, network, port1, tap1)
-tunnel2 = add_tunnel(ec, network, port2, tap2)
-tunnel3 = add_tunnel(ec, network, port3, port4)
+tunnel1 = add_tunnel(ec, port1, tap1)
+tunnel2 = add_tunnel(ec, port2, tap2)
+tunnel3 = add_tunnel(ec, port3, port4)
# Add ping commands
app1 = add_app(ec, "ping -c5 192.168.3.2", s1_node)
ec.shutdown()
-
-
-
import os, time
def add_node(ec, host, user, pl_user, pl_password):
- node = ec.register_resource("PlanetlabNode")
+ node = ec.register_resource("planetlab::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
if pl_user:
ec.set(node, "pluser", pl_user)
if pl_password:
ec.set(node, "plpassword", pl_password)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
return node
def add_ovs(ec, bridge_name, virtual_ip_pref, controller_ip, controller_port, node):
- ovs = ec.register_resource("OVSSwitch")
+ ovs = ec.register_resource("planetlab::OVSSwitch")
ec.set(ovs, "bridge_name", bridge_name)
ec.set(ovs, "virtual_ip_pref", virtual_ip_pref)
ec.set(ovs, "controller_ip", controller_ip)
ec.register_connection(ovs, node)
return ovs
-def add_port(ec, port_name, ovs):
- port = ec.register_resource("OVSPort")
+def add_port(ec, port_name, network, ovs):
+ port = ec.register_resource("planetlab::OVSPort")
ec.set(port, "port_name", port_name)
+ ec.set(port, "network", network)
ec.register_connection(port, ovs)
return port
-def add_tap(ec, ip4, prefix4, pointopoint, node):
- tap = ec.register_resource("PlanetlabTap")
- ec.set(tap, "ip4", ip4)
- ec.set(tap, "prefix4", prefix4)
+def add_tap(ec, ip, prefix, pointopoint, node):
+ tap = ec.register_resource("planetlab::Tap")
+ ec.set(tap, "ip", ip)
+ ec.set(tap, "prefix", prefix)
ec.set(tap, "pointopoint", pointopoint)
ec.set(tap, "up", True)
ec.register_connection(tap, node)
return tap
-def add_tunnel(ec, network, port0, tap):
- tunnel = ec.register_resource("OVSTunnel")
- ec.set(tunnel, "network", network)
+def add_tunnel(ec, port0, tap):
+ tunnel = ec.register_resource("linux::UdpTunnel")
ec.register_connection(port0, tunnel)
ec.register_connection(tunnel, tap)
return tunnel
def add_app(ec, command, node):
- app = ec.register_resource("LinuxApplication")
+ app = ec.register_resource("linux::Application")
ec.set(app, "command", command)
ec.register_connection(app, node)
return app
#XXX : Need to put 6 working nodes or to let Nepi find for you
switch1 = "planetlab2.virtues.fi"
switch2 = "planetlab2.upc.es"
-switch3 = "planetlab2.cs.aueb.gr"
+switch3 = "planetlab1.informatik.uni-erlangen.de"
host1 = "planetlab2.ionio.gr"
host2 = "iraplab2.iralab.uni-karlsruhe.de"
host3 = "planetlab2.diku.dk"
ovs3 = add_ovs(ec, "nepi_bridge_3", "192.168.3.6/24", ip_controller, "6633", s3_node)
# Add ports on ovs
-port1 = add_port(ec, "nepi_port1", ovs1)
-port4 = add_port(ec, "nepi_port4", ovs1)
-port7 = add_port(ec, "nepi_port7", ovs1)
-port2 = add_port(ec, "nepi_port2", ovs2)
-port5 = add_port(ec, "nepi_port5", ovs2)
-port3 = add_port(ec, "nepi_port3", ovs3)
-port6 = add_port(ec, "nepi_port6", ovs3)
+port1 = add_port(ec, "nepi_port1", network, ovs1)
+port4 = add_port(ec, "nepi_port4", network, ovs1)
+port7 = add_port(ec, "nepi_port7", network, ovs1)
+port2 = add_port(ec, "nepi_port2", network, ovs2)
+port5 = add_port(ec, "nepi_port5", network, ovs2)
+port3 = add_port(ec, "nepi_port3", network, ovs3)
+port6 = add_port(ec, "nepi_port6", network, ovs3)
h1_node = add_node(ec, host1, slicename, pl_user, pl_password)
h2_node = add_node(ec, host2, slicename, pl_user, pl_password)
h3_node = add_node(ec, host3, slicename, pl_user, pl_password)
# Add tap devices
-tap1 = add_tap(ec, "192.168.3.1", 24, "192.168.3.2", h1_node)
-tap2 = add_tap(ec, "192.168.3.3", 24, "192.168.3.4", h2_node)
-tap3 = add_tap(ec, "192.168.3.5", 24, "192.168.3.6", h3_node)
+tap1 = add_tap(ec, "192.168.3.1", "24", "192.168.3.2", h1_node)
+tap2 = add_tap(ec, "192.168.3.3", "24", "192.168.3.4", h2_node)
+tap3 = add_tap(ec, "192.168.3.5", "24", "192.168.3.6", h3_node)
# Connect the nodes
-tunnel1 = add_tunnel(ec, network, port1, tap1)
-tunnel2 = add_tunnel(ec, network, port2, tap2)
-tunnel3 = add_tunnel(ec, network, port3, tap3)
-tunnel4 = add_tunnel(ec, network, port4, port5)
-tunnel5 = add_tunnel(ec, network, port7, port6)
+tunnel1 = add_tunnel(ec, port1, tap1)
+tunnel2 = add_tunnel(ec, port2, tap2)
+tunnel3 = add_tunnel(ec, port3, tap3)
+tunnel4 = add_tunnel(ec, port4, port5)
+tunnel5 = add_tunnel(ec, port7, port6)
#tunnel6 = add_tunnel(ec, network, port8, port9)
# Add ping commands
ec.shutdown()
-
-
-
import os, time
def add_node(ec, host, user, pl_user, pl_password):
- node = ec.register_resource("PlanetlabNode")
+ node = ec.register_resource("planetlab::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
if pl_user:
ec.set(node, "pluser", pl_user)
if pl_password:
ec.set(node, "plpassword", pl_password)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
return node
def add_ovs(ec, bridge_name, virtual_ip_pref, controller_ip, controller_port, node):
- ovs = ec.register_resource("OVSSwitch")
+ ovs = ec.register_resource("planetlab::OVSSwitch")
ec.set(ovs, "bridge_name", bridge_name)
ec.set(ovs, "virtual_ip_pref", virtual_ip_pref)
ec.set(ovs, "controller_ip", controller_ip)
ec.register_connection(ovs, node)
return ovs
-def add_port(ec, port_name, ovs):
- port = ec.register_resource("OVSPort")
+def add_port(ec, port_name, network, ovs):
+ port = ec.register_resource("planetlab::OVSPort")
ec.set(port, "port_name", port_name)
+ ec.set(port, "network", network)
ec.register_connection(port, ovs)
return port
-def add_tap(ec, ip4, prefix4, pointopoint, node):
- tap = ec.register_resource("PlanetlabTap")
- ec.set(tap, "ip4", ip4)
- ec.set(tap, "prefix4", prefix4)
+def add_tap(ec, ip, prefix, pointopoint, node):
+ tap = ec.register_resource("planetlab::Tap")
+ ec.set(tap, "ip", ip)
+ ec.set(tap, "prefix", prefix)
ec.set(tap, "pointopoint", pointopoint)
ec.set(tap, "up", True)
ec.register_connection(tap, node)
return tap
-def add_tunnel(ec, network, port0, tap):
- tunnel = ec.register_resource("OVSTunnel")
- ec.set(tunnel, "network", network)
+def add_tunnel(ec, port0, tap):
+ tunnel = ec.register_resource("linux::UdpTunnel")
ec.register_connection(port0, tunnel)
ec.register_connection(tunnel, tap)
return tunnel
def add_app(ec, command, node):
- app = ec.register_resource("LinuxApplication")
+ app = ec.register_resource("linux::Application")
ec.set(app, "command", command)
ec.register_connection(app, node)
return app
#XXX : Need to put 6 working nodes or to let Nepi find for you
switch1 = "planetlab2.virtues.fi"
switch2 = "planetlab2.upc.es"
-switch3 = "planetlab2.cs.aueb.gr"
+switch3 = "planetlab1.informatik.uni-erlangen.de"
host1 = "planetlab2.ionio.gr"
host2 = "iraplab2.iralab.uni-karlsruhe.de"
host3 = "planetlab2.diku.dk"
ovs3 = add_ovs(ec, "nepi_bridge_3", "192.168.3.6/24", ip_controller, "6633", s3_node)
# Add ports on ovs
-port1 = add_port(ec, "nepi_port1", ovs1)
-port4 = add_port(ec, "nepi_port4", ovs1)
-port7 = add_port(ec, "nepi_port7", ovs1)
-port2 = add_port(ec, "nepi_port2", ovs2)
-port5 = add_port(ec, "nepi_port5", ovs2)
-port8 = add_port(ec, "nepi_port8", ovs2)
-port3 = add_port(ec, "nepi_port3", ovs3)
-port6 = add_port(ec, "nepi_port6", ovs3)
-port9 = add_port(ec, "nepi_port9", ovs3)
+port1 = add_port(ec, "nepi_port1", network, ovs1)
+port4 = add_port(ec, "nepi_port4", network, ovs1)
+port7 = add_port(ec, "nepi_port7", network, ovs1)
+port2 = add_port(ec, "nepi_port2", network, ovs2)
+port5 = add_port(ec, "nepi_port5", network, ovs2)
+port8 = add_port(ec, "nepi_port8", network, ovs2)
+port3 = add_port(ec, "nepi_port3", network, ovs3)
+port6 = add_port(ec, "nepi_port6", network, ovs3)
+port9 = add_port(ec, "nepi_port9", network, ovs3)
h1_node = add_node(ec, host1, slicename, pl_user, pl_password)
h2_node = add_node(ec, host2, slicename, pl_user, pl_password)
h3_node = add_node(ec, host3, slicename, pl_user, pl_password)
# Add tap devices
-tap1 = add_tap(ec, "192.168.3.1", 24, "192.168.3.2", h1_node)
-tap2 = add_tap(ec, "192.168.3.3", 24, "192.168.3.4", h2_node)
-tap3 = add_tap(ec, "192.168.3.5", 24, "192.168.3.6", h3_node)
+tap1 = add_tap(ec, "192.168.3.1", "24", "192.168.3.2", h1_node)
+tap2 = add_tap(ec, "192.168.3.3", "24", "192.168.3.4", h2_node)
+tap3 = add_tap(ec, "192.168.3.5", "24", "192.168.3.6", h3_node)
# Connect the nodes
-tunnel1 = add_tunnel(ec, network, port1, tap1)
-tunnel2 = add_tunnel(ec, network, port2, tap2)
-tunnel3 = add_tunnel(ec, network, port3, tap3)
-tunnel4 = add_tunnel(ec, network, port4, port5)
-tunnel5 = add_tunnel(ec, network, port7, port6)
-tunnel6 = add_tunnel(ec, network, port8, port9)
+tunnel1 = add_tunnel(ec, port1, tap1)
+tunnel2 = add_tunnel(ec, port2, tap2)
+tunnel3 = add_tunnel(ec, port3, tap3)
+tunnel4 = add_tunnel(ec, port4, port5)
+tunnel5 = add_tunnel(ec, port7, port6)
+tunnel6 = add_tunnel(ec, port8, port9)
# Add ping commands
app1 = add_app(ec, "ping -c5 192.168.3.4", s1_node)
ec.shutdown()
-
-
-
### Useful Method to Create RM ##
def add_node(ec, host, user):
- node = ec.register_resource("PlanetlabNode")
+ node = ec.register_resource("planetlab::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
return node
-def add_tap(ec, ip4, prefix4, pointopoint, node):
- tap = ec.register_resource("PlanetlabTap")
- ec.set(tap, "ip4", ip4)
- ec.set(tap, "prefix4", prefix4)
+def add_tap(ec, ip, prefix, pointopoint, node):
+ tap = ec.register_resource("planetlab::Tap")
+ ec.set(tap, "ip", ip)
+ ec.set(tap, "prefix", prefix)
ec.set(tap, "pointopoint", pointopoint)
ec.set(tap, "up", True)
ec.register_connection(tap, node)
return tap
def add_udptun(ec, tap1, tap2):
- udptun = ec.register_resource("UdpTunnel")
+ udptun = ec.register_resource("udp::Tunnel")
ec.register_connection(tap1, udptun)
ec.register_connection(tap2, udptun)
return udptun
def add_vroute(ec, network, tap):
- vroute = ec.register_resource("PlanetlabVroute")
+ vroute = ec.register_resource("planetlab::Vroute")
ec.set(vroute, "action", "add")
ec.set(vroute, "network", network)
ec.register_connection(vroute, tap)
return vroute
def add_app(ec, command, node):
- app = ec.register_resource("LinuxApplication")
+ app = ec.register_resource("linux::Application")
ec.set(app, "command", command)
ec.register_connection(app, node)
return app
## Create The topology ##
host1 = add_node(ec, hostname_host1, user)
-tap1 = add_tap(ec, "192.168.3.1", 24, "192.168.3.2", host1)
+tap1 = add_tap(ec, "192.168.3.1", "24", "192.168.3.2", host1)
switch1 = add_node(ec, hostname_switch1, user)
-tap2 = add_tap(ec, "192.168.3.2", 24, "192.168.3.1", switch1)
-tap102 = add_tap(ec, "192.168.3.102", 29, "192.168.3.104", switch1)
-tap152 = add_tap(ec, "192.168.3.152", 29, "192.168.3.156", switch1)
+tap2 = add_tap(ec, "192.168.3.2", "24", "192.168.3.1", switch1)
+tap102 = add_tap(ec, "192.168.3.102", "29", "192.168.3.104", switch1)
+tap152 = add_tap(ec, "192.168.3.152", "29", "192.168.3.156", switch1)
host2 = add_node(ec, hostname_host2, user)
-tap13 = add_tap(ec, "192.168.3.13", 24, "192.168.3.14", host2)
+tap13 = add_tap(ec, "192.168.3.13", "24", "192.168.3.14", host2)
switch2 = add_node(ec, hostname_switch2, user)
-tap14 = add_tap(ec, "192.168.3.14", 24, "192.168.3.13", switch2)
-tap104 = add_tap(ec, "192.168.3.104", 29, "192.168.3.102", switch2)
-tap204 = add_tap(ec, "192.168.3.204", 29, "192.168.3.206", switch2)
+tap14 = add_tap(ec, "192.168.3.14", "24", "192.168.3.13", switch2)
+tap104 = add_tap(ec, "192.168.3.104", "29", "192.168.3.102", switch2)
+tap204 = add_tap(ec, "192.168.3.204", "29", "192.168.3.206", switch2)
host3 = add_node(ec, hostname_host3, user)
-tap25 = add_tap(ec, "192.168.3.25", 24, "192.168.3.26", host3)
+tap25 = add_tap(ec, "192.168.3.25", "24", "192.168.3.26", host3)
switch3 = add_node(ec, hostname_switch3, user)
-tap26 = add_tap(ec, "192.168.3.26", 24, "192.168.3.25", switch3)
-tap156 = add_tap(ec, "192.168.3.156", 29, "192.168.3.152", switch3)
-tap206 = add_tap(ec, "192.168.3.206", 29, "192.168.3.204", switch3)
+tap26 = add_tap(ec, "192.168.3.26", "24", "192.168.3.25", switch3)
+tap156 = add_tap(ec, "192.168.3.156", "29", "192.168.3.152", switch3)
+tap206 = add_tap(ec, "192.168.3.206", "29", "192.168.3.204", switch3)
## Create the UDP Tunnel ##
udptun1 = add_udptun(ec, tap1, tap2)
ec.shutdown()
-
-
-
+++ /dev/null
-#
-# NEPI, a framework to manage network experiments
-# Copyright (C) 2014 INRIA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
-#
-# Instructions to run this example:
-#
-# 1. First edit the script file where required (See ASSING messages)
-#
-# 2. Then, run the script:
-#
-# $ cd <path-to-nepi>
-# $ PYTHONPATH=$PYTHONPATHS:src python examples/linux/ccn/two_nodes_file_retrieval.py
-#
-
-from nepi.execution.ec import ExperimentController
-
-import os
-
-pl_user = ######### <<< ASSIGN the username used to login to the PlanetLab website >>>
-pl_pass = ######## <<< ASSIGN the password used to login to the PlanetLab website >>>
-pl_ssh_key = ####### <<< ASSING the absolute path to the private SSH key used for Planetlab >>>
-slicename = ####### <<< ASSING the PlanetLab slicename >>>
-
-## Create the experiment controller
-ec = ExperimentController(exp_id = "demo_CCN")
-
-## Register node 1
-node1 = ec.register_resource("PlanetlabNode")
-# Configure NEPI to automatically find and allocate a node in France
-# ec.set(node1, "country", "France")
-# Else, if you want a node in particular set the hostname
-ec.set(node1, "hostname", "peeramidion.irisa.fr")
-# PlanetLab (website) account username
-ec.set(node1, "pluser", pl_user)
-# PlanetLab (website) account password
-ec.set(node1, "plpassword", pl_pass)
-# username should be your PlanetLab slice name
-ec.set(node1, "username", slicename)
-# Absolute path to the SSH private key for PlanetLab
-ec.set(node1, "identity", pl_ssh_key)
-# Clean all files, results, etc, from previous experiments wit the same exp_id
-ec.set(node1, "cleanExperiment", True)
-# Kill all running processes in the PlanetLab node before running the experiment
-ec.set(node1, "cleanProcesses", True)
-
-## Register node 2
-node2 = ec.register_resource("PlanetlabNode")
-# Configure NEPI to automatically find and allocate a node in Spain
-#ec.set(node2, "country", "Spain")
-# Else, if you want a node in particular set the hostname
-ec.set(node2, "hostname", "planetlab2.upc.es")
-# PlanetLab (website) account username
-ec.set(node2, "pluser", pl_user)
-# PlanetLab (website) account password
-ec.set(node2, "plpassword", pl_pass)
-# username should be your PlanetLab slice name
-ec.set(node2, "username", slicename)
-# Absolute path to the SSH private key for PlanetLab
-ec.set(node2, "identity", pl_ssh_key)
-# Clean all files, results, etc, from previous experiments wit the same exp_id
-ec.set(node2, "cleanExperiment", True)
-# Kill all running processes in the PlanetLab node before running the experiment
-ec.set(node2, "cleanProcesses", True)
-
-## Register a CCN daemon in node 1
-ccnd1 = ec.register_resource("LinuxCCND")
-# Set ccnd log level to 7
-ec.set(ccnd1, "debug", 7)
-ec.register_connection(ccnd1, node1)
-
-## Register a CCN daemon in node 2
-ccnd2 = ec.register_resource("LinuxCCND")
-# Set ccnd log level to 7
-ec.set(ccnd2, "debug", 7)
-ec.register_connection(ccnd2, node2)
-
-## Register a repository in node 1
-ccnr1 = ec.register_resource("LinuxCCNR")
-ec.register_connection(ccnr1, ccnd1)
-
-## Push the file into the repository
-local_path_to_content = os.path.join(
- os.path.dirname(os.path.realpath(__file__)),
- "..", "..",
- "big_buck_bunny_240p_mpeg4_lq.ts")
-
-co = ec.register_resource("LinuxCCNContent")
-ec.set(co, "contentName", "ccnx:/test/FILE1")
-# NEPI will upload the specified file to the remote node and write it
-# into the CCN repository
-ec.set(co, "content", local_path_to_content)
-ec.register_connection(co, ccnr1)
-
-## Deploy all resources
-ec.deploy()
-
-## Wait until node 1 and 2 are deployed, so we can retrieve the hostnames
-## of the nodes automatically allocated in planetlab
-ec.wait_deployed([node1, node2])
-
-## Get the hostnames of the two PlanetLab nodes
-hostname1 = ec.get(node1, "hostname")
-print "hostname 1: ", hostname1
-hostname2 = ec.get(node2, "hostname")
-print "hostname 2: ", hostname2
-
-# Register a FIB entry from node 1 to node 2
-entry1 = ec.register_resource("LinuxFIBEntry")
-ec.set(entry1, "host", hostname2)
-ec.register_connection(entry1, ccnd1)
-
-# Register a FIB entry from node 1 to node 2
-entry2 = ec.register_resource("LinuxFIBEntry")
-ec.set(entry2, "host", hostname1)
-ec.register_connection(entry2, ccnd2)
-
-## Retrieve the file stored in node 1 from node 2
-command = "ccncat ccnx:/test/FILE1"
-app = ec.register_resource("LinuxCCNApplication")
-ec.set(app, "command", command)
-ec.register_connection(app, ccnd2)
-
-# Register a collector to automatically collect the ccnd logs
-# to a local directory
-results_dir = "/tmp/demo_CCN_results"
-col1 = ec.register_resource("Collector")
-ec.set(col1, "traceName", "stderr")
-ec.set(col1, "storeDir", results_dir)
-ec.set(col1, "subDir", hostname1)
-ec.register_connection(col1, ccnd1)
-
-col2 = ec.register_resource("Collector")
-ec.set(col2, "traceName", "stderr")
-ec.set(col2, "storeDir", results_dir)
-ec.set(col2, "subDir", hostname2)
-ec.register_connection(col2, ccnd2)
-
-## Deploy the rest of the resources
-ec.deploy(guids=[entry1, entry2, app, col1, col2])
-
-# Wait until the ccncat is finished
-ec.wait_finished([app])
-
-## CCND logs will be collected to the results_dir upon shutdown.
-## We can aldo get the content of the logs now:
-#print "LOG2", ec.trace(ccnd1, "stderr")
-#print "LOG 1", ec.trace(ccnd2, "stderr")
-
-ec.shutdown()
-
--- /dev/null
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2014 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+#
+
+# Example of how to run this experiment (replace with your information):
+#
+# $ cd <path-to-nepi>
+# python examples/planetlab/ccn_simple_transfer.py -a <hostname1> -b <hostname2> -u <username> -i <ssh-key>
+
+# CCN topology:
+#
+#
+#
+# content ccncat
+# Linux host Linux host
+# 0 ------- network -------- 1
+#
+
+from nepi.execution.ec import ExperimentController
+
+from optparse import OptionParser, SUPPRESS_HELP
+import os
+
+usage = ("usage: %prog -s <pl-slice> -u <pl-user> -p <pl-password> "
+        "-k <pl-ssh-key> -a <hostname1> -b <hostname2> ")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-s", "--pl-slice", dest="pl_slice",
+ help="PlanetLab slicename", type="str")
+parser.add_option("-u", "--pl-user", dest="pl_user",
+ help="PlanetLab web username", type="str")
+parser.add_option("-p", "--pl-password", dest="pl_password",
+ help="PlanetLab web password", type="str")
+parser.add_option("-k", "--pl-ssh-key", dest="pl_ssh_key",
+ help="Path to private SSH key associated with the PL account",
+ type="str")
+parser.add_option("-a", "--hostname1", dest="hostname1",
+ help="Remote host 1", type="str")
+parser.add_option("-b", "--hostname2", dest="hostname2",
+ help="Remote host 2", type="str")
+(options, args) = parser.parse_args()
+
+hostname1 = options.hostname1
+hostname2 = options.hostname2
+pl_slice = options.pl_slice
+pl_ssh_key = options.pl_ssh_key
+pl_user = options.pl_user
+pl_password = options.pl_password
+
+## Create the experiment controller
+ec = ExperimentController(exp_id = "pl_ccn_simple_transfer")
+
+##### CONFIGURING NODE 1
+
+## Register node 1
+node1 = ec.register_resource("planetlab::Node")
+# Set the hostname of the first node to use for the experiment
+ec.set(node1, "hostname", hostname1)
+# username should be your SSH user
+ec.set(node1, "username", pl_slice)
+# Path to the SSH private key
+ec.set(node1, "identity", pl_ssh_key)
+# Planetlab web site user and password
+ec.set(node1, "pluser", pl_user)
+ec.set(node1, "plpassword", pl_password)
+# Clean all files, results, etc, from previous experiments with the same exp_id
+ec.set(node1, "cleanExperiment", True)
+# Kill all running processes in the node before running the experiment
+ec.set(node1, "cleanProcesses", True)
+
+## Register a CCN daemon in node 1
+ccnd1 = ec.register_resource("linux::CCND")
+# Set ccnd log level to 7
+ec.set(ccnd1, "debug", 7)
+ec.register_connection(ccnd1, node1)
+
+## Register a repository in node 1
+ccnr1 = ec.register_resource("linux::CCNR")
+ec.register_connection(ccnr1, ccnd1)
+
+## Push the file into the repository
+local_path_to_content = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)),
+ "..", "big_buck_bunny_240p_mpeg4_lq.ts")
+
+content_name = "ccnx:/test/FILE"
+
+# Add a content to the repository
+co = ec.register_resource("linux::CCNContent")
+ec.set(co, "contentName", content_name)
+# NEPI will upload the specified file to the remote node and write it
+# into the CCN repository
+ec.set(co, "content", local_path_to_content)
+ec.register_connection(co, ccnr1)
+
+##### CONFIGURING NODE 2
+
+## Register node 2
+node2 = ec.register_resource("planetlab::Node")
+# Set the hostname of the first node to use for the experiment
+ec.set(node2, "hostname", hostname2)
+# username should be your SSH user
+ec.set(node2, "username", pl_slice)
+# Path to the SSH private key
+ec.set(node2, "identity", pl_ssh_key)
+# Planetlab web site user and password
+ec.set(node2, "pluser", pl_user)
+ec.set(node2, "plpassword", pl_password)
+# Clean all files, results, etc, from previous experiments with the same exp_id
+ec.set(node2, "cleanExperiment", True)
+# Kill all running processes in the node before running the experiment
+ec.set(node2, "cleanProcesses", True)
+
+## Register a CCN daemon in node 2
+ccnd2 = ec.register_resource("linux::CCND")
+# Set ccnd log level to 7
+ec.set(ccnd2, "debug", 7)
+ec.register_connection(ccnd2, node2)
+
+## Retrieve the file stored in node 1 from node 2
+ccncat = ec.register_resource("linux::CCNCat")
+ec.set(ccncat, "contentName", content_name)
+ec.register_connection(ccncat, ccnd2)
+
+##### INTERCONNECTING CCN NODES ...
+
+# Register a FIB entry from node 1 to node 2
+entry1 = ec.register_resource("linux::FIBEntry")
+ec.set(entry1, "host", hostname2)
+ec.register_connection(entry1, ccnd1)
+
+# Register a FIB entry from node 2 to node 1
+entry2 = ec.register_resource("linux::FIBEntry")
+ec.set(entry2, "host", hostname1)
+ec.register_connection(entry2, ccnd2)
+
+##### STARTING THE EXPERIMENT
+
+## Deploy all resources
+ec.deploy()
+
+# Wait until the ccncat is finished
+ec.wait_finished([ccncat])
+
+stdout = ec.trace(ccncat, "stdout")
+f = open("video.ts", "w")
+f.write(stdout)
+f.close()
+
+ec.shutdown()
+
+print "Transferred FILE stored locally at video.ts"
+
+++ /dev/null
-#!/usr/bin/env python
-#
-# NEPI, a framework to manage network experiments
-# Copyright (C) 2013 INRIA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
-# Alexandros Kouvakas <alexandros.kouvakas@gmail.com>
-#
-# Switch1 ------- Switch2
-# / \
-# / \
-# / \
-# Host1 Host2
-
-
-
-from nepi.execution.ec import ExperimentController
-import os, time
-
-def add_node(ec, host, user, pl_user, pl_password):
- node = ec.register_resource("PlanetlabNode")
- ec.set(node, "hostname", host)
- ec.set(node, "username", user)
- if pl_user:
- ec.set(node, "pluser", pl_user)
- if pl_password:
- ec.set(node, "plpassword", pl_password)
- ec.set(node, "cleanHome", True)
- ec.set(node, "cleanProcesses", True)
-
- return node
-
-def add_ovs(ec, bridge_name, virtual_ip_pref, controller_ip, controller_port, node):
- ovs = ec.register_resource("OVSWitch")
- ec.set(ovs, "bridge_name", bridge_name)
- ec.set(ovs, "virtual_ip_pref", virtual_ip_pref)
- ec.set(ovs, "controller_ip", controller_ip)
- ec.set(ovs, "controller_port", controller_port)
- ec.register_connection(ovs, node)
- return ovs
-
-def add_port(ec, port_name, ovs):
- port = ec.register_resource("OVSPort")
- ec.set(port, "port_name", port_name)
- ec.register_connection(port, ovs)
- return port
-
-def add_tap(ec, ip4, prefix4, pointopoint, node):
- tap = ec.register_resource("PlanetlabTap")
- ec.set(tap, "ip4", ip4)
- ec.set(tap, "prefix4", prefix4)
- ec.set(tap, "pointopoint", pointopoint)
- ec.set(tap, "up", True)
- ec.register_connection(tap, node)
- return tap
-
-def add_tunnel(ec, network, port0, tap):
- tunnel = ec.register_resource("OVSTunnel")
- ec.set(tunnel, "network", network)
- ec.register_connection(port0, tunnel)
- ec.register_connection(tunnel, tap)
- return tunnel
-
-def add_app(ec, command, node):
- app = ec.register_resource("LinuxApplication")
- ec.set(app, "command", command)
- ec.register_connection(app, node)
- return app
-
-# Create the EC
-ec = ExperimentController(exp_id = "test")
-
-switch1 = "planetlab2.virtues.fi"
-switch2 = "planetlab2.upc.es"
-host1 = "planetlab2.ionio.gr"
-host2 = "iraplab2.iralab.uni-karlsruhe.de"
-
-network = "192.168.3.0"
-
-slicename = "inria_nepi"
-
-pl_user = os.environ.get("PL_USER")
-pl_password = os.environ.get("PL_PASS")
-
-s1_node = add_node(ec, switch1, slicename, pl_user, pl_password)
-s2_node = add_node(ec, switch2, slicename, pl_user, pl_password)
-
-# Add switches
-ovs1 = add_ovs(ec, "nepi_bridge", "192.168.3.1/24", "85.23.168.77", "6633", s1_node)
-ovs2 = add_ovs(ec, "nepi_bridge", "192.168.3.2/24", "85.23.168.77", "6633", s2_node)
-
-# Add ports on ovs
-port1 = add_port(ec, "nepi_port1", ovs1)
-port3 = add_port(ec, "nepi_port3", ovs1)
-port2 = add_port(ec, "nepi_port2", ovs2)
-port4 = add_port(ec, "nepi_port4", ovs2)
-
-h1_node = add_node(ec, host1, slicename, pl_user, pl_password)
-h2_node = add_node(ec, host2, slicename, pl_user, pl_password)
-
-# Add tap devices
-tap1 = add_tap(ec, "192.168.3.3", 24, "192.168.3.1", h1_node)
-tap2 = add_tap(ec, "192.168.3.4", 24, "192.168.3.2", h2_node)
-
-# Connect the nodes
-tunnel1 = add_tunnel(ec, network, port1, tap1)
-tunnel2 = add_tunnel(ec, network, port2, tap2)
-tunnel3 = add_tunnel(ec, network, port3, port4)
-
-# Add ping commands
-app1 = add_app(ec, "ping -c5 192.168.3.2", s1_node)
-app2 = add_app(ec, "ping -c5 192.168.3.3", s1_node)
-app3 = add_app(ec, "ping -c5 192.168.3.4", s1_node)
-app4 = add_app(ec, "ping -c5 192.168.3.1", s2_node)
-app5 = add_app(ec, "ping -c5 192.168.3.3", s2_node)
-app6 = add_app(ec, "ping -c5 192.168.3.4", s2_node)
-app7 = add_app(ec, "ping -c5 192.168.3.1", h1_node)
-app8 = add_app(ec, "ping -c5 192.168.3.2", h1_node)
-app9 = add_app(ec, "ping -c5 192.168.3.4", h1_node)
-app10 = add_app(ec, "ping -c5 192.168.3.1", h2_node)
-app11 = add_app(ec, "ping -c5 192.168.3.2", h2_node)
-app12 = add_app(ec, "ping -c5 192.168.3.3", h2_node)
-
-ec.deploy()
-
-ec.wait_finished([app1, app2, app3, app4, app5, app6, app7, app8, app9, app10, app11, app12])
-
-# Retreive ping results and save
-# them in a file
-ping1 = ec.trace(app1, 'stdout')
-ping2 = ec.trace(app2, 'stdout')
-ping3 = ec.trace(app3, 'stdout')
-ping4 = ec.trace(app4, 'stdout')
-ping5 = ec.trace(app5, 'stdout')
-ping6 = ec.trace(app6, 'stdout')
-ping7 = ec.trace(app7, 'stdout')
-ping8 = ec.trace(app8, 'stdout')
-ping9 = ec.trace(app9, 'stdout')
-ping10 = ec.trace(app10, 'stdout')
-ping11 = ec.trace(app11, 'stdout')
-ping12 = ec.trace(app12, 'stdout')
-
-
-f = open("examples/openvswitch/ping_res.txt", 'w')
-
-if not ping12:
- ec.shutdown()
-
-f.write(ping1)
-f.write(ping2)
-f.write(ping3)
-f.write(ping4)
-f.write(ping5)
-f.write(ping6)
-f.write(ping7)
-f.write(ping8)
-f.write(ping9)
-f.write(ping10)
-f.write(ping11)
-f.write(ping12)
-f.close()
-
-# Delete the overlay network
-ec.shutdown()
-
-
-
-
-
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Lucia Guevgeozian <lucia.guevgeozian_odizzio@inria.fr>
+# Alina Quereilhac <alina.quereilhac@inria.fr>
+#
+
+# Example of how to run this experiment (replace with your information):
+#
+# $ cd <path-to-nepi>
+# python examples/planetlab/ping.py -s <pl-slice> -u <pl-user> -p <pl-password> -k <pl-ssh-key>
+
+
+from nepi.execution.ec import ExperimentController
+
+from optparse import OptionParser
+import os
+
+usage = ("usage: %prog -s <pl-slice> -u <pl-user> -p <pl-password> "
+ "-k <pl-ssh-key>")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-s", "--pl-slice", dest="pl_slice",
+ help="PlanetLab slicename", type="str")
+parser.add_option("-u", "--pl-user", dest="pl_user",
+ help="PlanetLab web username", type="str")
+parser.add_option("-p", "--pl-password", dest="pl_password",
+ help="PlanetLab web password", type="str")
+parser.add_option("-k", "--pl-ssh-key", dest="pl_ssh_key",
+ help="Path to private SSH key associated with the PL account",
+ type="str")
+
+(options, args) = parser.parse_args()
+
+pl_slice = options.pl_slice
+pl_ssh_key = options.pl_ssh_key
+pl_user = options.pl_user
+pl_password = options.pl_password
+
+## Create the experiment controller
+ec = ExperimentController(exp_id = "pl_ping")
+
+# Register a Planetlab Node with no restrictions, it can be any node
+node = ec.register_resource("planetlab::Node")
+
+# The username in this case is the slice name, the one to use for logging in
+# via ssh into PlanetLab nodes.
+ec.set(node, "username", pl_slice)
+ec.set(node, "identity", pl_ssh_key)
+
+# The pluser and plpassword are the ones used to log in to the PlanetLab web
+# site. Replace with your own user and password account information.
+ec.set(node, "pluser", pl_user)
+ec.set(node, "plpassword", pl_password)
+
+# Remove previous results
+ec.set(node, "cleanExperiment", True)
+ec.set(node, "cleanProcesses", True)
+
+# Define a ping application
+app = ec.register_resource("linux::Application")
+ec.set(app, "command", "ping -c3 nepi.inria.fr")
+
+# Connect the application to the node
+ec.register_connection(node, app)
+
+# Deploy the experiment:
+ec.deploy()
+
+# Wait until the application is finished to retrieve the trace:
+ec.wait_finished(app)
+
+trace = ec.trace(app, "stdout")
+
+print "PING output ", trace
+
+# Do the experiment controller shutdown
+ec.shutdown()
+
+# END
+++ /dev/null
-#!/usr/bin/env python
-#
-# NEPI, a framework to manage network experiments
-# Copyright (C) 2013 INRIA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-# Author: Lucia Guevgeozian <lucia.guevgeozian_odizzio@inria.fr>
-
-from nepi.execution.ec import ExperimentController
-from nepi.execution.resource import ResourceAction, ResourceState
-
-import os
-
-exp_id = "ping_exp"
-
-# Create the entity Experiment Controller:
-ec = ExperimentController(exp_id)
-
-# Register the nodes resources:
-
-# The username in this case is the slice name, the one to use for login in
-# via ssh into PlanetLab nodes. Replace with your own slice name.
-username = "inria_sfatest"
-
-# The pluser and plpassword are the ones used to login in the PlanetLab web
-# site. Replace with your own user and password account information.
-pl_user = "lucia.guevgeozian_odizzio@inria.fr"
-pl_password = os.environ.get("PL_PASS")
-
-# Define a Planetlab Node with no restriction, it can be any node
-node = ec.register_resource('PlanetlabNode')
-ec.set(node, "username", username)
-ec.set(node, "pluser", pl_user)
-ec.set(node, "plpassword", pl_password)
-ec.set(node, "cleanHome", True)
-ec.set(node, "cleanProcesses", True)
-
-# Define a ping application
-app = ec.register_resource('LinuxApplication')
-ec.set(app, 'command', 'ping -c5 google.com > ping_google.txt')
-
-# Connect the application to the node
-ec.register_connection(node, app)
-
-# Deploy the experiment:
-ec.deploy()
-
-# Wait until the application is finish to retrive the trace:
-ec.wait_finished(app)
-
-trace = ec.trace(app, 'ping_google.txt')
-
-# Choose a directory to store the traces locally, change to a convenient path for you:
-directory = "examples/planetlab/"
-trace_file = directory + "ping_google.txt"
-f = open(trace_file, "w")
-f.write(trace)
-f.close()
-
-# Do the experiment controller shutdown:
-ec.shutdown()
-
-# END
#
# Author: Lucia Guevgeozian <lucia.guevgeozian_odizzio@inria.fr>
+# Example of how to run this experiment (replace with your information):
+#
+# $ cd <path-to-nepi>
+# python examples/planetlab/ping_with_filters.py -s <pl-slice> -u <pl-user> -p <pl-password> -k <pl-ssh-key>
+
from nepi.execution.ec import ExperimentController
from nepi.execution.resource import ResourceAction, ResourceState
+from optparse import OptionParser
import os
-def create_node(ec, username, pl_user, pl_password, hostname=None, country=None,
- operatingSystem=None, minBandwidth=None, minCpu=None):
- node = ec.register_resource("PlanetlabNode")
+def create_node(ec, pl_slice, pl_ssh_key, pl_user, pl_password,
+ hostname = None, country = None, operatingSystem = None,
+ minBandwidth = None, minCpu = None):
- if username:
- ec.set(node, "username", username)
- if pl_user:
- ec.set(node, "pluser", pl_user)
- if pl_password:
- ec.set(node, "plpassword", pl_password)
+ node = ec.register_resource("planetlab::Node")
+
+ ec.set(node, "username", pl_slice)
+ ec.set(node, "identity", pl_ssh_key)
+ ec.set(node, "pluser", pl_user)
+ ec.set(node, "plpassword", pl_password)
if hostname:
ec.set(node, "hostname", hostname)
if minCpu:
ec.set(node, "minCpu", minCpu)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
return node
-def add_app(ec, command, node, sudo=None, video=None, depends=None, forward_x11=None, \
- env=None):
- app = ec.register_resource("LinuxApplication")
+def add_app(ec, command, node, newname = None, sudo = None,
+ video = None, depends = None, forward_x11 = None, env = None):
+ app = ec.register_resource("linux::Application")
+
if sudo is not None:
ec.set(app, "sudo", sudo)
if video is not None:
ec.set(app, "forwardX11", forward_x11)
if env is not None:
ec.set(app, "env", env)
+
ec.set(app, "command", command)
ec.register_connection(app, node)
+    # add collector to download application standard output
+ collector = ec.register_resource("Collector")
+ ec.set(collector, "traceName", "stdout")
+ if newname:
+ ec.set(collector, "rename", newname)
+ ec.register_connection(app, collector)
+
return app
-exp_id = "ping_filters_exp"
+usage = ("usage: %prog -s <pl-slice> -u <pl-user> -p <pl-password> "
+ "-k <pl-ssh-key> -c <country> -o <operating-system> -H <hostname> ")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-s", "--pl-slice", dest="pl_slice",
+ help="PlanetLab slicename", type="str")
+parser.add_option("-u", "--pl-user", dest="pl_user",
+ help="PlanetLab web username", type="str")
+parser.add_option("-p", "--pl-password", dest="pl_password",
+ help="PlanetLab web password", type="str")
+parser.add_option("-k", "--pl-ssh-key", dest="pl_ssh_key",
+ help="Path to private SSH key associated with the PL account",
+ type="str")
+parser.add_option("-c", "--country", dest="country",
+ help="Country for the PL hosts",
+ type="str")
+parser.add_option("-o", "--os", dest="os",
+ help="Operating system for the PL hosts", default="f14",
+ type="str")
+parser.add_option("-H", "--hostname", dest="hostname",
+ help="PlanetLab hostname",
+ type="str")
+
+(options, args) = parser.parse_args()
+
+pl_slice = options.pl_slice
+pl_ssh_key = options.pl_ssh_key
+pl_user = options.pl_user
+pl_password = options.pl_password
+hostname = options.hostname
+country = options.country
+os = options.os
# Create the entity Experiment Controller:
-ec = ExperimentController(exp_id)
+ec = ExperimentController("pl_ping_filters")
# Register the nodes resources:
-# The username in this case is the slice name, the one to use for login in
-# via ssh into PlanetLab nodes. Replace with your own slice name.
-username = "inria_sfatest"
-
-# The pluser and plpassword are the ones used to login in the PlanetLab web
-# site. Replace with your own user and password account information.
-pl_user = "lucia.guevgeozian_odizzio@inria.fr"
-pl_password = os.environ.get("PL_PASS")
-
# Choose the PlanetLab nodes for the experiment, in this example 5 nodes are
# used, and they are picked according to different criterias.
# First node will be the one defined by its hostname.
-hostname = "planetlab2.utt.fr"
-node1 = create_node(ec, username, pl_user, pl_password, hostname=hostname)
+node1 = create_node(ec, pl_slice, pl_ssh_key, pl_user, pl_password,
+ hostname = hostname)
-# Second node will be any node in France.
-country = "France"
-node2 = create_node(ec, username, pl_user, pl_password, country=country)
+# Second node will be any node in the selected country.
+node2 = create_node(ec, pl_slice, pl_ssh_key, pl_user, pl_password,
+ country=country)
-# Third node will be a node in France that has Fedora 14 installed.
-operatingSystem = "f14"
-node3 = create_node(ec, username, pl_user, pl_password, country=country,
- operatingSystem=operatingSystem)
+# Third node will be a node in the selected country and with the selected
+# fedora OS
+node3 = create_node(ec, pl_slice, pl_ssh_key, pl_user, pl_password,
+ country = country,
+ operatingSystem = os)
# Forth node will have at least 50% of CPU available
minCpu=50
-node4 = create_node(ec, username, pl_user, pl_password, minCpu=minCpu)
+node4 = create_node(ec, pl_slice, pl_ssh_key, pl_user, pl_password,
+ minCpu = minCpu)
# Fifth node can be any node, constrains are not important.
-node5 = create_node(ec, username, pl_user, pl_password)
+node5 = create_node(ec, pl_slice, pl_ssh_key, pl_user, pl_password)
# Register the applications to run in the nodes, in this case just ping to the
# first node:
-apps_per_node = dict()
apps = []
for node in [node2, node3, node4, node5]:
- command = "ping -c5 %s > ping%s.txt" % (hostname, node)
- app = add_app(ec, command, node)
- apps_per_node[node] = app
+ command = "ping -c5 %s" % hostname
+ trace_name = "%s.ping" % hostname
+ app = add_app(ec, command, node, newname = trace_name)
apps.append(app)
# Register conditions
# before the rest of the nodes. This assures that no other resource will use the
# identified node even if the constraints matchs.
# In this example node2, node3, node4 and node5, are deployed after node1 is
-# provisioned. node1 must be the node planetlab2.utt.fr, meanwhile node2, node3,
+# provisioned. node1 must be the node identified by the given hostname, meanwhile node2, node3,
# node4 and node5 just need to fulfill certain constraints.
# Applications are always deployed after nodes, so no need to register conditions
# for the apps in this example.
# Wait until the applications are finish to retrive the traces:
ec.wait_finished(apps)
-traces = dict()
-for node, app in apps_per_node.iteritems():
- ping_string = "ping%s.txt" % node
- trace = ec.trace(app, ping_string)
- traces[node]= trace
-
-# Choose a directory to store the traces locally, change to a convenient path for you:
-directory = "examples/planetlab/"
-for node, trace in traces.iteritems():
- trace_file = directory + "ping%s.txt" % node
- f = open(trace_file, "w")
- f.write(trace)
- f.close()
+print "Results stored at", ec.exp_dir
# Do the experiment controller shutdown:
ec.shutdown()
--- /dev/null
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2014 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+#
+
+# Example of how to run this experiment (replace with your information):
+#
+# $ cd <path-to-nepi>
+# python examples/planetlab/select_nodes.py -s <pl-slice> -u <pl-user> -p <pl-password> -k <pl-ssh-key> -c <country> -o <operating-system> -n <node-count>
+
+
+from nepi.execution.ec import ExperimentController
+
+from optparse import OptionParser
+import os
+
+usage = ("usage: %prog -s <pl-slice> -u <pl-user> -p <pl-password> "
+ "-k <pl-ssh-key> -c <country> -o <operating-system> -n <node-count> ")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-s", "--pl-slice", dest="pl_slice",
+ help="PlanetLab slicename", type="str")
+parser.add_option("-u", "--pl-user", dest="pl_user",
+ help="PlanetLab web username", type="str")
+parser.add_option("-p", "--pl-password", dest="pl_password",
+ help="PlanetLab web password", type="str")
+parser.add_option("-k", "--pl-ssh-key", dest="pl_ssh_key",
+ help="Path to private SSH key associated with the PL account",
+ type="str")
+parser.add_option("-c", "--country", dest="country",
+ help="Country for the PL hosts",
+ type="str")
+parser.add_option("-o", "--os", dest="os",
+ help="Operating system for the PL hosts",
+ type="str")
+parser.add_option("-n", "--node-count", dest="node_count",
+ help="Number of PL hosts to provision",
+ default = 2,
+ type="int")
+
+(options, args) = parser.parse_args()
+
+pl_slice = options.pl_slice
+pl_ssh_key = options.pl_ssh_key
+pl_user = options.pl_user
+pl_password = options.pl_password
+country = options.country
+os = options.os
+node_count = options.node_count
+
+def add_node(ec, pl_slice, pl_ssh_key, pl_user, pl_password, country, os):
+ node = ec.register_resource("planetlab::Node")
+ ec.set(node, "username", pl_slice)
+ ec.set(node, "identity", pl_ssh_key)
+ ec.set(node, "pluser", pl_user)
+ ec.set(node, "plpassword", pl_password)
+
+ if country:
+ ec.set(node, "country", country)
+ if os:
+ ec.set(node, "operatingSystem", os)
+
+ ec.set(node, "cleanExperiment", True)
+ ec.set(node, "cleanProcesses", True)
+
+ return node
+
+## Create the experiment controller
+ec = ExperimentController(exp_id="host_select")
+
+nodes = []
+
+for i in xrange(node_count):
+ node = add_node(ec, pl_slice, pl_ssh_key, pl_user, pl_password, country, os)
+ nodes.append(node)
+
+ec.deploy()
+
+ec.wait_deployed(nodes)
+
+print "SELECTED HOSTS"
+
+for node in nodes:
+ print ec.get(node, "hostname")
+
+ec.shutdown()
+
+
username = 'inria_sfatest'
# nodes
-node1 = ec.register_resource("PlanetlabNode")
+node1 = ec.register_resource("planetlab::Node")
ec.set(node1, "username", username)
ec.set(node1, "pluser", pl_user)
ec.set(node1, "plpassword", pl_password)
-ec.set(node1, "cleanHome", True)
+ec.set(node1, "cleanExperiment", True)
ec.set(node1, "cleanProcesses", True)
-node2 = ec.register_resource("PlanetlabNode")
+node2 = ec.register_resource("planetlab::Node")
ec.set(node2, "username", username)
ec.set(node2, "pluser", pl_user)
ec.set(node2, "plpassword", pl_password)
-ec.set(node2, "cleanHome", True)
+ec.set(node2, "cleanExperiment", True)
ec.set(node2, "cleanProcesses", True)
-node3 = ec.register_resource("PlanetlabNode")
+node3 = ec.register_resource("planetlab::Node")
ec.set(node3, "username", username)
ec.set(node3, "pluser", pl_user)
ec.set(node3, "plpassword", pl_password)
-ec.set(node3, "cleanHome", True)
+ec.set(node3, "cleanExperiment", True)
ec.set(node3, "cleanProcesses", True)
# Set the global attribute 'persist_blacklist'
# use of the blacklist, meaning leaving out of the
# provisioning the nodes in that file, and adding the new blacklisted
# nodes to the file.
-ec.set_global('PlanetlabNode', 'persist_blacklist', True)
+ec.set_global("planetlab::Node", "persist_blacklist", True)
# apps
-app1 = ec.register_resource("LinuxApplication")
+app1 = ec.register_resource("linux::Application")
command = "ping -c5 google.com"
ec.set(app1, "command", command)
ec.register_connection(app1, node1)
-app2 = ec.register_resource("LinuxApplication")
+app2 = ec.register_resource("linux::Application")
command = "ping -c5 google.com"
ec.set(app2, "command", command)
ec.register_connection(app2, node2)
-app3 = ec.register_resource("LinuxApplication")
+app3 = ec.register_resource("linux::Application")
command = "ping -c5 google.com"
ec.set(app3, "command", command)
ec.register_connection(app3, node3)
sfaPrivateKey = os.environ.get('SFA_PK')
# server
-node1 = ec.register_resource("PlanetlabSfaNode")
-ec.set(node1, "hostname", 'planetlab3.xeno.cl.cam.ac.uk')
+node1 = ec.register_resource("planetlab::sfa::Node")
+ec.set(node1, "hostname", "planetlab3.xeno.cl.cam.ac.uk")
ec.set(node1, "username", username)
ec.set(node1, "sfauser", sfauser)
ec.set(node1, "sfaPrivateKey", sfaPrivateKey)
-ec.set(node1, "cleanHome", True)
+ec.set(node1, "cleanExperiment", True)
ec.set(node1, "cleanProcesses", True)
-node2 = ec.register_resource("PlanetlabSfaNode")
+node2 = ec.register_resource("planetlab::sfa::Node")
ec.set(node2, "username", username)
ec.set(node2, "sfauser", sfauser)
ec.set(node2, "sfaPrivateKey", sfaPrivateKey)
-ec.set(node2, "cleanHome", True)
+ec.set(node2, "cleanExperiment", True)
ec.set(node2, "cleanProcesses", True)
-node3 = ec.register_resource("PlanetlabSfaNode")
+node3 = ec.register_resource("planetlab::sfa::Node")
ec.set(node3, "username", username)
ec.set(node3, "sfauser", sfauser)
ec.set(node3, "sfaPrivateKey", sfaPrivateKey)
-app1 = ec.register_resource("LinuxApplication")
+app1 = ec.register_resource("linux::Application")
command = "ping -c5 google.com"
ec.set(app1, "command", command)
ec.register_connection(app1, node1)
-app2 = ec.register_resource("LinuxApplication")
+app2 = ec.register_resource("linux::Application")
command = "ping -c5 google.com"
ec.set(app2, "command", command)
ec.register_connection(app2, node2)
-app3 = ec.register_resource("LinuxApplication")
+app3 = ec.register_resource("linux::Application")
command = "ping -c5 google.com"
ec.set(app3, "command", command)
ec.register_connection(app3, node3)
def create_node(ec, username, pl_user, pl_password, critical=True, hostname=None,
country=None, operatingSystem=None, minBandwidth=None, minCpu=None):
- node = ec.register_resource("PlanetlabNode")
+ node = ec.register_resource("planetlab::Node")
if username:
ec.set(node, "username", username)
ec.set(node, "minCpu", minCpu)
ec.set(node, "critical", critical)
- #ec.set(node, "cleanHome", True)
+ #ec.set(node, "cleanExperiment", True)
#ec.set(node, "cleanProcesses", True)
return node
#!/usr/bin/env python
-from nepi import __version__
from distutils.core import setup
import sys
setup(
name = "nepi",
- version = __version__,
+ version = "nepi-3-dev",
description = "Network Experiment Management Framework",
author = "Alina Quereilhac, Julien Tribino, Lucia Guevgeozian",
url = "http://nepi.inria.fr",
platforms = "Linux, OSX",
packages = [
"nepi",
- "nepi.design",
"nepi.execution",
"nepi.resources",
"nepi.resources.all",
"nepi.resources.omf",
"nepi.resources.planetlab",
"nepi.resources.planetlab.openvswitch",
- "nepi.util"],
+ "nepi.util",
+ "nepi.util.parsers",
+ "nepi.data",
+ "nepi.data.processing",
+ "nepi.data.processing.ccn",
+ "nepi.data.processing.ping"],
package_dir = {"": "src"},
package_data = {
"nepi.resources.planetlab" : [ "scripts/*.py" ],
import os
import traceback
-__version__ = "3.0"
-
LOGLEVEL = os.environ.get("NEPI_LOGLEVEL", "INFO").upper()
LOGLEVEL = getattr(logging, LOGLEVEL)
FORMAT = "%(asctime)s %(name)s %(levelname)-4s %(message)s"
--- /dev/null
+#!/usr/bin/env python
+
+###############################################################################
+#
+# CCNX benchmark
+# Copyright (C) 2014 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+#
+###############################################################################
+
+#
+# This library contains functions to parse (CCNx) ccnd logs.
+#
+# Results from experiments must be stored in a directory
+# named with the experiment run id.
+# ccnd logs are stored in .log files in a subdirectory per node.
+# The following diagram exemplifies the experiment result directory
+# structure (nidi is the unique identifier assigned to node i):
+#
+# run_id
+# \ nid1
+# \ nid2.log
+# \ nid2
+# \ nid1.log
+# \ nid3
+# \ nid3.log
+#
+
+import collections
+import functools
+import networkx
+import os
+import pickle
+import tempfile
+
+from nepi.util.timefuncs import compute_delay_ms
+from nepi.util.statfuncs import compute_mean
+import nepi.data.processing.ping.parser as ping_parser
+
+def is_control(content_name):
+ return content_name.startswith("ccnx:/%C1") or \
+ content_name.startswith("ccnx:/ccnx") or \
+ content_name.startswith("ccnx:/...")
+
+
+def parse_file(filename):
+ """ Parses message information from ccnd log files
+
+ filename: path to ccndlog file
+
+ """
+
+ faces = dict()
+ sep = " "
+
+ f = open(filename, "r")
+
+ data = []
+
+ for line in f:
+ cols = line.strip().split(sep)
+
+ # CCN_PEEK
+ # MESSAGE interest_from
+ # 1374181938.808523 ccnd[9245]: debug.4352 interest_from 6 ccnx:/test/bunny.ts (23 bytes,sim=0CDCC1D7)
+ #
+ # MESSAGE interest_to
+ # 1374181938.812750 ccnd[9245]: debug.3502 interest_to 5 ccnx:/test/bunny.ts (39 bytes,i=2844,sim=0CDCC1D7)
+ #
+ # MESSAGE CONTENT FROM
+ # 1374181938.868682 ccnd[9245]: debug.4643 content_from 5 ccnx:/test/bunny.ts/%FD%05%1E%85%8FVw/%00/%9E%3D%01%D9%3Cn%95%2BvZ%8
+ #
+ # MESSAGE CONTENT_TO
+ # 1374181938.868772 ccnd[9245]: debug.1619 content_to 6 ccnx:/test/bunny.ts/%FD%05%1E%85%8FVw/%00/%9E%3D%01%D9%3Cn%95%2BvZ%8
+ #
+ # 1375596708.222304 ccnd[9758]: debug.3692 interest_expiry ccnx:/test/bunny.ts/%FD%05%1E%86%B1GS/%00%0A%F7 (44 bytes,c=0:1,i=2819,sim=49FA8048)
+
+ # External face creation
+ # 1374181452.965961 ccnd[9245]: accepted datagram client id=5 (flags=0x40012) 204.85.191.10 port 9695
+
+ if line.find("accepted datagram client") > -1:
+ face_id = (cols[5]).replace("id=",'')
+ ip = cols[7]
+ port = cols[9]
+ faces[face_id] = (ip, port)
+ continue
+
+ # 1374181452.985296 ccnd[9245]: releasing face id 4 (slot 4)
+ if line.find("releasing face id") > -1:
+ face_id = cols[5]
+ if face_id in faces:
+ del faces[face_id]
+ continue
+
+ if len(cols) < 6:
+ continue
+
+ timestamp = cols[0]
+ message_type = cols[3]
+
+ if message_type not in ["interest_from", "interest_to", "content_from",
+ "content_to", "interest_dupnonce", "interest_expiry"]:
+ continue
+
+ face_id = cols[4]
+ content_name = cols[5]
+
+ # Interest Nonce ? -> 412A74-0844-0008-50AA-F6EAD4
+ nonce = ""
+ if message_type in ["interest_from", "interest_to", "interest_dupnonce"]:
+ last = cols[-1]
+ if len(last.split("-")) == 5:
+ nonce = last
+
+ try:
+ size = int((cols[6]).replace('(',''))
+ except:
+ print "interest_expiry without face id!", line
+ continue
+
+ # If no external IP address was identified for this face
+        # assume it is a local face
+ peer = "localhost"
+
+ if face_id in faces:
+ peer, port = faces[face_id]
+
+ data.append((content_name, timestamp, message_type, peer, face_id,
+ size, nonce, line))
+
+ f.close()
+
+ return data
+
+def dump_content_history(content_history):
+ f = tempfile.NamedTemporaryFile(delete=False)
+ pickle.dump(content_history, f)
+ f.close()
+ return f.name
+
+def load_content_history(fname):
+ f = open(fname, "r")
+ content_history = pickle.load(f)
+ f.close()
+
+ os.remove(fname)
+ return content_history
+
+def annotate_cn_node(graph, nid, ips2nid, data, content_history):
+ for (content_name, timestamp, message_type, peer, face_id,
+ size, nonce, line) in data:
+
+ # Ignore control messages for the time being
+ if is_control(content_name):
+ continue
+
+ if message_type == "interest_from" and \
+ peer == "localhost":
+ graph.node[nid]["ccn_consumer"] = True
+ elif message_type == "content_from" and \
+ peer == "localhost":
+ graph.node[nid]["ccn_producer"] = True
+
+ # Ignore local messages for the time being.
+ # They could later be used to calculate the processing times
+ # of messages.
+ if peer == "localhost":
+ continue
+
+ # remove digest
+ if message_type in ["content_from", "content_to"]:
+ content_name = "/".join(content_name.split("/")[:-1])
+
+ if content_name not in content_history:
+ content_history[content_name] = list()
+
+ peernid = ips2nid[peer]
+ graph.add_edge(nid, peernid)
+
+ content_history[content_name].append((timestamp, message_type, nid,
+ peernid, nonce, size, line))
+
+def annotate_cn_graph(logs_dir, graph, parse_ping_logs = False):
+ """ Adds CCN content history for each node in the topology graph.
+
+ """
+
+ # Make a copy of the graph to ensure integrity
+ graph = graph.copy()
+
+ ips2nid = dict()
+
+ for nid in graph.nodes():
+ ips = graph.node[nid]["ips"]
+ for ip in ips:
+ ips2nid[ip] = nid
+
+ found_files = False
+
+ # Now walk through the ccnd logs...
+ for dirpath, dnames, fnames in os.walk(logs_dir):
+ # continue if we are not at the leaf level (if there are subdirectories)
+ if dnames:
+ continue
+
+        # Each dirpath corresponds to a different node
+ nid = os.path.basename(dirpath)
+
+ # Cast to numeric nid if necessary
+ if int(nid) in graph.nodes():
+ nid = int(nid)
+
+ content_history = dict()
+
+ for fname in fnames:
+ if fname.endswith(".log"):
+ found_files = True
+ filename = os.path.join(dirpath, fname)
+ data = parse_file(filename)
+ annotate_cn_node(graph, nid, ips2nid, data, content_history)
+
+ # Avoid storing everything in memory, instead dump to a file
+ # and reference the file
+ fname = dump_content_history(content_history)
+ graph.node[nid]["history"] = fname
+
+ if not found_files:
+ msg = "No CCND output files were found to parse at %s " % logs_dir
+ raise RuntimeError, msg
+
+ if parse_ping_logs:
+ ping_parser.annotate_cn_graph(logs_dir, graph)
+
+ return graph
+
+def ccn_producers(graph):
+ """ Returns the nodes that are content providers """
+ return [nid for nid in graph.nodes() \
+ if graph.node[nid].get("ccn_producer")]
+
+def ccn_consumers(graph):
+ """ Returns the nodes that are content consumers """
+ return [nid for nid in graph.nodes() \
+ if graph.node[nid].get("ccn_consumer")]
+
+def process_content_history(graph):
+ """ Compute CCN message counts and aggregates content historical
+ information in the content_names dictionary
+
+ """
+
+ ## Assume single source
+ source = ccn_consumers(graph)[0]
+
+ interest_expiry_count = 0
+ interest_dupnonce_count = 0
+ interest_count = 0
+ content_count = 0
+ content_names = dict()
+
+ # Collect information about exchanged messages by content name and
+ # link delay info.
+ for nid in graph.nodes():
+ # Load the data collected from the node's ccnd log
+ fname = graph.node[nid]["history"]
+ history = load_content_history(fname)
+
+ for content_name in history.keys():
+ hist = history[content_name]
+
+ for (timestamp, message_type, nid1, nid2, nonce, size, line) in hist:
+ if message_type in ["content_from", "content_to"]:
+ # The first Interest sent will not have a version or chunk number.
+ # The first Content sent back in reply, will end in /=00 or /%00.
+ # Make sure to map the first Content to the first Interest.
+ if content_name.endswith("/=00"):
+ content_name = "/".join(content_name.split("/")[0:-2])
+
+ # Add content name to dictionary
+ if content_name not in content_names:
+ content_names[content_name] = dict()
+ content_names[content_name]["interest"] = dict()
+ content_names[content_name]["content"] = list()
+
+ # Classify interests by replica
+ if message_type in ["interest_from"] and \
+ nonce not in content_names[content_name]["interest"]:
+ content_names[content_name]["interest"][nonce] = list()
+
+ # Add consumer history
+ if nid == source:
+ if message_type in ["interest_to", "content_from"]:
+ # content name history as seen by the source
+ if "consumer_history" not in content_names[content_name]:
+ content_names[content_name]["consumer_history"] = list()
+
+ content_names[content_name]["consumer_history"].append(
+ (timestamp, message_type))
+
+ # Add messages per content name and cumulate totals by message type
+ if message_type == "interest_dupnonce":
+ interest_dupnonce_count += 1
+ elif message_type == "interest_expiry":
+ interest_expiry_count += 1
+ elif message_type == "interest_from":
+ interest_count += 1
+ # Append to interest history of the content name
+ content_names[content_name]["interest"][nonce].append(
+ (timestamp, nid2, nid1))
+ elif message_type == "content_from":
+ content_count += 1
+ # Append to content history of the content name
+ content_names[content_name]["content"].append((timestamp, nid2, nid1))
+ else:
+ continue
+ del hist
+ del history
+
+ # Compute the time elapsed between the time an interest is sent
+ # in the consumer node and when the content is received back
+ for content_name in content_names.keys():
+ # order content and interest messages by timestamp
+ content_names[content_name]["content"] = sorted(
+ content_names[content_name]["content"])
+
+ for nonce, timestamps in content_names[content_name][
+ "interest"].iteritems():
+ content_names[content_name]["interest"][nonce] = sorted(
+ timestamps)
+
+ history = sorted(content_names[content_name]["consumer_history"])
+ content_names[content_name]["consumer_history"] = history
+
+ # compute the rtt time of the message
+ rtt = None
+ waiting_content = False
+ interest_timestamp = None
+ content_timestamp = None
+
+ for (timestamp, message_type) in history:
+ if not waiting_content and message_type == "interest_to":
+ waiting_content = True
+ interest_timestamp = timestamp
+ continue
+
+ if waiting_content and message_type == "content_from":
+ content_timestamp = timestamp
+ break
+
+ # If we can't determine who sent the interest, discard it
+ rtt = -1
+ if interest_timestamp and content_timestamp:
+ rtt = compute_delay_ms(content_timestamp, interest_timestamp)
+
+ content_names[content_name]["rtt"] = rtt
+ content_names[content_name]["lapse"] = (interest_timestamp, content_timestamp)
+
+ return (graph,
+ content_names,
+ interest_expiry_count,
+ interest_dupnonce_count,
+ interest_count,
+ content_count)
+
+def process_content_history_logs(logs_dir, graph, parse_ping_logs = False):
+ """ Parse CCN logs and aggregate content history information in graph.
+    Returns annotated graph and message count and content names history.
+
+ """
+ ## Process logs and analyse data
+ try:
+ graph = annotate_cn_graph(logs_dir, graph,
+ parse_ping_logs = parse_ping_logs)
+ except:
+ print "Skipping: Error parsing ccnd logs", logs_dir
+ raise
+
+ source = ccn_consumers(graph)[0]
+ target = ccn_producers(graph)[0]
+
+ # Process the data from the ccnd logs, but do not re compute
+ # the link delay.
+ try:
+ (graph,
+ content_names,
+ interest_expiry_count,
+ interest_dupnonce_count,
+ interest_count,
+ content_count) = process_content_history(graph)
+ except:
+ print "Skipping: Error processing ccn data", logs_dir
+ raise
+
+ return (graph,
+ content_names,
+ interest_expiry_count,
+ interest_dupnonce_count,
+ interest_count,
+ content_count)
--- /dev/null
+#!/usr/bin/env python
+
+###############################################################################
+#
+# CCNX benchmark
+# Copyright (C) 2014 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+#
+###############################################################################
+
+#
+# This library contains functions to parse log files generated using ping.
+#
+
+import collections
+import re
+import os
+
+# RE to match ping reply lines, e.g. "64 bytes from host (ip): icmp_seq=1 ttl=53 time=8.3 ms"
+_rre = re.compile("\d+ bytes from ((?P<hostname>[^\s]+) )?\(?(?P<ip>[^\s]+)\)??: icmp_.eq=\d+ ttl=\d+ time=(?P<time>[^\s]+) ms")
+
+def parse_file(filename):
+ """
+    filename: path to ping output file
+
+ """
+
+ f = open(filename, "r")
+
+    # Ping target info
+ target_ip = None
+ target_hostname = None
+
+ data = []
+
+ for line in f:
+        # match ping reply lines
+ m = re.match(_rre, line)
+ if not m:
+ continue
+
+ target_ip = m.groupdict()["ip"]
+        # FIX THIS: Make sure the regular expression does not include
+ # the ')' in the ip group
+ target_ip = target_ip.replace(")","")
+ target_hostname = m.groupdict()["hostname"]
+ time = m.groupdict()["time"]
+ data.append((target_ip, target_hostname, time))
+
+ f.close()
+
+ return data
+
+def annotate_cn_node(graph, nid1, ips2nid, data):
+ for (target_ip, target_hostname, time) in data:
+ nid2 = ips2nid[target_ip]
+
+ if "delays" not in graph.edge[nid1][nid2]:
+ graph.edge[nid1][nid2]["delays"] = []
+
+ time = float(time.replace("ms", "").replace(" ",""))
+
+ graph.edge[nid1][nid2]["delays"].append(time)
+
+def annotate_cn_graph(logs_dir, graph):
+ """ Add delay inormation to graph using data collected using
+ ping.
+
+ """
+ ips2nid = dict()
+
+ for nid in graph.nodes():
+ ips = graph.node[nid]["ips"]
+ for ip in ips:
+ ips2nid[ip] = nid
+
+ # Walk through the ping logs...
+ found_files = False
+
+ for dirpath, dnames, fnames in os.walk(logs_dir):
+ # continue if we are not at the leaf level (if there are subdirectories)
+ if dnames:
+ continue
+
+        # Each dirpath corresponds to a different host
+ nid = os.path.basename(dirpath)
+
+ for fname in fnames:
+ if fname.endswith(".ping"):
+ found_files = True
+ filename = os.path.join(dirpath, fname)
+ data = parse_file(filename)
+ annotate_cn_node(graph, nid, ips2nid, data)
+
+ if not found_files:
+ msg = "No PING output files were found to parse at %s " % logs_dir
+ raise RuntimeError, msg
+
+ # Take as weight the most frequent value
+ for nid1, nid2 in graph.edges():
+ delays = collections.Counter(graph.edge[nid1][nid2]["delays"])
+ weight = delays.most_common(1)[0][0]
+ del graph.edge[nid1][nid2]["delays"]
+ graph.edge[nid1][nid2]["weight"] = weight
+
+ return graph
+
+
+++ /dev/null
-#!/usr/bin/python
+++ /dev/null
-#
-# NEPI, a framework to manage network experiments
-# Copyright (C) 2013 INRIA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-#
-# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
-
-from nepi.util import guid
-
-guid_gen = guid.GuidGenerator()
-
-class Attributes(object):
- def __init__(self):
- super(Attributes, self).__init__()
- self._attributes = dict()
-
- def __getattr__(self, name):
- try:
- return self._attributes[name]
- except:
- return super(Attributes, self).__getattribute__(name)
-
- def __setattr__(self, name, value):
- try:
- if value == None:
- old = self._attributes[name]
- del self._attributes[name]
- return old
-
- self._attributes[name] = value
- return value
- except:
- return super(Attributes, self).__setattr__(name, value)
-
-class Connections(object):
- def __init__(self):
- super(Connections, self).__init__()
- self._connections = set()
-
- def __getattr__(self, guid_or_label):
- try:
- for b in self._connections:
- if guid_or_label in [b.guid, b.label]:
- return b
- except:
- return super(Connections, self).__getattribute__(guid_or_label)
-
-class Box(object):
- def __init__(self, label = None, guid = None):
- super(Box, self).__init__()
- self._guid = guid_gen.next(guid)
- self._a = Attributes()
- self._c = Connections()
- self._tags = set()
- self.label = label or self._guid
-
- # Graphical information to draw box
- self.x = 0
- self.y = 0
- self.width = 4
- self.height = 4
-
- @property
- def tags(self):
- return self._tags
-
- @property
- def attributes(self):
- return self._a._attributes.keys()
-
- @property
- def a(self):
- return self._a
-
- @property
- def c(self):
- return self._c
-
- @property
- def guid(self):
- return self._guid
-
- @property
- def connections(self):
- return set(self._c._connections)
-
- def tadd(self, name):
- self._tags.add(name)
-
- def tdel(self, name):
- self._tags.remove(name)
-
- def thas(self, name):
- return name in self._tags
-
- def connect(self, box, cascade = True):
- self._c._connections.add(box)
- if cascade:
- box.connect(self, cascade = False)
-
- def disconnect(self, box, cascade = True):
- self._c._connections.remove(box)
- if cascade:
- box.disconnect(self, cascade = False)
-
- def is_connected(self, box):
- return box in self.connections
-
### Attribute Types
class Types:
+ """ Allowed types for the Attribute value
+ """
String = "STRING"
Bool = "BOOL"
Enumerate = "ENUM"
### Attribute Flags
class Flags:
- """ Differents flags to characterize an attribute
-
+ """ Flags to characterize the scope of an Attribute
"""
# Attribute value can not be read (it is hidden to the user)
NoRead = 1 # 1
# Attribute global is set to all resources of rtype
Global = 1 << 7 # 128
+
class Attribute(object):
+ """ An Attribute exposes a configuration parameter of a resource
"""
- .. class:: Class Args :
- An Attribute reflects a configuration parameter for
- a particular resource. Attributes might be read only or
- not.
-
- :param name: Name of the attribute
+ def __init__(self, name, help, type = Types.String,
+ flags = None, default = None, allowed = None,
+ range = None, set_hook = None):
+ """
+ :param name: Name of the Attribute
:type name: str
- :param help: Attribute description
+ :param help: Description of the Attribute
:type help: str
- :param type: The type expected for the attribute value.
- Should be one of Attribute.Types .
+ :param type: The type expected for the Attribute value.
+ Should be one of Attribute.Types
:type type: str
- :param flags: Defines attribute behavior (i.e. whether it is read-only,
- read and write, etc). This parameter should take its values from
- Attribute.Flags. Flags values can be bitwised.
+ :param flags: Defines Attribute behavior (i.e. whether it is read-only,
+ read and write, etc). This parameter must take its values from
+ Attribute.Flags. Flags values can be bitwised
:type flags: hex
- :param default: Default value of the attribute
- :type default: depends on the type of attribute
+ :param default: Default value for the Attribute
+ :type default: Depends on the type of Attribute
- :param allowed: List of values that the attribute can take.
- This parameter is only meaningful for Enumerate type attributes.
+ :param allowed: List of values that the Attribute can take.
+ This parameter is only meaningful for Enumerate type Attributes
:type allowed: list
:param range: (max, min) tuple with range of possible values for
- attributes.
+ Attributes.
This parameter is only meaningful for Integer or Double type
- attributes.
+ Attributes
:type range: (int, int) or (float, float)
:param set_hook: Function that will be executed whenever a new
- value is set for the attribute.
+ value is set for the Attribute.
:type set_hook: function
"""
- def __init__(self, name, help, type = Types.String,
- flags = None, default = None, allowed = None,
- range = None, set_hook = None):
self._name = name
self._help = help
self._type = type
@property
def name(self):
- """ Returns the name of the attribute """
+ """ Returns the name of the Attribute """
return self._name
@property
def default(self):
- """ Returns the default value of the attribute """
+ """ Returns the default value of the Attribute """
return self._default
@property
def type(self):
- """ Returns the type of the attribute """
+ """ Returns the type of the Attribute """
return self._type
@property
def help(self):
- """ Returns the help of the attribute """
+ """ Returns the description of the Attribute """
return self._help
@property
def flags(self):
- """ Returns the flags of the attribute """
+ """ Returns the flags of the Attribute """
return self._flags
@property
def allowed(self):
- """ Returns the allowed value for this attribute """
+ """ Returns the set of allowed values for the Attribute """
return self._allowed
@property
def range(self):
- """ Returns the range of the attribute """
+ """ Returns the range of allowed numerical values for the Attribute """
return self._range
def has_flag(self, flag):
- """ Returns true if the attribute has the flag 'flag'
+ """ Returns True if the Attribute has the flag 'flag'
:param flag: Flag to be checked
:type flag: Flags
return (self._flags & flag) == flag
def get_value(self):
- """ Returns the value of the attribute """
+ """ Returns the value of the Attribute """
return self._value
def set_value(self, value):
- """ Change the value of the attribute after checking the type """
+ """ Configure a new value for the Attribute """
valid = True
if self.type == Types.Enumerate:
adequate validation"""
return True
+ @property
def has_changed(self):
- """ Returns true if the value has changed from the default """
+ """ Returns True if the value has changed from the default """
return self.value != self.default
+
ResourceState, ResourceState2str
from nepi.execution.scheduler import HeapScheduler, Task, TaskStatus
from nepi.execution.trace import TraceAttr
+from nepi.util.serializer import ECSerializer, SFormats
+from nepi.util.plotter import ECPlotter, PFormats
+from nepi.util.netgraph import NetGraph, TopologyType
# TODO: use multiprocessing instead of threading
# TODO: Allow to reconnect to a running experiment instance! (reconnect mode vs deploy mode)
import logging
import os
import sys
+import tempfile
import time
import threading
import weakref
class FailureLevel(object):
- """ Describes the system failure state """
+ """ Possible failure states for the experiment """
OK = 1
RM_FAILURE = 2
EC_FAILURE = 3
class FailureManager(object):
""" The FailureManager is responsible for handling errors
and deciding whether an experiment should be aborted or not
-
"""
- def __init__(self, ec):
- self._ec = weakref.ref(ec)
+ def __init__(self):
+ self._ec = None
self._failure_level = FailureLevel.OK
+ self._abort = False
+
+ def set_ec(self, ec):
+ self._ec = weakref.ref(ec)
@property
def ec(self):
""" Returns the ExperimentController associated to this FailureManager
-
"""
-
return self._ec()
@property
def abort(self):
+ return self._abort
+
+ def eval_failure(self, guid):
+ """ Implements failure policy and sets the abort state of the
+ experiment based on the failure state and criticality of
+ the RM
+
+ :param guid: Guid of the RM upon which the failure of the experiment
+ is evaluated
+ :type guid: int
+
+ """
if self._failure_level == FailureLevel.OK:
- for guid in self.ec.resources:
- try:
- state = self.ec.state(guid)
- critical = self.ec.get(guid, "critical")
- if state == ResourceState.FAILED and critical:
- self._failure_level = FailureLevel.RM_FAILURE
- self.ec.logger.debug("RM critical failure occurred on guid %d." \
- " Setting EC FAILURE LEVEL to RM_FAILURE" % guid)
- break
- except:
- # An error might occure because a RM was deleted abruptly.
- # In this case the error should be ignored.
- if guid in self.ec._resources:
- raise
-
- return self._failure_level != FailureLevel.OK
+ rm = self.ec.get_resource(guid)
+ state = rm.state
+ critical = rm.get("critical")
+
+ if state == ResourceState.FAILED and critical:
+ self._failure_level = FailureLevel.RM_FAILURE
+ self._abort = True
+ self.ec.logger.debug("RM critical failure occurred on guid %d." \
+ " Setting EC FAILURE LEVEL to RM_FAILURE" % guid)
def set_ec_failure(self):
self._failure_level = FailureLevel.EC_FAILURE
class ECState(object):
- """ Possible states for an ExperimentController
+ """ Possible states of the ExperimentController
"""
RUNNING = 1
FAILED = 2
- TERMINATED = 3
+ RELEASED = 3
+ TERMINATED = 4
class ExperimentController(object):
"""
- .. class:: Class Args :
-
- :param exp_id: Human readable identifier for the experiment scenario.
- :type exp_id: str
-
.. note::
An experiment, or scenario, is defined by a concrete set of resources,
- behavior, configuration and interconnection of those resources.
+ and the behavior, configuration and interconnection of those resources.
The Experiment Description (ED) is a detailed representation of a
single experiment. It contains all the necessary information to
allow repeating the experiment. NEPI allows to describe
recreated (and re-run) by instantiating an EC and recreating
the same experiment description.
- In NEPI, an experiment is represented as a graph of interconnected
+ An experiment is represented as a graph of interconnected
resources. A resource is a generic concept in the sense that any
component taking part of an experiment, whether physical of
virtual, is considered a resource. A resources could be a host,
single resource. ResourceManagers are specific to a resource
type (i.e. An RM to control a Linux application will not be
the same as the RM used to control a ns-3 simulation).
- To support a new type of resource in NEPI, a new RM must be
- implemented. NEPI already provides a variety of
- RMs to control basic resources, and new can be extended from
- the existing ones.
+ To support a new type of resource, a new RM must be implemented.
+ NEPI already provides a variety of RMs to control basic resources,
+ and new can be extended from the existing ones.
Through the EC interface the user can create ResourceManagers (RMs),
configure them and interconnect them, to describe an experiment.
exp_id, which can be re-used in different ExperimentController,
and the run_id, which is unique to one ExperimentController instance, and
is automatically generated by NEPI.
-
+
"""
- def __init__(self, exp_id = None):
+ @classmethod
+ def load(cls, filepath, format = SFormats.XML):
+ serializer = ECSerializer()
+ ec = serializer.load(filepath)
+ return ec
+
+ def __init__(self, exp_id = None, local_dir = None, persist = False,
+ fm = None, add_node_callback = None, add_edge_callback = None,
+ **kwargs):
+ """ ExperimentController entity to model and execute a network
+ experiment.
+
+ :param exp_id: Human readable name to identify the experiment
+ :type exp_id: str
+
+ :param local_dir: Path to local directory where to store experiment
+ related files
+ :type local_dir: str
+
+ :param persist: Save an XML description of the experiment after
+ completion at local_dir
+ :type persist: bool
+
+ :param fm: FailureManager object. If None is given, the default
+ FailureManager class will be used
+ :type fm: FailureManager
+
+ :param add_node_callback: Callback to invoke for node instantiation
+ when automatic topology creation mode is used
+ :type add_node_callback: function
+
+ :param add_edge_callback: Callback to invoke for edge instantiation
+ when automatic topology creation mode is used
+ :type add_edge_callback: function
+
+ """
super(ExperimentController, self).__init__()
# Logging
# resources used, etc)
self._exp_id = exp_id or "exp-%s" % os.urandom(8).encode('hex')
+ # Local path where to store experiment related files (results, etc)
+ if not local_dir:
+ local_dir = tempfile.gettempdir() # /tmp
+
+ self._local_dir = local_dir
+ self._exp_dir = os.path.join(local_dir, self.exp_id)
+ self._run_dir = os.path.join(self.exp_dir, self.run_id)
+
+ # If True persist the experiment controller in XML format, after completion
+ self._persist = persist
+
# generator of globally unique ids
self._guid_generator = guid.GuidGenerator()
self._stop = False
# Entity in charge of managing system failures
- self._fm = FailureManager(self)
+ if not fm:
+ self._fm = FailureManager()
+ self._fm.set_ec(self)
# EC state
self._state = ECState.RUNNING
+ # Automatically construct experiment description
+ self._netgraph = None
+ if add_node_callback or add_edge_callback or kwargs.get("topology"):
+ self._build_from_netgraph(add_node_callback, add_edge_callback,
+ **kwargs)
+
# The runner is a pool of threads used to parallelize
# execution of tasks
- nthreads = int(os.environ.get("NEPI_NTHREADS", "3"))
- self._runner = ParallelRun(maxthreads = nthreads)
+ self._nthreads = 20
+ self._runner = None
# Event processing thread
self._cond = threading.Condition()
"""
return self._logger
+ @property
+ def fm(self):
+ """ Returns the failure manager
+
+ """
+
+ return self._fm
+
+ @property
+ def failure_level(self):
+ """ Returns the level of FAILURE of the experiment
+
+ """
+
+ return self._fm._failure_level
+
@property
def ecstate(self):
""" Returns the state of the Experiment Controller
"""
return self._run_id
+ @property
+ def nthreads(self):
+ """ Returns the number of processing nthreads used
+
+ """
+ return self._nthreads
+
+ @property
+ def local_dir(self):
+ """ Root local directory for experiment files
+
+ """
+ return self._local_dir
+
+ @property
+ def exp_dir(self):
+ """ Local directory to store results and other files related to the
+ experiment.
+
+ """
+ return self._exp_dir
+
+ @property
+ def run_dir(self):
+ """ Local directory to store results and other files related to the
+ experiment run.
+
+ """
+ return self._run_dir
+
+ @property
+ def persist(self):
+ """ If True, persists the ExperimentController to XML format upon
+ experiment completion
+
+ """
+ return self._persist
+
+ @property
+ def netgraph(self):
+ """ Return NetGraph instance if experiment description was automatically
+ generated
+
+ """
+ return self._netgraph
+
@property
def abort(self):
""" Returns True if the experiment has failed and should be interrupted,
"""
return self._fm.abort
+ def inform_failure(self, guid):
+ """ Reports a failure in a RM to the EC for evaluation
+
+ :param guid: Resource id
+ :type guid: int
+
+ """
+
+ return self._fm.eval_failure(guid)
+
def wait_finished(self, guids):
""" Blocking method that waits until all RMs in the 'guids' list
have reached a state >= STOPPED (i.e. STOPPED, FAILED or
break
# If a guid reached one of the target states, remove it from list
- guid = guids[0]
- rstate = self.state(guid)
+ guid = guids.pop()
+ rm = self.get_resource(guid)
+ rstate = rm.state
- hrrstate = ResourceState2str.get(rstate)
- hrstate = ResourceState2str.get(state)
-
if rstate >= state:
- guids.remove(guid)
- rm = self.get_resource(guid)
self.logger.debug(" %s guid %d DONE - state is %s, required is >= %s " % (
- rm.get_rtype(), guid, hrrstate, hrstate))
+ rm.get_rtype(), guid, rstate, state))
else:
# Debug...
self.logger.debug(" WAITING FOR guid %d - state is %s, required is >= %s " % (
- guid, hrrstate, hrstate))
+ guid, rstate, state))
+
+ guids.append(guid)
+
time.sleep(0.5)
-
+
+ def plot(self, dirpath = None, format= PFormats.FIGURE, show = False):
+ plotter = ECPlotter()
+ fpath = plotter.plot(self, dirpath = dirpath, format= format,
+ show = show)
+ return fpath
+
+ def serialize(self, format = SFormats.XML):
+ serializer = ECSerializer()
+ sec = serializer.serialize(self, format = format)
+ return sec
+
+ def save(self, dirpath = None, format = SFormats.XML):
+ if dirpath == None:
+ dirpath = self.run_dir
+
+ try:
+ os.makedirs(dirpath)
+ except OSError:
+ pass
+
+ serializer = ECSerializer()
+ path = serializer.save(self, dirpath, format = format)
+ return path
+
def get_task(self, tid):
""" Returns a task by its id
def get_resource(self, guid):
""" Returns a registered ResourceManager by its guid
- :param guid: Id of the task
+ :param guid: Id of the resource
:type guid: int
:rtype: ResourceManager
rm = self._resources.get(guid)
return rm
+ def get_resources_by_type(self, rtype):
+ """ Returns the ResourceManager objects of type rtype
+
+ :param rtype: Resource type
+ :type rtype: string
+
+ :rtype: list of ResourceManagers
+
+ """
+ rms = []
+ for guid, rm in self._resources.iteritems():
+ if rm.get_rtype() == rtype:
+ rms.append(rm)
+ return rms
+
def remove_resource(self, guid):
del self._resources[guid]
@property
def resources(self):
- """ Returns the set() of guids of all the ResourceManager
+ """ Returns the guids of all ResourceManagers
:return: Set of all RM guids
- :rtype: set
+ :rtype: list
"""
keys = self._resources.keys()
return keys
+ def filter_resources(self, rtype):
+ """ Returns the guids of all ResourceManagers of type rtype
+
+ :param rtype: Resource type
+ :type rtype: string
+
+ :rtype: list of guids
+
+ """
+ rms = []
+ for guid, rm in self._resources.iteritems():
+ if rm.get_rtype() == rtype:
+ rms.append(rm.guid)
+ return rms
+
def register_resource(self, rtype, guid = None):
""" Registers a new ResourceManager of type 'rtype' in the experiment
if not guids:
# If no guids list was passed, all 'NEW' RMs will be deployed
guids = []
- for guid in self.resources:
- if self.state(guid) == ResourceState.NEW:
+ for guid, rm in self._resources.iteritems():
+ if rm.state == ResourceState.NEW:
guids.append(guid)
if isinstance(guids, int):
:type guids: list
"""
+ if self._state == ECState.RELEASED:
+ return
+
if isinstance(guids, int):
guids = [guids]
self.wait_released(guids)
+ if self.persist:
+ self.save()
+
for guid in guids:
if self.get(guid, "hardRelease"):
- self.remove_resource(guid)
+ self.remove_resource(guid)
+
+ # Mark the EC state as RELEASED
+ self._state = ECState.RELEASED
def shutdown(self):
""" Releases all resources and stops the ExperimentController
"""
+ self._nthreads = int(os.environ.get("NEPI_NTHREADS", str(self._nthreads)))
+ self._runner = ParallelRun(maxthreads = self.nthreads)
self._runner.start()
while not self._stop:
self._cond.notify()
self._cond.release()
+ def _build_from_netgraph(self, add_node_callback, add_edge_callback,
+ **kwargs):
+ """ Automates experiment description using a NetGraph instance.
+ """
+ self._netgraph = NetGraph(**kwargs)
+
+ if add_node_callback:
+ ### Add resources to the EC
+ for nid in self.netgraph.nodes():
+ add_node_callback(self, nid)
+
+ if add_edge_callback:
+ #### Add connections between resources
+ for nid1, nid2 in self.netgraph.edges():
+ add_edge_callback(self, nid1, nid2)
+
import threading
import weakref
-reschedule_delay = "1s"
-
class ResourceAction:
""" Action that a user can order to a Resource Manager
try:
return func(self, *args, **kwargs)
except:
+ self.fail()
+
import traceback
err = traceback.format_exc()
- self.error(err)
- self.debug("SETTING guid %d to state FAILED" % self.guid)
- self.fail()
+ logger = Logger(self._rtype)
+ logger.error(err)
+ logger.error("SETTING guid %d to state FAILED" % self.guid)
raise
return wrapped
_traces = None
_help = None
_backend = None
+ _reschedule_delay = "0.5s"
@classmethod
def _register_attribute(cls, attr):
"""
return copy.deepcopy(cls._attributes[name])
-
@classmethod
def get_traces(cls):
""" Returns a copy of the traces
# instance lock to synchronize exclusive state change methods (such
# as deploy and release methods), in order to prevent them from being
- # executed at the same time
+ # executed at the same time and corrupt internal resource state
self._release_lock = threading.Lock()
@property
""" Get the current state of the RM """
return self._state
+ @property
+ def reschedule_delay(self):
+ """ Returns default reschedule delay """
+ return self._reschedule_delay
+
def log_message(self, msg):
""" Returns the log message formatted with added information.
try:
self.do_release()
except:
+ self.set_released()
+
import traceback
err = traceback.format_exc()
- self.error(err)
-
- self.set_released()
+ msg = " %s guid %d ----- FAILED TO RELEASE ----- \n %s " % (
+ self._rtype, self.guid, err)
+ logger = Logger(self._rtype)
+ logger.debug(msg)
def fail(self):
""" Sets the RM to state FAILED.
:rtype: str
"""
attr = self._attrs[name]
+
+ """
+ A.Q. Commenting due to performance impact
if attr.has_flag(Flags.Global):
self.warning( "Attribute %s is global. Use get_global instead." % name)
+ """
return attr.value
:rtype: str
"""
attr = self._attrs[name]
- return attr.has_changed()
+ return attr.has_changed
def has_flag(self, name, flag):
""" Returns true if the attribute has the flag 'flag'
connected.append(rm)
return connected
+ def is_rm_instance(self, rtype):
+ """ Returns True if the RM is instance of 'rtype'
+
+ :param rtype: Type of the RM we look for
+ :type rtype: str
+ :return: True|False
+ """
+ rclass = ResourceFactory.get_resource_type(rtype)
+ if isinstance(self, rclass):
+ return True
+ return False
+
@failtrap
def _needs_reschedule(self, group, state, time):
""" Internal method that verify if 'time' has elapsed since
"""
reschedule = False
- delay = reschedule_delay
+ delay = self.reschedule_delay
# check state and time elapsed on all RMs
for guid in group:
"""
reschedule = False
- delay = reschedule_delay
+ delay = self.reschedule_delay
## evaluate if set conditions are met
#import pdb;pdb.set_trace()
reschedule = False
- delay = reschedule_delay
+ delay = self.reschedule_delay
## evaluate if conditions to start are met
# Verify all start conditions are met
for (group, state, time) in start_conditions:
# Uncomment for debug
- unmet = []
- for guid in group:
- rm = self.ec.get_resource(guid)
- unmet.append((guid, rm._state))
-
- self.debug("---- WAITED STATES ---- %s" % unmet )
+ #unmet = []
+ #for guid in group:
+ # rm = self.ec.get_resource(guid)
+ # unmet.append((guid, rm._state))
+ #
+ #self.debug("---- WAITED STATES ---- %s" % unmet )
reschedule, delay = self._needs_reschedule(group, state, time)
if reschedule:
"""
reschedule = False
- delay = reschedule_delay
+ delay = self.reschedule_delay
## evaluate if conditions to stop are met
if self.ec.abort:
"""
reschedule = False
- delay = reschedule_delay
+ delay = self.reschedule_delay
## evaluate if conditions to deploy are met
if self.ec.abort:
# only can deploy when RM is either NEW, DISCOVERED or PROVISIONED
if self.state not in [ResourceState.NEW, ResourceState.DISCOVERED,
ResourceState.PROVISIONED]:
+ #### XXX: A.Q. IT SHOULD FAIL IF DEPLOY IS CALLED IN OTHER STATES!
reschedule = True
self.debug("---- RESCHEDULING DEPLOY ---- state %s " % self.state )
else:
def do_fail(self):
self.set_failed()
+ self.ec.inform_failure(self.guid)
def set_started(self, time = None):
""" Mark ResourceManager as STARTED """
def set_released(self, time = None):
""" Mark ResourceManager as REALEASED """
self.set_state(ResourceState.RELEASED, "_release_time", time)
- self.debug("----- RELEASED ---- ")
+
+ msg = " %s guid %d ----- RELEASED ----- " % (self._rtype, self.guid)
+ logger = Logger(self._rtype)
+ logger.debug(msg)
def set_failed(self, time = None):
""" Mark ResourceManager as FAILED """
self.set_state(ResourceState.FAILED, "_failed_time", time)
- self.debug("----- FAILED ---- ")
+
+ msg = " %s guid %d ----- FAILED ----- " % (self._rtype, self.guid)
+ logger = Logger(self._rtype)
+ logger.debug(msg)
def set_discovered(self, time = None):
""" Mark ResourceManager as DISCOVERED """
return rclass(ec, guid)
def populate_factory():
- """Register all the possible RM that exists in the current version of Nepi.
+ """Find and register all available RMs
"""
# Once the factory is populated, don't repopulate
if not ResourceFactory.resource_types():
--- /dev/null
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController, ECState
+
+import math
+import numpy
+import os
+import time
+
+class ExperimentRunner(object):
+ """ The ExperimentRunner entity is responsible for
+ re-running an experiment described by an ExperimentController
+ multiple times
+
+ """
+ def __init__(self):
+ super(ExperimentRunner, self).__init__()
+
+ def run(self, ec, min_runs = 1, max_runs = -1, wait_time = 0,
+ wait_guids = [], compute_metric_callback = None,
+ evaluate_convergence_callback = None ):
+ """ Run a same experiment independently multiple times, until the
+ evaluate_convergence_callback function returns True
+
+ :param ec: Description of experiment to replicate.
+ The runner takes care of deploying the EC, so ec.deploy()
+ must not be invoked directly before or after invoking
+ runner.run().
+ :type ec: ExperimentController
+
+ :param min_runs: Minimum number of times the experiment must be
+ replicated
+ :type min_runs: int
+
+ :param max_runs: Maximum number of times the experiment can be
+ replicated
+ :type max_runs: int
+
+ :param wait_time: Time to wait in seconds on each run between invoking
+ ec.deploy() and ec.release().
+ :type wait_time: float
+
+ :param wait_guids: List of guids wait for finalization on each run.
+ This list is passed to ec.wait_finished()
+ :type wait_guids: list
+
+ :param compute_metric_callback: User defined function invoked after
+ each experiment run to compute a metric. The metric is usually
+ a network measurement obtained from the data collected
+ during experiment execution.
+ The function is invoked passing the ec and the run number as arguments.
+ It must return the value for the computed metric(s) (usually a single
+ numerical value, but it can be several).
+
+ metric = compute_metric_callback(ec, run)
+
+ :type compute_metric_callback: function
+
+ :param evaluate_convergence_callback: User defined function invoked after
+ computing the metric on each run, to evaluate the experiment was
+ run enough times. It takes the list of cumulated metrics produced by
+ the compute_metric_callback up to the current run, and decided
+ whether the metrics have statistically converged to a meaningful value
+ or not. It must return either True or False.
+
+ stop = evaluate_convergence_callback(ec, run, metrics)
+
+ If stop is True, then the runner will exit.
+
+ :type evaluate_convergence_callback: function
+
+ """
+
+ if (not max_runs or max_runs < 0) and not compute_metric_callback:
+ msg = "Undefined STOP condition, set stop_callback or max_runs"
+ raise RuntimeError, msg
+
+ if compute_metric_callback and not evaluate_convergence_callback:
+ evaluate_convergence_callback = self.evaluate_normal_convergence
+ ec.logger.info(" Treating data as normal to evaluate convergence. "
+ "Experiment will stop when the standard error with 95% "
+ "confidence interval is >= 5% of the mean of the collected samples ")
+
+ # Force persistence of experiment controller
+ ec._persist = True
+
+ filepath = ec.save(dirpath = ec.exp_dir)
+
+ samples = []
+ run = 0
+ stop = False
+
+ while not stop:
+ run += 1
+
+ ec = self.run_experiment(filepath, wait_time, wait_guids)
+
+ ec.logger.info(" RUN %d \n" % run)
+
+ if compute_metric_callback:
+ metric = compute_metric_callback(ec, run)
+ if metric is not None:
+ samples.append(metric)
+
+ if run >= min_runs and evaluate_convergence_callback:
+ if evaluate_convergence_callback(ec, run, samples):
+ stop = True
+
+ if run >= min_runs and max_runs > -1 and run >= max_runs :
+ stop = True
+
+ ec.shutdown()
+ del ec
+
+ return run
+
+ def evaluate_normal_convergence(self, ec, run, metrics):
+ """ Returns True when the confidence interval of the sample mean is
+ less than 5% of the mean value, for a 95% confidence level,
+ assuming normal distribution of the data
+ """
+
+ if len(metrics) == 0:
+ msg = "0 samples collected"
+ raise RuntimeError, msg
+
+ x = numpy.array(metrics)
+ n = len(metrics)
+ std = x.std()
+ se = std / math.sqrt(n)
+ m = x.mean()
+
+ # Confidence interval for 95% confidence level,
+ # assuming normally distributed data.
+ ci95 = se * 2
+
+ ec.logger.info(" RUN %d - SAMPLES %d MEAN %.2f STD %.2f CI (95%%) %.2f \n" % (
+ run, n, m, std, ci95 ) )
+
+ return m * 0.05 >= ci95
+
+ def run_experiment(self, filepath, wait_time, wait_guids):
+ """ Run an experiment based on the description stored
+ in filepath.
+
+ """
+ ec = ExperimentController.load(filepath)
+
+ ec.deploy()
+
+ ec.wait_finished(wait_guids)
+ time.sleep(wait_time)
+
+ ec.release()
+
+ if ec.state == ECState.FAILED:
+ raise RuntimeError, "Experiment failed"
+
+ return ec
+
import heapq
class TaskStatus:
+ """ Execution state of the Task
+ """
NEW = 0
DONE = 1
ERROR = 2
class Task(object):
- """ This class is to define a task, that is represented by an id,
- an execution time 'timestamp' and an action 'callback """
+ """ A Task represents an operation to be executed by the
+ ExperimentController scheduler
+ """
def __init__(self, timestamp, callback):
+ """
+ :param timestamp: Future execution date of the operation
+ :type timestamp: str
+
+ :param callback: A function to invoke in order to execute the operation
+ :type callback: function
+
+ """
self.id = None
self.timestamp = timestamp
self.callback = callback
self.status = TaskStatus.NEW
class HeapScheduler(object):
- """ Create a Heap Scheduler.
+ """ Create a Heap Scheduler
.. note::
return self._valid
def schedule(self, task):
- """ Add the task 'task' in the heap of the scheduler
+ """ Add a task to the queue ordered by task.timestamp and arrival order
- :param task: task that need to be schedule
+ :param task: task to schedule
:type task: task
"""
if task.id == None:
return task
def remove(self, tid):
- """ Remove a task form the heap
+ """ Remove a task from the queue
- :param tid: Id of the task that need to be removed
+ :param tid: Id of the task to be removed
:type tid: int
+
"""
try:
self._valid.remove(tid)
pass
def next(self):
- """ Get the next task in the scheduler
-
+ """ Get the next task in the queue by timestamp and arrival order
"""
while self._queue:
try:
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
class TraceAttr:
- """ Trace attributes represent different information
- aspects that can be retrieved from a trace.
+ """A Trace attribute defines information about a Trace that can
+ be queried
"""
- ALL = 'all'
- STREAM = 'stream'
- PATH = 'path'
- SIZE = 'size'
+ ALL = "all"
+ STREAM = "stream"
+ PATH = "path"
+ SIZE = "size"
class Trace(object):
- """
- .. class:: Class Args :
-
- :param name: Name of the trace
- :type name: str
- :param help: Help about the trace
- :type help: str
-
+ """ A Trace represents information about a Resource that can
+ be collected
"""
def __init__(self, name, help, enabled = False):
+ """
+ :param name: Name of the Trace
+ :type name: str
+
+ :param help: Description of the Trace
+ :type help: str
+
+ :param enabled: Sets activation state of Trace
+ :type enabled: bool
+ """
self._name = name
self._help = help
self.enabled = enabled
@clsinit_copy
class Collector(ResourceManager):
- """ The collector is reponsible of collecting traces
+ """ The collector entity is responsible for collecting traces
of a same type associated to RMs into a local directory.
.. class:: Class Args :
"Name of the trace to be collected",
flags = Flags.Design)
- store_dir = Attribute("storeDir",
- "Path to local directory to store trace results",
- default = tempfile.gettempdir(),
- flags = Flags.Design)
-
- use_run_id = Attribute("useRunId",
- "If set to True stores traces into a sub directory named after "
- "the RUN ID assigned by the EC",
- type = Types.Bool,
- default = False,
- flags = Flags.Design)
-
sub_dir = Attribute("subDir",
"Sub directory to collect traces into",
flags = Flags.Design)
flags = Flags.Design)
cls._register_attribute(trace_name)
- cls._register_attribute(store_dir)
cls._register_attribute(sub_dir)
cls._register_attribute(rename)
- cls._register_attribute(use_run_id)
def __init__(self, ec, guid):
super(Collector, self).__init__(ec, guid)
self.error(msg)
raise RuntimeError, msg
- self._store_path = self.get("storeDir")
-
- if self.get("useRunId"):
- self._store_path = os.path.join(self._store_path, self.ec.run_id)
+ self._store_path = self.ec.run_dir
subdir = self.get("subDir")
if subdir:
- self._store_path = os.path.join(self._store_path, subdir)
+ self._store_path = os.path.join(self.store_path, subdir)
msg = "Creating local directory at %s to store %s traces " % (
- self._store_path, trace_name)
+ self.store_path, trace_name)
self.info(msg)
try:
rms = self.get_connected()
for rm in rms:
- result = self.ec.trace(rm.guid, trace_name)
fpath = os.path.join(self.store_path, "%d.%s" % (rm.guid,
- rename))
+ rename))
+
try:
+ result = self.ec.trace(rm.guid, trace_name)
f = open(fpath, "w")
f.write(result)
f.close()
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.linux.node import LinuxNode
from nepi.util.sshfuncs import ProcStatus
from nepi.util.timefuncs import tnow, tdiffsec
"""
- _rtype = "LinuxApplication"
+ _rtype = "linux::Application"
_help = "Runs an application on a Linux host with a BASH command "
_backend_type = "linux"
super(LinuxApplication, self).__init__(ec, guid)
self._pid = None
self._ppid = None
+ self._node = None
self._home = "app-%s" % self.guid
+
# whether the command should run in foreground attached
# to a terminal
self._in_foreground = False
@property
def node(self):
- node = self.get_connected(LinuxNode.get_rtype())
- if node: return node[0]
- return None
+ if not self._node:
+ node = self.get_connected(LinuxNode.get_rtype())
+ if not node:
+ msg = "Application %s guid %d NOT connected to Node" % (
+ self._rtype, self.guid)
+ raise RuntimeError, msg
+
+ self._node = node[0]
+
+ return self._node
@property
def app_home(self):
node = self.node
if not node or node.state < ResourceState.READY:
self.debug("---- RESCHEDULING DEPLOY ---- node state %s " % self.node.state )
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
else:
command = self.get("command") or ""
self.info("Deploying command '%s' " % command)
(out, err), proc = self.node.kill(self.pid, self.ppid,
sudo = self._sudo_kill)
+ """
# TODO: check if execution errors occurred
if (proc and proc.poll()) or err:
msg = " Failed to STOP command '%s' " % self.get("command")
self.error(msg, out, err)
-
+ """
+
super(LinuxApplication, self).do_stop()
def do_release(self):
def execute_command(self, command,
env = None,
sudo = False,
+ tty = False,
forward_x11 = False,
blocking = False):
return self.node.execute(command,
sudo = sudo,
+ tty = tty,
forward_x11 = forward_x11,
blocking = blocking)
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
-from nepi.execution.resource import clsinit_copy, ResourceState, \
- reschedule_delay
+from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.linux.application import LinuxApplication
from nepi.resources.linux.ccn.ccnd import LinuxCCND
@clsinit_copy
class LinuxCCNApplication(LinuxApplication):
- _rtype = "LinuxCCNApplication"
+ _rtype = "linux::CCNApplication"
def __init__(self, ec, guid):
super(LinuxCCNApplication, self).__init__(ec, guid)
def do_deploy(self):
if not self.ccnd or self.ccnd.state < ResourceState.READY:
self.debug("---- RESCHEDULING DEPLOY ---- node state %s " % self.node.state )
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
else:
command = self.get("command") or ""
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.attribute import Attribute, Flags, Types
-from nepi.execution.resource import clsinit_copy, ResourceState, \
- reschedule_delay
+from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.linux.ccn.ccnapplication import LinuxCCNApplication
import os
@clsinit_copy
class LinuxCCNCat(LinuxCCNApplication):
- _rtype = "LinuxCCNCat"
+ _rtype = "linux::CCNCat"
@classmethod
def _register_attributes(cls):
def do_deploy(self):
if not self.ccnd or self.ccnd.state < ResourceState.READY:
self.debug("---- RESCHEDULING DEPLOY ---- node state %s " % self.node.state )
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
else:
command = self.get("command")
if not command:
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.resource import clsinit_copy, ResourceState, \
- ResourceAction, reschedule_delay
+ ResourceAction
from nepi.resources.linux.application import LinuxApplication
from nepi.resources.linux.ccn.ccnr import LinuxCCNR
from nepi.util.timefuncs import tnow
@clsinit_copy
class LinuxCCNContent(LinuxApplication):
- _rtype = "LinuxCCNContent"
+ _rtype = "linux::CCNContent"
@classmethod
def _register_attributes(cls):
self.debug("---- RESCHEDULING DEPLOY ---- node state %s " % self.node.state )
# ccnr needs to wait until ccnd is deployed and running
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
else:
if not self.get("command"):
self.set("command", self._start_command)
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.linux.application import LinuxApplication
from nepi.resources.linux.node import OSType
from nepi.util.timefuncs import tnow, tdiffsec
@clsinit_copy
class LinuxCCND(LinuxApplication):
- _rtype = "LinuxCCND"
+ _rtype = "linux::CCND"
@classmethod
def _register_attributes(cls):
self.debug("---- RESCHEDULING DEPLOY ---- node state %s " % self.node.state )
# ccnd needs to wait until node is deployed and running
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
else:
if not self.get("command"):
self.set("command", self._start_command)
@property
def _sources(self):
- return "http://www.ccnx.org/releases/ccnx-0.8.1.tar.gz"
+ return "http://www.ccnx.org/releases/ccnx-0.8.2.tar.gz"
@property
def _build(self):
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.attribute import Attribute, Flags, Types
-from nepi.execution.resource import clsinit_copy, ResourceState, \
- reschedule_delay
+from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.linux.ccn.ccnapplication import LinuxCCNApplication
import os
@clsinit_copy
class LinuxCCNPeek(LinuxCCNApplication):
- _rtype = "LinuxCCNPeek"
+ _rtype = "linux::CCNPeek"
@classmethod
def _register_attributes(cls):
def do_deploy(self):
if not self.ccnd or self.ccnd.state < ResourceState.READY:
self.debug("---- RESCHEDULING DEPLOY ---- node state %s " % self.node.state )
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
else:
command = self.get("command")
if not command:
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.linux.ccn.ccnpingserver import LinuxCCNPingServer
from nepi.util.timefuncs import tnow, tdiffsec
@clsinit_copy
class LinuxCCNPing(LinuxCCNPingServer):
- _rtype = "LinuxCCNPing"
+ _rtype = "linux::CCNPing"
@classmethod
def _register_attributes(cls):
self.ccnpingserver.state < ResourceState.STARTED:
self.debug("---- RESCHEDULING START---- ccnpingserver state %s " % \
self.ccnpingserver.state )
- self.ec.schedule(reschedule_delay, self.start)
+ self.ec.schedule(self.reschedule_delay, self.start)
else:
super(LinuxCCNPing, self).do_start()
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.linux.ccn.ccnapplication import LinuxCCNApplication
from nepi.util.timefuncs import tnow, tdiffsec
@clsinit_copy
class LinuxCCNPingServer(LinuxCCNApplication):
- _rtype = "LinuxCCNPingServer"
+ _rtype = "linux::CCNPingServer"
@classmethod
def _register_attributes(cls):
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.attribute import Attribute, Flags, Types
-from nepi.execution.resource import clsinit_copy, ResourceState, \
- reschedule_delay
+from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.linux.ccn.ccnapplication import LinuxCCNApplication
import os
@clsinit_copy
class LinuxCCNPoke(LinuxCCNApplication):
- _rtype = "LinuxCCNPoke"
+ _rtype = "linux::CCNPoke"
@classmethod
def _register_attributes(cls):
def do_deploy(self):
if not self.ccnd or self.ccnd.state < ResourceState.READY:
self.debug("---- RESCHEDULING DEPLOY ---- node state %s " % self.node.state )
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
else:
command = self.get("command")
if not command:
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import clsinit_copy, ResourceState, \
- ResourceAction, reschedule_delay
+ ResourceAction
from nepi.resources.linux.application import LinuxApplication
from nepi.resources.linux.ccn.ccnd import LinuxCCND
from nepi.util.timefuncs import tnow
@clsinit_copy
class LinuxCCNR(LinuxApplication):
- _rtype = "LinuxCCNR"
+ _rtype = "linux::CCNR"
@classmethod
def _register_attributes(cls):
self.debug("---- RESCHEDULING DEPLOY ---- CCND state %s " % self.ccnd.state )
# ccnr needs to wait until ccnd is deployed and running
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
else:
if not self.get("command"):
self.set("command", self._start_command)
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import clsinit_copy, ResourceState, \
- ResourceAction, reschedule_delay
+ ResourceAction
from nepi.resources.linux.application import LinuxApplication
from nepi.resources.linux.ccn.ccnd import LinuxCCND
from nepi.util.timefuncs import tnow
@clsinit_copy
class LinuxFIBEntry(LinuxApplication):
- _rtype = "LinuxFIBEntry"
+ _rtype = "linux::FIBEntry"
@classmethod
def _register_attributes(cls):
super(LinuxFIBEntry, self).__init__(ec, guid)
self._home = "fib-%s" % self.guid
self._ping = None
- self._mtr = None
self._traceroute = None
+ self._ccnd = None
@property
def ccnd(self):
- ccnd = self.get_connected(LinuxCCND.get_rtype())
- if ccnd: return ccnd[0]
- return None
+ if not self._ccnd:
+ ccnd = self.get_connected(LinuxCCND.get_rtype())
+ if ccnd:
+ self._ccnd = ccnd[0]
+
+ return self._ccnd
+
+ @property
+ def ping(self):
+ if not self._ping:
+ from nepi.resources.linux.ping import LinuxPing
+ ping = self.get_connected(LinuxPing.get_rtype())
+ if ping:
+ self._ping = ping[0]
+
+ return self._ping
+
+ @property
+ def traceroute(self):
+ if not self._traceroute:
+ from nepi.resources.linux.traceroute import LinuxTraceroute
+ traceroute = self.get_connected(LinuxTraceroute.get_rtype())
+ if traceroute:
+ self._traceroute = traceroute[0]
+
+ return self._traceroute
@property
def node(self):
def trace(self, name, attr = TraceAttr.ALL, block = 512, offset = 0):
if name == "ping":
- return self.ec.trace(self._ping, "stdout", attr, block, offset)
- if name == "mtr":
- return self.ec.trace(self._mtr, "stdout", attr, block, offset)
+ if not self.ping:
+ return None
+ return self.ec.trace(self.ping.guid, "stdout", attr, block, offset)
+
if name == "traceroute":
- return self.ec.trace(self._traceroute, "stdout", attr, block, offset)
+ if not self.traceroute:
+ return None
+ return self.ec.trace(self.traceroute.guid, "stdout", attr, block, offset)
return super(LinuxFIBEntry, self).trace(name, attr, block, offset)
# Wait until associated ccnd is provisioned
if not self.ccnd or self.ccnd.state < ResourceState.READY:
# ccnr needs to wait until ccnd is deployed and running
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
else:
if not self.get("ip"):
host = self.get("host")
raise RuntimeError, msg
def configure(self):
- if self.trace_enabled("ping"):
+ if self.trace_enabled("ping") and not self.ping:
self.info("Configuring PING trace")
- self._ping = self.ec.register_resource("LinuxPing")
- self.ec.set(self._ping, "printTimestamp", True)
- self.ec.set(self._ping, "target", self.get("host"))
- self.ec.set(self._ping, "earlyStart", True)
- self.ec.register_connection(self._ping, self.node.guid)
+ ping = self.ec.register_resource("linux::Ping")
+ self.ec.set(ping, "printTimestamp", True)
+ self.ec.set(ping, "target", self.get("host"))
+ self.ec.set(ping, "earlyStart", True)
+ self.ec.register_connection(ping, self.node.guid)
+ self.ec.register_connection(ping, self.guid)
# schedule ping deploy
- self.ec.deploy(guids=[self._ping], group = self.deployment_group)
-
- if self.trace_enabled("mtr"):
- self.info("Configuring MTR trace")
- self._mtr = self.ec.register_resource("LinuxMtr")
- self.ec.set(self._mtr, "noDns", True)
- self.ec.set(self._mtr, "printTimestamp", True)
- self.ec.set(self._mtr, "continuous", True)
- self.ec.set(self._mtr, "target", self.get("host"))
- self.ec.set(self._mtr, "earlyStart", True)
- self.ec.register_connection(self._mtr, self.node.guid)
- # schedule mtr deploy
- self.ec.deploy(guids=[self._mtr], group = self.deployment_group)
+ self.ec.deploy(guids=[ping], group = self.deployment_group)
- if self.trace_enabled("traceroute"):
+ if self.trace_enabled("traceroute") and not self.traceroute:
self.info("Configuring TRACEROUTE trace")
- self._traceroute = self.ec.register_resource("LinuxTraceroute")
- self.ec.set(self._traceroute, "printTimestamp", True)
- self.ec.set(self._traceroute, "continuous", True)
- self.ec.set(self._traceroute, "target", self.get("host"))
- self.ec.set(self._traceroute, "earlyStart", True)
- self.ec.register_connection(self._traceroute, self.node.guid)
+ traceroute = self.ec.register_resource("linux::Traceroute")
+ self.ec.set(traceroute, "printTimestamp", True)
+ self.ec.set(traceroute, "continuous", True)
+ self.ec.set(traceroute, "target", self.get("host"))
+ self.ec.set(traceroute, "earlyStart", True)
+ self.ec.register_connection(traceroute, self.node.guid)
+ self.ec.register_connection(traceroute, self.guid)
-            # schedule mtr deploy
+            # schedule traceroute deploy
- self.ec.deploy(guids=[self._traceroute], group = self.deployment_group)
+ self.ec.deploy(guids=[traceroute], group = self.deployment_group)
def do_start(self):
if self.state == ResourceState.READY:
@clsinit_copy
class LinuxChannel(ResourceManager):
- _rtype = "LinuxChannel"
+ _rtype = "linux::Channel"
_help = "Represents a wireless channel on a network of Linux hosts"
_backend = "linux"
--- /dev/null
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.attribute import Attribute, Flags, Types
+from nepi.execution.resource import clsinit_copy, ResourceState
+from nepi.resources.linux.tunnel import LinuxTunnel
+from nepi.util.sshfuncs import ProcStatus
+from nepi.util.timefuncs import tnow, tdiffsec
+
+import re
+import socket
+import time
+import os
+
+@clsinit_copy
+class LinuxGRETunnel(LinuxTunnel):
+    """ Resource Manager that builds a GRE tunnel between two Linux
+    endpoint RMs (any connected RM exposing a ``gre_connect`` method).
+    Connection setup is delegated entirely to the endpoints; this class
+    only orchestrates and verifies the tunnel.
+    """
+    _rtype = "linux::GRETunnel"
+    _help = "Constructs a tunnel between two Linux endpoints using a UDP connection "
+    _backend = "linux"
+
+    def log_message(self, msg):
+        # Tag every log line with both endpoint hostnames so tunnel
+        # messages can be traced to the pair of hosts involved.
+        return " guid %d - GRE tunnel %s - %s - %s " % (self.guid,
+            self.endpoint1.node.get("hostname"),
+            self.endpoint2.node.get("hostname"),
+            msg)
+
+    def get_endpoints(self):
+        """ Returns the list of RM that are endpoints to the tunnel
+        """
+        # An RM qualifies as a GRE endpoint iff it implements gre_connect;
+        # duck-typing is used instead of checking resource types.
+        connected = []
+        for guid in self.connections:
+            rm = self.ec.get_resource(guid)
+            if hasattr(rm, "gre_connect"):
+                connected.append(rm)
+        return connected
+
+    def initiate_connection(self, endpoint, remote_endpoint):
+        # Return the command to execute to initiate the connection to the
+        # other endpoint
+        # The endpoint's gre_connect does the actual device configuration,
+        # using this tunnel's app/run directories on the endpoint's node.
+        connection_run_home = self.run_home(endpoint)
+        connection_app_home = self.app_home(endpoint)
+        data = endpoint.gre_connect(remote_endpoint,
+                connection_app_home,
+                connection_run_home)
+        return data
+
+    def establish_connection(self, endpoint, remote_endpoint, data):
+        # Nothing further to do: gre_connect (see initiate_connection)
+        # already leaves the tunnel configured on each side.
+        pass
+
+    def verify_connection(self, endpoint, remote_endpoint):
+        # Probe the tunnel by pinging the remote endpoint's public address;
+        # NOTE(review): resolves the hostname locally via DNS — assumes the
+        # hostname is resolvable from where NEPI runs, not only on the node.
+        remote_ip = socket.gethostbyname(remote_endpoint.node.get("hostname"))
+
+        command = "ping -c 4 %s" % remote_ip
+        (out, err), proc = endpoint.node.execute(command,
+                blocking = True)
+
+        # Only 100% packet loss (or unparsable ping output) is treated as
+        # failure; partial loss still counts as an established tunnel.
+        m = re.search("(\d+)% packet loss", str(out))
+        if not m or int(m.groups()[0]) == 100:
+            msg = " Error establishing GRE Tunnel"
+            self.error(msg, out, err)
+            raise RuntimeError, msg
+
+    def terminate_connection(self, endpoint, remote_endpoint):
+        # No explicit teardown step; endpoint release handles cleanup.
+        pass
+
+    def check_state_connection(self):
+        # No periodic state polling implemented for GRE tunnels.
+        pass
+
+    def valid_connection(self, guid):
+        # TODO: Validate!
+        return True
+
from nepi.execution.attribute import Attribute, Types, Flags
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.linux.node import LinuxNode
from nepi.resources.linux.channel import LinuxChannel
@clsinit_copy
class LinuxInterface(ResourceManager):
- _rtype = "LinuxInterface"
+ _rtype = "linux::Interface"
_help = "Controls network devices on Linux hosts through the ifconfig tool"
_backend = "linux"
chan = self.channel
if not node or node.state < ResourceState.PROVISIONED:
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
elif not chan or chan.state < ResourceState.READY:
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
else:
# Verify if the interface exists in node. If not, configue
# if yes, load existing configuration
@clsinit_copy
class LinuxMtr(LinuxApplication):
- _rtype = "LinuxMtr"
+ _rtype = "linux::Mtr"
@classmethod
def _register_attributes(cls):
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.linux import rpmfuncs, debfuncs
from nepi.util import sshfuncs, execfuncs
from nepi.util.sshfuncs import ProcStatus
import os
import random
import re
+import socket
import tempfile
import time
import threading
source compilation, file download, etc)
"""
- _rtype = "LinuxNode"
+ _rtype = "linux::Node"
_help = "Controls Linux host machines ( either localhost or a host " \
"that can be accessed using a SSH key)"
_backend_type = "linux"
gateway = Attribute("gateway", "Hostname of the gateway machine",
flags = Flags.Design)
+ ip = Attribute("ip", "Linux host public IP address. "
+ "Must not be modified by the user unless hostname is 'localhost'",
+ flags = Flags.Design)
+
cls._register_attribute(hostname)
cls._register_attribute(username)
cls._register_attribute(port)
cls._register_attribute(tear_down)
cls._register_attribute(gateway_user)
cls._register_attribute(gateway)
+ cls._register_attribute(ip)
def __init__(self, ec, guid):
super(LinuxNode, self).__init__(ec, guid)
if self._os:
return self._os
- if self.get("hostname") not in ["localhost", "127.0.0.1"] and \
- not self.get("username"):
+ if not self.localhost and not self.get("username"):
msg = "Can't resolve OS, insufficient data "
self.error(msg)
raise RuntimeError, msg
@property
def localhost(self):
- return self.get("hostname") in ['localhost', '127.0.0.7', '::1']
+ return self.get("hostname") in ['localhost', '127.0.0.1', '::1']
def do_provision(self):
# check if host is alive
self.mkdir(paths)
+ # Get Public IP address if possible
+ if not self.get("ip"):
+ ip = None
+
+ if self.localhost:
+ ip = socket.gethostbyname(socket.gethostname())
+ else:
+ try:
+ ip = socket.gethostbyname(self.get("hostname"))
+ except:
+ msg = "DNS can not resolve hostname %s" % self.get("hostname")
+ self.debug(msg)
+
+ self.set("ip", ip)
+
super(LinuxNode, self).do_provision()
def do_deploy(self):
ifaces = self.get_connected(LinuxInterface.get_rtype())
for iface in ifaces:
if iface.state < ResourceState.READY:
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
return
super(LinuxNode, self).do_deploy()
# Node needs to wait until all associated RMs are released
# before it can be released
if rm.state != ResourceState.RELEASED:
- self.ec.schedule(reschedule_delay, self.release)
+ self.ec.schedule(self.reschedule_delay, self.release)
return
tear_down = self.get("tearDown")
def clean_processes(self):
self.info("Cleaning up processes")
-
- if self.get("hostname") in ["localhost", "127.0.0.2"]:
+
+ if self.localhost:
return
if self.get("username") != 'root':
stdout = 'stdout',
stderr = 'stderr',
sudo = False,
- tty = False):
+ tty = False,
+ strict_host_checking = False):
self.debug("Running command '%s'" % command)
agent = True,
identity = self.get("identity"),
server_key = self.get("serverKey"),
- tty = tty
+ tty = tty,
+ strict_host_checking = strict_host_checking
)
return (out, err), proc
gw = self.get("gateway"),
agent = True,
identity = self.get("identity"),
- server_key = self.get("serverKey")
+ server_key = self.get("serverKey"),
+ strict_host_checking = False
)
return pidtuple
gw = self.get("gateway"),
agent = True,
identity = self.get("identity"),
- server_key = self.get("serverKey")
+ server_key = self.get("serverKey"),
+ strict_host_checking = False
)
return status
agent = True,
sudo = sudo,
identity = self.get("identity"),
- server_key = self.get("serverKey")
+ server_key = self.get("serverKey"),
+ strict_host_checking = False
)
return (out, err), proc
@clsinit_copy
class LinuxNPing(LinuxApplication):
- _rtype = "LinuxNPing"
+ _rtype = "linux::NPing"
@classmethod
def _register_attributes(cls):
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.attribute import Attribute, Flags, Types
-from nepi.execution.resource import clsinit_copy, ResourceState, reschedule_delay
+from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.linux.ns3.ccn.ns3ccndceapplication import LinuxNS3CCNDceApplication
@clsinit_copy
class LinuxNS3DceCCNCat(LinuxNS3CCNDceApplication):
- _rtype = "ns3::LinuxDceCCNCat"
+ _rtype = "linux::ns3::dce::CCNCat"
@classmethod
def _register_attributes(cls):
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.attribute import Attribute, Flags, Types
-from nepi.execution.resource import clsinit_copy, ResourceState, reschedule_delay
+from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.ns3.ns3ccndceapplication import NS3BaseCCNDceApplication
@clsinit_copy
class LinuxNS3CCNDceApplication(NS3BaseCCNDceApplication):
- _rtype = "ns3::LinuxCCNDceApplication"
+ _rtype = "linux::ns3::dce::CCNApplication"
@classmethod
def _register_attributes(cls):
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.attribute import Attribute, Flags, Types
-from nepi.execution.resource import clsinit_copy, ResourceState, reschedule_delay
+from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.linux.ns3.ccn.ns3ccndceapplication import LinuxNS3CCNDceApplication
import os
@clsinit_copy
class LinuxNS3DceCCND(LinuxNS3CCNDceApplication):
- _rtype = "ns3::LinuxDceCCND"
+ _rtype = "linux::ns3::dce::CCND"
@classmethod
def _register_attributes(cls):
" ecryptfs-utils-devel libxml2-devel automake gawk "
" gcc gcc-c++ git pcre-devel make ")
elif self.simulation.node.use_deb:
- return ( " autoconf libssl-dev libexpat-dev libpcap-dev "
+ return ( " autoconf libssl-dev libexpat1-dev libpcap-dev "
" libecryptfs0 libxml2-utils automake gawk gcc g++ "
" git-core pkg-config libpcre3-dev make ")
return ""
-
@property
def _sources(self):
- return "http://www.ccnx.org/releases/ccnx-0.8.1.tar.gz"
+ #return "http://www.ccnx.org/releases/ccnx-0.8.1.tar.gz"
+ return "http://www.ccnx.org/releases/ccnx-0.8.2.tar.gz"
@property
def _build(self):
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.attribute import Attribute, Flags, Types
-from nepi.execution.resource import clsinit_copy, ResourceState, reschedule_delay
+from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.linux.ns3.ccn.ns3ccndceapplication import LinuxNS3CCNDceApplication
@clsinit_copy
class LinuxNS3DceCCNPeek(LinuxNS3CCNDceApplication):
- _rtype = "ns3::LinuxDceCCNPeek"
+ _rtype = "linux::ns3::dce::CCNPeek"
@classmethod
def _register_attributes(cls):
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.attribute import Attribute, Flags, Types
-from nepi.execution.resource import clsinit_copy, ResourceState, \
- reschedule_delay
+from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.linux.ns3.ccn.ns3ccndceapplication \
import LinuxNS3CCNDceApplication
@clsinit_copy
class LinuxNS3DceCCNPoke(LinuxNS3CCNDceApplication):
- _rtype = "ns3::LinuxDceCCNPoke"
+ _rtype = "linux::ns3::dce::CCNPoke"
@classmethod
def _register_attributes(cls):
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.attribute import Attribute, Flags, Types
-from nepi.execution.resource import clsinit_copy, ResourceState, reschedule_delay
+from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.linux.ns3.ccn.ns3ccndceapplication import LinuxNS3CCNDceApplication
@clsinit_copy
class LinuxNS3DceCCNR(LinuxNS3CCNDceApplication):
- _rtype = "ns3::LinuxDceCCNR"
+ _rtype = "linux::ns3::dce::CCNR"
@classmethod
def _register_attributes(cls):
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.attribute import Attribute, Flags, Types
-from nepi.execution.resource import clsinit_copy, ResourceState, reschedule_delay
+from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.linux.ns3.ccn.ns3ccndceapplication import LinuxNS3CCNDceApplication
@clsinit_copy
class LinuxNS3DceFIBEntry(LinuxNS3CCNDceApplication):
- _rtype = "ns3::LinuxDceFIBEntry"
+ _rtype = "linux::ns3::dce::FIBEntry"
@classmethod
def _register_attributes(cls):
"Peer host public IP used in network connection for this FIB entry. ",
flags = Flags.Design)
+ home = Attribute("home", "Sets HOME environmental variable. ",
+ default = "/root",
+ flags = Flags.Design)
+
cls._register_attribute(uri)
cls._register_attribute(protocol)
cls._register_attribute(host)
cls._register_attribute(port)
cls._register_attribute(ip)
+ cls._register_attribute(home)
def _instantiate_object(self):
if not self.get("binary"):
if not self.get("arguments"):
self.set("arguments", self._arguments)
+ if not self.get("environment"):
+ self.set("environment", self._environment)
+
super(LinuxNS3DceFIBEntry, self)._instantiate_object()
+ @property
+ def _environment(self):
+ envs = dict({
+ "home": "HOME",
+ })
+
+ env = ";".join(map(lambda k: "%s=%s" % (envs.get(k), str(self.get(k))),
+ [k for k in envs.keys() if self.get(k)]))
+
+ return env
+
@property
def _arguments(self):
args = ["-v", "add"]
import socket
import time
import weakref
+import threading
from optparse import OptionParser, SUPPRESS_HELP
def __init__(self, simulation):
super(LinuxNS3Client, self).__init__()
self._simulation = weakref.ref(simulation)
-
- self._socat_proc = None
+ self._socket_lock = threading.Lock()
@property
def simulation(self):
encoded = "|".join(map(encode, msg))
- if self.simulation.node.get("hostname") in ['localhost', '127.0.0.1']:
- sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- sock.connect(self.simulation.remote_socket)
- sock.send("%s\n" % encoded)
- reply = sock.recv(1024)
- sock.close()
- else:
- command = ( "python -c 'import socket;"
- "sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM);"
- "sock.connect(\"%(socket_addr)s\");"
- "msg = \"%(encoded_message)s\\n\";"
- "sock.send(msg);"
- "reply = sock.recv(1024);"
- "sock.close();"
- "print reply'") % {
- "encoded_message": encoded,
- "socket_addr": self.simulation.remote_socket,
- }
-
- (reply, err), proc = self.simulation.node.execute(command,
- with_lock = True)
-
- if (err and proc.poll()) or reply.strip() == "":
- msg = (" Couldn't connect to remote socket %s - REPLY: %s "
- "- ERROR: %s ") % (
- self.simulation.remote_socket, reply, err)
- self.simulation.error(msg, reply, err)
- raise RuntimeError(msg)
-
+ with self._socket_lock:
+ if self.simulation.node.get("hostname") in ['localhost', '127.0.0.1']:
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sock.connect(self.simulation.remote_socket)
+ sock.send("%s\n" % encoded)
+ reply = sock.recv(1024)
+ sock.close()
+ else:
+ command = ( "python -c 'import socket;"
+ "sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM);"
+ "sock.connect(\"%(socket_addr)s\");"
+ "msg = \"%(encoded_message)s\\n\";"
+ "sock.send(msg);"
+ "reply = sock.recv(1024);"
+ "sock.close();"
+ "print reply'") % {
+ "encoded_message": encoded,
+ "socket_addr": self.simulation.remote_socket,
+ }
+
+ (reply, err), proc = self.simulation.node.execute(command,
+ with_lock = True)
+
+ if (err and proc.poll()) or reply.strip() == "":
+ msg = (" Couldn't connect to remote socket %s - REPLY: %s "
+ "- ERROR: %s ") % (
+ self.simulation.remote_socket, reply, err)
+ self.simulation.error(msg, reply, err)
+ raise RuntimeError(msg)
+
reply = cPickle.loads(base64.b64decode(reply))
return reply
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.attribute import Attribute, Flags, Types
-from nepi.execution.resource import clsinit_copy, ResourceState, reschedule_delay
+from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.ns3.ns3dceapplication import NS3BaseDceApplication
@clsinit_copy
class LinuxNS3DceApplication(NS3BaseDceApplication):
- _rtype = "ns3::LinuxDceApplication"
+ _rtype = "linux::ns3::dce::Application"
@classmethod
def _register_attributes(cls):
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.attribute import Attribute, Flags, Types
-from nepi.execution.resource import clsinit_copy, ResourceState, reschedule_delay
+from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.linux.ns3.ns3dceapplication import LinuxNS3DceApplication
@clsinit_copy
class LinuxDcePing(LinuxNS3DceApplication):
- _rtype = "ns3::LinuxDcePing"
+ _rtype = "linux::ns3::dce::Ping"
@classmethod
def _register_attributes(cls):
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, ResourceFactory, reschedule_delay
+ ResourceState, ResourceFactory
from nepi.resources.linux.application import LinuxApplication
from nepi.util.timefuncs import tnow, tdiffsec
from nepi.resources.ns3.ns3simulation import NS3Simulation
@clsinit_copy
class LinuxNS3Simulation(LinuxApplication, NS3Simulation):
- _rtype = "LinuxNS3Simulation"
+ _rtype = "linux::ns3::Simulation"
@classmethod
def _register_attributes(cls):
ns3_version = Attribute("ns3Version",
"Version of ns-3 to install from nsam repo",
- #default = "ns-3.19",
default = "ns-3.20",
#default = "ns-3-dev",
flags = Flags.Design)
pybindgen_version = Attribute("pybindgenVersion",
"Version of pybindgen to install from bazar repo",
- #default = "864",
default = "868",
#default = "876",
flags = Flags.Design)
dce_version = Attribute("dceVersion",
"Version of dce to install from nsam repo (tag branch for repo)",
- default = "dce-1.3",
- #default = "dce-dev",
+ #default = "dce-1.3",
+ default = "dce-dev",
flags = Flags.Design)
populate_routing_tables = Attribute("populateRoutingTables",
type = Types.Bool,
flags = Flags.Design)
- stoptime = Attribute("stopTime",
+ stoptime = Attribute("StopTime",
"Time at which the simulation will stop",
flags = Flags.Design)
self._client = None
self._home = "ns3-simu-%s" % self.guid
self._socket_name = "ns3-%s.sock" % os.urandom(4).encode('hex')
- self._dce_manager_helper_uuid = None
- self._dce_application_helper_uuid = None
self._enable_dce = None
@property
self.debug("---- RESCHEDULING DEPLOY ---- node state %s " % self.node.state )
# ccnd needs to wait until node is deployed and running
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
else:
if not self.get("command"):
self.set("command", self._start_command)
if self.get("populateRoutingTables") == True:
self.invoke(IPV4_GLOBAL_ROUTING_HELPER_UUID, "PopulateRoutingTables")
+ time = self.get("StopTime")
+ if time:
+ self._client.stop(time=time)
+
self._client.start()
self.set_started()
"""
if self.state == ResourceState.STARTED:
- time = None
- if self.get("stopTime"):
- time = self.get("stopTime")
-
- self._client.stop(time=time)
+ if not self.get("StopTime"):
+ self._client.stop()
self.set_stopped()
def do_release(self):
--- /dev/null
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2014 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.attribute import Attribute, Flags, Types
+from nepi.execution.resource import ResourceManager, ResourceState, \
+ clsinit_copy
+
+import os
+import socket
+import struct
+import fcntl
+
+@clsinit_copy
+class TapFdLink(ResourceManager):
+ """ Interconnects a TAP or TUN Linux device to a FdNetDevice
+ """
+ _rtype = "linux::ns3::TapFdLink"
+
+ def __init__(self, ec, guid):
+ super(TapFdLink, self).__init__(ec, guid)
+ self._tap = None
+ self._fdnetdevice = None
+ self._fd = None
+
+ @property
+ def fdnetdevice(self):
+ if not self._fdnetdevice:
+ from nepi.resources.ns3.ns3fdnetdevice import NS3BaseFdNetDevice
+ devices = self.get_connected(NS3BaseFdNetDevice.get_rtype())
+ if not devices or len(devices) != 1:
+ msg = "TapFdLink must be connected to exactly one FdNetDevices"
+ self.error(msg)
+ raise RuntimeError, msg
+
+ self._fdnetdevice = devices[0]
+
+ return self._fdnetdevice
+
+ @property
+ def fdnode(self):
+ return self.fdnetdevice.node
+
+ @property
+ def tap(self):
+ if not self._tap:
+ from nepi.resources.linux.tap import LinuxTap
+ devices = self.get_connected(LinuxTap.get_rtype())
+ if not devices or len(devices) != 1:
+ msg = "TapFdLink must be connected to exactly one LinuxTap"
+ self.error(msg)
+ raise RuntimeError, msg
+
+ self._tap = devices[0]
+
+ return self._tap
+
+ @property
+ def tapnode(self):
+ return self.tap.node
+
+ def do_provision(self):
+ tap = self.tap
+ fdnetdevice = self.fdnetdevice
+
+ vif_name = self.ec.get(tap.guid, "deviceName")
+ vif_type = tap.vif_type_flag
+ pi = self.ec.get(tap.guid, "pi")
+
+ self._fd = self.open_tap(vif_name, vif_type, pi)
+
+ fdnetdevice.send_fd(self._fd)
+
+ super(TapFdLink, self).do_provision()
+
+ def do_deploy(self):
+ if self.tap.state < ResourceState.READY or \
+ self.fdnetdevice.state < ResourceState.READY:
+ self.ec.schedule(self.reschedule_delay, self.deploy)
+ else:
+ self.do_discover()
+ self.do_provision()
+
+ super(TapFdLink, self).do_deploy()
+
+ def open_tap(self, vif_name, vif_type, pi):
+ IFF_NO_PI = 0x1000
+ TUNSETIFF = 0x400454ca
+
+ flags = 0
+ flags |= vif_type
+
+ if not pi:
+ flags |= IFF_NO_PI
+
+ fd = os.open("/dev/net/tun", os.O_RDWR)
+
+ err = fcntl.ioctl(fd, TUNSETIFF, struct.pack("16sH", vif_name, flags))
+ if err < 0:
+ os.close(fd)
+ raise RuntimeError("Could not configure device %s" % vif_name)
+
+ return fd
+
+
@clsinit_copy
class LinuxPing(LinuxApplication):
- _rtype = "LinuxPing"
+ _rtype = "linux::Ping"
@classmethod
def _register_attributes(cls):
--- /dev/null
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2014 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.attribute import Attribute, Flags, Types
+from nepi.execution.resource import clsinit_copy, ResourceState
+from nepi.resources.linux.application import LinuxApplication
+
+import os
+
+@clsinit_copy
+class LinuxRoute(LinuxApplication):
+ _rtype = "linux::Route"
+ _help = "Adds a route to the host using iptools "
+ _backend = "linux"
+
+ @classmethod
+ def _register_attributes(cls):
+ network = Attribute("network", "Network address", flags=Flags.Design)
+ prefix = Attribute("prefix", "IP prefix length", flags=Flags.Design)
+ nexthop = Attribute("nexthop", "Nexthop IP", flags=Flags.Design)
+
+ cls._register_attribute(network)
+ cls._register_attribute(prefix)
+ cls._register_attribute(nexthop)
+
+ def __init__(self, ec, guid):
+ super(LinuxRoute, self).__init__(ec, guid)
+ self._home = "route-%s" % self.guid
+ self._device = None
+
+ @property
+ def device(self):
+ if not self._device:
+ from nepi.resources.linux.tap import LinuxTap
+ from nepi.resources.linux.tun import LinuxTun
+ from nepi.resources.linux.interface import LinuxInterface
+ tap = self.get_connected(LinuxTap.get_rtype())
+ tun = self.get_connected(LinuxTun.get_rtype())
+ interface = self.get_connected(LinuxInterface.get_rtype())
+ if tap: self._device = tap[0]
+ elif tun: self._device = tun[0]
+ elif interface: self._device = interface[0]
+ else:
+ raise RuntimeError, "linux::Routes must be connected to a "\
+ "linux::TAP, linux::TUN, or linux::Interface"
+ return self._device
+
+ @property
+ def node(self):
+ return self.device.node
+
+ def upload_start_command(self):
+ # We want to make sure the route is configured
+ # before the deploy is over, so we execute the
+ # start script now and wait until it finishes.
+ command = self.get("command")
+ command = self.replace_paths(command)
+
+ shfile = os.path.join(self.app_home, "start.sh")
+ self.node.run_and_wait(command, self.run_home,
+ shfile = shfile,
+ overwrite = True)
+
+ def upload_sources(self):
+ # upload stop.sh script
+ stop_command = self.replace_paths(self._stop_command)
+
+ self.node.upload(stop_command,
+ os.path.join(self.app_home, "stop.sh"),
+ text = True,
+ # Overwrite file every time.
+ # The stop.sh has the path to the socket, which should change
+ # on every experiment run.
+ overwrite = True)
+
+ def do_deploy(self):
+ if not self.device or self.device.state < ResourceState.PROVISIONED:
+ self.ec.schedule(self.reschedule_delay, self.deploy)
+ else:
+ if not self.get("command"):
+ self.set("command", self._start_command)
+
+ self.do_discover()
+ self.do_provision()
+
+ self.set_ready()
+
+    def do_start(self):
+        """ Marks the RM as STARTED.
+
+        The route itself is configured at deploy time (see
+        upload_start_command), so there is nothing left to execute here.
+        """
+        command = self.get("command")
+
+        if self.state == ResourceState.READY:
+            self.info("Starting command '%s'" % command)
+
+            self.set_started()
+        else:
+            # Bug fix: 'command', 'out' and 'err' were referenced before
+            # assignment in this branch, raising NameError instead of the
+            # intended RuntimeError
+            msg = " Failed to execute command '%s'" % command
+            self.error(msg)
+            raise RuntimeError, msg
+
+ def do_stop(self):
+ command = self.get('command') or ''
+
+ if self.state == ResourceState.STARTED:
+ self.info("Stopping command '%s'" % command)
+
+ command = "bash %s" % os.path.join(self.app_home, "stop.sh")
+ (out, err), proc = self.execute_command(command,
+ blocking = True)
+
+ if err:
+ msg = " Failed to stop command '%s' " % command
+ self.error(msg, out, err)
+
+ self.set_stopped()
+
+ @property
+ def _start_command(self):
+ network = self.get("network")
+ prefix = self.get("prefix")
+ nexthop = self.get("nexthop")
+ devicename = self.device.get("deviceName")
+
+ command = []
+ command.append("sudo -S ip route add %s/%s %s dev %s" % (
+ self.get("network"),
+ self.get("prefix"),
+ "default" if not nexthop else "via %s" % nexthop,
+ devicename))
+
+ return " ".join(command)
+
+ @property
+ def _stop_command(self):
+ network = self.get("network")
+ prefix = self.get("prefix")
+ nexthop = self.get("nexthop")
+ devicename = self.device.get("deviceName")
+
+ command = []
+ command.append("sudo -S ip route del %s/%s %s dev %s" % (
+ self.get("network"),
+ self.get("prefix"),
+ "default" if not nexthop else "via %s" % nexthop,
+ devicename))
+
+ return " ".join(command)
+
--- /dev/null
+import errno
+import os
+import time
+import signal
+import socket
+import tunchannel
+import struct
+import fcntl
+
+from optparse import OptionParser
+
+IFF_TUN = 0x0001
+IFF_TAP = 0x0002
+IFF_NO_PI = 0x1000
+TUNSETIFF = 0x400454ca
+
+# Track SIGTERM, and set global termination flag instead of dying
+TERMINATE = []
+def _finalize(sig,frame):
+ global TERMINATE
+ TERMINATE.append(None)
+signal.signal(signal.SIGTERM, _finalize)
+
+# SIGUSR1 suspends forwarding, SIGUSR2 resumes forwarding
+SUSPEND = []
+def _suspend(sig,frame):
+ global SUSPEND
+ if not SUSPEND:
+ SUSPEND.append(None)
+signal.signal(signal.SIGUSR1, _suspend)
+
+def _resume(sig,frame):
+ global SUSPEND
+ if SUSPEND:
+ SUSPEND.remove(None)
+signal.signal(signal.SIGUSR2, _resume)
+
+def open_tap(vif_name, vif_type, pi):
+ flags = 0
+ flags |= vif_type
+
+ if not pi:
+ flags |= IFF_NO_PI
+
+ fd = os.open("/dev/net/tun", os.O_RDWR)
+
+ err = fcntl.ioctl(fd, TUNSETIFF, struct.pack("16sH", vif_name, flags))
+ if err < 0:
+ os.close(fd)
+ raise RuntimeError("Could not configure device %s" % vif_name)
+
+ return fd
+
+def get_options():
+ usage = ("usage: %prog -N <vif_name> -t <vif-type> -p <pi> "
+ "-b <bwlimit> -c <cipher> -k <cipher-key> -q <txqueuelen> "
+ "-l <local-port-file> -r <remote-port-file> -H <remote-host> "
+ "-R <ret-file> ")
+
+ parser = OptionParser(usage = usage)
+
+ parser.add_option("-N", "--vif-name", dest="vif_name",
+ help = "The name of the virtual interface",
+ type="str")
+
+ parser.add_option("-t", "--vif-type", dest="vif_type",
+ help = "Virtual interface type. Either IFF_TAP or IFF_TUN. "
+ "Defaults to IFF_TAP. ",
+ default = IFF_TAP,
+ type="str")
+
+ parser.add_option("-n", "--pi", dest="pi",
+ action="store_true",
+ default = False,
+ help="Enable PI header")
+
+ parser.add_option("-b", "--bwlimit", dest="bwlimit",
+ help = "Specifies the interface's emulated bandwidth in bytes ",
+ default = None, type="int")
+
+ parser.add_option("-q", "--txqueuelen", dest="txqueuelen",
+ help = "Specifies the interface's transmission queue length. ",
+ default = 1000, type="int")
+
+ parser.add_option("-c", "--cipher", dest="cipher",
+ help = "Cipher to encript communication. "
+ "One of PLAIN, AES, Blowfish, DES, DES3. ",
+ default = None, type="str")
+
+ parser.add_option("-k", "--cipher-key", dest="cipher_key",
+ help = "Specify a symmetric encryption key with which to protect "
+ "packets across the tunnel. python-crypto must be installed "
+ "on the system." ,
+ default = None, type="str")
+
+ parser.add_option("-l", "--local-port-file", dest="local_port_file",
+ help = "File where to store the local binded UDP port number ",
+ default = "local_port_file", type="str")
+
+ parser.add_option("-r", "--remote-port-file", dest="remote_port_file",
+ help = "File where to read the remote UDP port number to connect to",
+ default = "remote_port_file", type="str")
+
+ parser.add_option("-H", "--remote-host", dest="remote_host",
+ help = "Remote host IP",
+ default = "remote_host", type="str")
+
+ parser.add_option("-R", "--ret-file", dest="ret_file",
+ help = "File where to store return code (success of connection) ",
+ default = "ret_file", type="str")
+
+ (options, args) = parser.parse_args()
+
+ vif_type = IFF_TAP
+ if options.vif_type and options.vif_type == "IFF_TUN":
+ vif_type = IFF_TUN
+
+ return ( options.vif_name, vif_type, options.pi,
+ options.local_port_file, options.remote_port_file,
+ options.remote_host, options.ret_file, options.bwlimit,
+ options.cipher, options.cipher_key, options.txqueuelen )
+
+if __name__ == '__main__':
+
+ ( vif_name, vif_type, pi, local_port_file, remote_port_file,
+ remote_host, ret_file, bwlimit, cipher, cipher_key, txqueuelen
+ ) = get_options()
+
+    # Create the TAP/TUN device and get its file descriptor
+ fd = open_tap(vif_name, vif_type, pi)
+
+    # Create a local socket to establish the tunnel connection
+ hostaddr = socket.gethostbyname(socket.gethostname())
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
+ sock.bind((hostaddr, 0))
+ (local_host, local_port) = sock.getsockname()
+
+ # Save local port information to file
+ f = open(local_port_file, 'w')
+ f.write("%d\n" % local_port)
+ f.close()
+
+ # Wait until remote port information is available
+ while not os.path.exists(remote_port_file):
+ time.sleep(2)
+
+ remote_port = ''
+ # Read remote port from file
+ # Try until something is read...
+ # xxx: There seems to be a weird behavior where
+ # even if the file exists and had the port number,
+ # the read operation returns empty string!
+ # Maybe a race condition?
+ for i in xrange(10):
+ f = open(remote_port_file, 'r')
+ remote_port = f.read()
+ f.close()
+
+ if remote_port:
+ break
+
+ time.sleep(2)
+
+ remote_port = remote_port.strip()
+ remote_port = int(remote_port)
+
+ # Connect local socket to remote port
+ sock.connect((remote_host, remote_port))
+ remote = os.fdopen(sock.fileno(), 'r+b', 0)
+
+ # TODO: Test connectivity!
+
+ # Create a ret_file to indicate success
+ f = open(ret_file, 'w')
+ f.write("0")
+ f.close()
+
+    # Establish tunnel
+    # Bug fix: 'tun' was never defined in this script; the TAP/TUN file
+    # descriptor returned by open_tap() above is stored in 'fd'
+    tunchannel.tun_fwd(fd, remote,
+        # NOTE(review): with_pi is hard-coded True and ignores the -n/--pi
+        # command line option — confirm this is intended
+        with_pi = True,
+        ether_mode = (vif_type == IFF_TAP),
+        udp = True,
+        cipher_key = cipher_key,
+        cipher = cipher,
+        TERMINATE = TERMINATE,
+        SUSPEND = SUSPEND,
+        tunqueue = txqueuelen,
+        tunkqueue = 500,
+        bwlimit = bwlimit
+        )
+
--- /dev/null
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.attribute import Attribute, Flags, Types
+from nepi.execution.resource import clsinit_copy, ResourceState
+from nepi.resources.linux.application import LinuxApplication
+from nepi.resources.linux.node import LinuxNode
+from nepi.util.timefuncs import tnow, tdiffsec
+
+import os
+import time
+
+PYTHON_VSYS_VERSION = "1.0"
+
+@clsinit_copy
+class LinuxTap(LinuxApplication):
+ _rtype = "linux::Tap"
+ _help = "Creates a TAP device on a Linux host"
+ _backend = "linux"
+
+ IFF_TUN = 0x0001
+ IFF_TAP = 0x0002
+
+ @classmethod
+ def _register_attributes(cls):
+ ip = Attribute("ip", "IPv4 Address",
+ flags = Flags.Design)
+
+ mac = Attribute("mac", "MAC Address",
+ flags = Flags.Design)
+
+ prefix = Attribute("prefix", "IPv4 network prefix",
+ flags = Flags.Design)
+
+ mtu = Attribute("mtu", "Maximum transmition unit for device",
+ type = Types.Integer)
+
+ devname = Attribute("deviceName",
+ "Name of the network interface (e.g. eth0, wlan0, etc)",
+ flags = Flags.NoWrite)
+
+ up = Attribute("up", "Link up",
+ type = Types.Bool)
+
+ pointopoint = Attribute("pointopoint", "Peer IP address",
+ flags = Flags.Design)
+
+        # Bug fix: this attribute was defined twice with identical
+        # arguments; keep a single definition
+        txqueuelen = Attribute("txqueuelen", "Length of transmission queue",
+            flags = Flags.Design)
+
+ gre_key = Attribute("greKey",
+ "GRE key to be used to configure GRE tunnel",
+ default = "1",
+ flags = Flags.Design)
+
+ gre_remote = Attribute("greRemote",
+ "Public IP of remote endpoint for GRE tunnel",
+ flags = Flags.Design)
+
+ pi = Attribute("pi", "Add PI (protocol information) header",
+ default = False,
+ type = Types.Bool)
+
+ tear_down = Attribute("tearDown",
+ "Bash script to be executed before releasing the resource",
+ flags = Flags.Design)
+
+ cls._register_attribute(ip)
+ cls._register_attribute(mac)
+ cls._register_attribute(prefix)
+ cls._register_attribute(mtu)
+ cls._register_attribute(devname)
+ cls._register_attribute(up)
+ cls._register_attribute(pointopoint)
+ cls._register_attribute(txqueuelen)
+ cls._register_attribute(gre_key)
+ cls._register_attribute(gre_remote)
+ cls._register_attribute(pi)
+ cls._register_attribute(tear_down)
+
+ def __init__(self, ec, guid):
+ super(LinuxTap, self).__init__(ec, guid)
+ self._home = "tap-%s" % self.guid
+ self._gre_enabled = False
+ self._tunnel_mode = False
+
+ @property
+ def node(self):
+ node = self.get_connected(LinuxNode.get_rtype())
+ if node: return node[0]
+ raise RuntimeError, "linux::TAP/TUN devices must be connected to a linux::Node"
+
+ @property
+ def gre_enabled(self):
+ if not self._gre_enabled:
+ from nepi.resources.linux.gretunnel import LinuxGRETunnel
+ gre = self.get_connected(LinuxGRETunnel.get_rtype())
+ if gre: self._gre_enabled = True
+
+ return self._gre_enabled
+
+ @property
+ def tunnel_mode(self):
+ if not self._tunnel_mode:
+ from nepi.resources.linux.tunnel import LinuxTunnel
+ tunnel = self.get_connected(LinuxTunnel.get_rtype())
+ if tunnel: self._tunnel_mode = True
+
+ return self._tunnel_mode
+
+ def upload_sources(self):
+ scripts = []
+
+ # udp-connect python script
+ udp_connect = os.path.join(os.path.dirname(__file__), "scripts",
+ "linux-udp-connect.py")
+
+ scripts.append(udp_connect)
+
+ # tunnel creation python script
+ tunchannel = os.path.join(os.path.dirname(__file__), "scripts",
+ "tunchannel.py")
+
+ scripts.append(tunchannel)
+
+ # Upload scripts
+ scripts = ";".join(scripts)
+
+ self.node.upload(scripts,
+ os.path.join(self.node.src_dir),
+ overwrite = False)
+
+ # upload stop.sh script
+ stop_command = self.replace_paths(self._stop_command)
+
+ self.node.upload(stop_command,
+ os.path.join(self.app_home, "stop.sh"),
+ text = True,
+ # Overwrite file every time.
+ # The stop.sh has the path to the socket, which should change
+ # on every experiment run.
+ overwrite = True)
+
+ def upload_start_command(self):
+        # If tunnel mode is enabled, TAP creation is delayed until the
+        # tunnel is established
+ if not self.tunnel_mode:
+ # We want to make sure the device is up and running
+ # before the deploy is over, so we execute the
+ # start script now and wait until it finishes.
+ command = self.get("command")
+ command = self.replace_paths(command)
+
+ shfile = os.path.join(self.app_home, "start.sh")
+ self.node.run_and_wait(command, self.run_home,
+ shfile = shfile,
+ overwrite = True)
+
+ def do_deploy(self):
+ if not self.node or self.node.state < ResourceState.PROVISIONED:
+ self.ec.schedule(self.reschedule_delay, self.deploy)
+ else:
+ if not self.get("deviceName"):
+ self.set("deviceName", "%s%d" % (self.vif_prefix, self.guid))
+
+ if not self.get("command"):
+ self.set("command", self._start_command)
+
+ self.do_discover()
+ self.do_provision()
+
+ self.set_ready()
+
+    def do_start(self):
+        """ Marks the RM as STARTED.
+
+        The TAP device is created at deploy time (see
+        upload_start_command), so there is nothing left to execute here.
+        """
+        command = self.get("command")
+
+        if self.state == ResourceState.READY:
+            self.info("Starting command '%s'" % command)
+
+            self.set_started()
+        else:
+            # Bug fix: 'command', 'out' and 'err' were referenced before
+            # assignment in this branch, raising NameError instead of the
+            # intended RuntimeError
+            msg = " Failed to execute command '%s'" % command
+            self.error(msg)
+            raise RuntimeError, msg
+
+ def do_stop(self):
+ command = self.get('command') or ''
+
+ if self.state == ResourceState.STARTED:
+ self.info("Stopping command '%s'" % command)
+
+ command = "bash %s" % os.path.join(self.app_home, "stop.sh")
+ (out, err), proc = self.execute_command(command,
+ blocking = True)
+
+ if err:
+ msg = " Failed to stop command '%s' " % command
+ self.error(msg, out, err)
+
+ self.set_stopped()
+
+ @property
+ def state(self):
+ state_check_delay = 0.5
+ if self._state == ResourceState.STARTED and \
+ tdiffsec(tnow(), self._last_state_check) > state_check_delay:
+
+ if self.get("deviceName"):
+ (out, err), proc = self.node.execute("ip a")
+
+ if out.strip().find(self.get("deviceName")) == -1:
+                    # tap is not running (device name not present in 'ip a' output)
+ self.set_stopped()
+
+ self._last_state_check = tnow()
+
+ return self._state
+
+ def do_release(self):
+ # Node needs to wait until all associated RMs are released
+ # to be released
+ from nepi.resources.linux.tunnel import LinuxTunnel
+ rms = self.get_connected(LinuxTunnel.get_rtype())
+
+ for rm in rms:
+ if rm.state < ResourceState.STOPPED:
+ self.ec.schedule(self.reschedule_delay, self.release)
+ return
+
+ super(LinuxTap, self).do_release()
+
+ def gre_connect(self, remote_endpoint, connection_app_home,
+ connection_run_home):
+ gre_connect_command = self._gre_connect_command(
+ remote_endpoint, connection_run_home)
+
+ # upload command to connect.sh script
+ shfile = os.path.join(connection_app_home, "gre-connect.sh")
+ self.node.upload_command(gre_connect_command,
+ shfile = shfile,
+ overwrite = False)
+
+ # invoke connect script
+ cmd = "bash %s" % shfile
+ (out, err), proc = self.node.run(cmd, connection_run_home)
+
+ # check if execution errors occurred
+ msg = " Failed to connect endpoints "
+
+ if proc.poll() or err:
+ self.error(msg, out, err)
+ raise RuntimeError, msg
+
+ # Wait for pid file to be generated
+ pid, ppid = self.node.wait_pid(connection_run_home)
+
+ # If the process is not running, check for error information
+ # on the remote machine
+        if not pid or not ppid:
+            (out, err), proc = self.node.check_errors(connection_run_home)
+            # Out is what was written in the stderr file
+            if err:
+                # Bug fix: the local variable holding the invoked command
+                # is 'cmd'; 'command' is undefined in this method
+                msg = " Failed to start command '%s' " % cmd
+                self.error(msg, out, err)
+                raise RuntimeError, msg
+
+ return True
+
+ def initiate_udp_connection(self, remote_endpoint, connection_app_home,
+ connection_run_home, cipher, cipher_key, bwlimit, txqueuelen):
+ port = self.udp_connect(remote_endpoint, connection_app_home,
+ connection_run_home, cipher, cipher_key, bwlimit, txqueuelen)
+ return port
+
+ def udp_connect(self, remote_endpoint, connection_app_home,
+ connection_run_home, cipher, cipher_key, bwlimit, txqueuelen):
+ udp_connect_command = self._udp_connect_command(
+ remote_endpoint, connection_run_home,
+ cipher, cipher_key, bwlimit, txqueuelen)
+
+ # upload command to connect.sh script
+ shfile = os.path.join(self.app_home, "udp-connect.sh")
+ self.node.upload_command(udp_connect_command,
+ shfile = shfile,
+ overwrite = False)
+
+ # invoke connect script
+ cmd = "bash %s" % shfile
+ (out, err), proc = self.node.run(cmd, self.run_home)
+
+ # check if execution errors occurred
+ msg = "Failed to connect endpoints "
+
+ if proc.poll():
+ self.error(msg, out, err)
+ raise RuntimeError, msg
+
+ # Wait for pid file to be generated
+ self._pid, self._ppid = self.node.wait_pid(self.run_home)
+
+ # If the process is not running, check for error information
+ # on the remote machine
+        if not self._pid or not self._ppid:
+            (out, err), proc = self.node.check_errors(self.run_home)
+            # Out is what was written in the stderr file
+            if err:
+                # Bug fix: the local variable holding the invoked command
+                # is 'cmd'; 'command' is undefined in this method
+                msg = " Failed to start command '%s' " % cmd
+                self.error(msg, out, err)
+                raise RuntimeError, msg
+
+ port = self.wait_local_port()
+
+ return port
+
+ def _udp_connect_command(self, remote_endpoint, connection_run_home,
+ cipher, cipher_key, bwlimit, txqueuelen):
+
+ # Set the remote endpoint to the IP of the device
+ self.set("pointopoint", remote_endpoint.get("ip"))
+
+ # Planetlab TAPs always use PI headers
+ from nepi.resources.planetlab.tap import PlanetlabTap
+ if self.is_rm_instance(PlanetlabTap.get_rtype()):
+ self.set("pi", True)
+
+        # Public IP of the remote NODE to establish tunnel
+ remote_ip = remote_endpoint.node.get("ip")
+
+ local_port_file = os.path.join(self.run_home,
+ "local_port")
+
+ remote_port_file = os.path.join(self.run_home,
+ "remote_port")
+
+ ret_file = os.path.join(self.run_home,
+ "ret_file")
+
+ # Generate UDP connect command
+ # Use the start command to configure TAP with peer info
+ start_command = self._start_command
+
+ command = ["( "]
+ command.append(start_command)
+
+        # Use linux-udp-connect.py to establish the tunnel between endpoints
+ command.append(") & (")
+ command.append("sudo -S")
+ command.append("PYTHONPATH=$PYTHONPATH:${SRC}")
+ command.append("python ${SRC}/linux-udp-connect.py")
+ command.append("-N %s" % self.get("deviceName"))
+ command.append("-t %s" % self.vif_type)
+ if self.get("pi"):
+ command.append("-p")
+ command.append("-l %s " % local_port_file)
+ command.append("-r %s " % remote_port_file)
+ command.append("-H %s " % remote_ip)
+ command.append("-R %s " % ret_file)
+ if cipher:
+ command.append("-c %s " % cipher)
+ if cipher_key:
+ command.append("-k %s " % cipher_key)
+ if txqueuelen:
+ command.append("-q %s " % txqueuelen)
+ if bwlimit:
+ command.append("-b %s " % bwlimit)
+
+ command.append(")")
+
+ command = " ".join(command)
+ command = self.replace_paths(command)
+
+ return command
+
+ def _gre_connect_command(self, remote_endpoint, connection_run_home):
+ # Set the remote endpoint to (private) device IP
+ self.set("pointopoint", remote_endpoint.get("ip"))
+ ## public node IP
+ self.set("greRemote", remote_endpoint.node.get("ip"))
+
+ # Generate GRE connect command
+ command = ["("]
+ command.append(self._stop_command)
+ command.append(") ; (")
+ command.append(self._start_gre_command)
+ command.append(")")
+
+ command = " ".join(command)
+ command = self.replace_paths(command)
+
+ return command
+
+ def establish_udp_connection(self, remote_endpoint, port):
+ # upload remote port number to file
+ rem_port = "%s\n" % port
+ self.node.upload(rem_port,
+ os.path.join(self.run_home, "remote_port"),
+ text = True,
+ overwrite = False)
+
+ def verify_connection(self):
+ self.wait_result()
+
+ def terminate_connection(self):
+ if self._pid and self._ppid:
+ (out, err), proc = self.node.kill(self._pid, self._ppid,
+ sudo = True)
+
+ # check if execution errors occurred
+ if proc.poll() and err:
+ msg = " Failed to Kill the Tap"
+ self.error(msg, out, err)
+ raise RuntimeError, msg
+
+ def check_status(self):
+ return self.node.status(self._pid, self._ppid)
+
+ def wait_local_port(self):
+ """ Waits until the local_port file for the endpoint is generated,
+ and returns the port number
+
+ """
+ return self.wait_file("local_port")
+
+ def wait_result(self):
+ """ Waits until the return code file for the endpoint is generated
+
+ """
+ return self.wait_file("ret_file")
+
+ def wait_file(self, filename):
+ """ Waits until file on endpoint is generated """
+ result = None
+ delay = 1.0
+
+ for i in xrange(20):
+ (out, err), proc = self.node.check_output(
+ self.run_home, filename)
+ if out:
+ result = out.strip()
+ break
+ else:
+ time.sleep(delay)
+ delay = delay * 1.5
+ else:
+ msg = "Couldn't retrieve %s" % filename
+ self.error(msg, out, err)
+ raise RuntimeError, msg
+
+ return result
+
+ @property
+ def _start_command(self):
+ command = []
+ if not self.gre_enabled:
+ # Make sure to clean TAP if it existed
+ stop_command = self._stop_command
+
+ start_command = []
+ start_command.append("sudo -S ip tuntap add %s mode %s %s" % (
+ self.get("deviceName"),
+ self.vif_prefix,
+ "pi" if self.get("pi") else ""))
+ start_command.append("sudo -S ip link set %s up" % self.get("deviceName"))
+ start_command.append("sudo -S ip addr add %s/%s dev %s" % (
+ self.get("ip"),
+ self.get("prefix"),
+ self.get("deviceName"),
+ ))
+
+ start_command = ";".join(start_command)
+
+ command.append("(")
+ command.append(stop_command)
+ command.append(") ; (")
+ command.append(start_command)
+ command.append(")")
+
+ return " ".join(command)
+
+ @property
+ def _stop_command(self):
+ command = []
+ command.append("sudo -S ip link set %s down" % self.get("deviceName"))
+ command.append("sudo -S ip link del %s" % self.get("deviceName"))
+
+ return ";".join(command)
+
+ @property
+ def _start_gre_command(self):
+ command = []
+ command.append("sudo -S modprobe ip_gre")
+ command.append("sudo -S ip link add %s type gre remote %s local %s ttl 64 csum key %s" % (
+ self.get("deviceName"),
+ self.get("greRemote"),
+ self.node.get("ip"),
+ self.get("greKey")
+ ))
+ command.append("sudo -S ip addr add %s/%s peer %s/%s dev %s" % (
+ self.get("ip"),
+ self.get("prefix"),
+ self.get("pointopoint"),
+ self.get("prefix"),
+ self.get("deviceName"),
+ ))
+ command.append("sudo -S ip link set %s up " % self.get("deviceName"))
+
+ return ";".join(command)
+
+ @property
+ def vif_type(self):
+ return "IFF_TAP"
+
+ @property
+ def vif_type_flag(self):
+ return LinuxTap.IFF_TAP
+
+ @property
+ def vif_prefix(self):
+ return "tap"
+
+ def sock_name(self):
+ return os.path.join(self.run_home, "tap.sock")
+
+ def valid_connection(self, guid):
+ # TODO: Validate!
+ return True
+
@clsinit_copy
class LinuxTcpdump(LinuxApplication):
- _rtype = "LinuxTcpdump"
+ _rtype = "linux::Tcpdump"
@classmethod
def _register_attributes(cls):
@clsinit_copy
class LinuxTraceroute(LinuxApplication):
- _rtype = "LinuxTraceroute"
+ _rtype = "linux::Traceroute"
@classmethod
def _register_attributes(cls):
-#!/usr/bin/env python
#
# NEPI, a framework to manage network experiments
# Copyright (C) 2013 INRIA
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+from nepi.execution.resource import clsinit_copy
+from nepi.resources.linux.tap import LinuxTap
-from nepi.design.box import Box
-from nepi.util.parser import XMLParser
+import os
-import unittest
+@clsinit_copy
+class LinuxTun(LinuxTap):
+ _rtype = "linux::Tun"
+ _help = "Creates a TUN device on a Linux host"
+ _backend = "linux"
-class BoxDesignTestCase(unittest.TestCase):
- def test_to_xml(self):
- node1 = Box()
- node2 = Box()
+ def __init__(self, ec, guid):
+ super(LinuxTun, self).__init__(ec, guid)
+ self._home = "tun-%s" % self.guid
- node1.label = "node1"
- node2.label = "node2"
+ @property
+ def sock_name(self):
+ return os.path.join(self.run_home, "tun.sock")
+
+ @property
+ def vif_type(self):
+ return "IFF_TUN"
- node1.connect(node2)
+    @property
+    def vif_type_flag(self):
+        # Bug fix: a TUN device must report the IFF_TUN flag; returning
+        # LinuxTap.IFF_TAP contradicted vif_type ("IFF_TUN") declared above.
+        return LinuxTap.IFF_TUN
- node1.a.dog = "cat"
- node1.a.one = "two"
- node1.a.t = "q"
+ @property
+ def vif_prefix(self):
+ return "tun"
- node1.c.node2.a.sky = "sea"
- node2.a.bee = "honey"
-
- node1.tadd("unooo")
- node2.tadd("dosss")
-
- parser = XMLParser()
- xml = parser.to_xml(node1)
-
- node = parser.from_xml(xml)
- xml2 = parser.to_xml(node)
-
- self.assertEquals(xml, xml2)
-
-if __name__ == '__main__':
- unittest.main()
--- /dev/null
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.resource import clsinit_copy, ResourceState
+from nepi.resources.linux.application import LinuxApplication
+from nepi.util.timefuncs import tnow, tdiffsec
+
+import os
+import time
+
+state_check_delay = 0.5
+
+@clsinit_copy
+class LinuxTunnel(LinuxApplication):
+    # Abstract base RM for point-to-point tunnels between two Linux nodes.
+    # Concrete subclasses (e.g. LinuxUdpTunnel) implement the handshake
+    # hooks (initiate/establish/verify/terminate/check_state).
+    _rtype = "abstract::linux::Tunnel"
+    _help = "Constructs a tunnel between two Linux endpoints"
+    _backend = "linux"
+
+    def __init__(self, ec, guid):
+        super(LinuxTunnel, self).__init__(ec, guid)
+        # Per-tunnel working directory name, used under each endpoint's
+        # experiment home (see app_home/run_home).
+        self._home = "tunnel-%s" % self.guid
+
+    def log_message(self, msg):
+        # Prefix log lines with both endpoint hostnames for traceability.
+        return " guid %d - tunnel %s - %s - %s " % (self.guid,
+                self.endpoint1.node.get("hostname"),
+                self.endpoint2.node.get("hostname"),
+                msg)
+
+    def get_endpoints(self):
+        """ Returns the list of RM that are endpoints to the tunnel
+        """
+        raise NotImplementedError
+
+    @property
+    def endpoint1(self):
+        # First endpoint RM, or None when no endpoint is connected yet.
+        endpoints = self.get_endpoints()
+        if endpoints: return endpoints[0]
+        return None
+
+    @property
+    def endpoint2(self):
+        # Second endpoint RM, or None when fewer than two are connected.
+        endpoints = self.get_endpoints()
+        if endpoints and len(endpoints) > 1: return endpoints[1]
+        return None
+
+    def app_home(self, endpoint):
+        # Tunnel working directory under the endpoint node's experiment home.
+        return os.path.join(endpoint.node.exp_home, self._home)
+
+    def run_home(self, endpoint):
+        # Per-run subdirectory of app_home, keyed by the EC run id.
+        return os.path.join(self.app_home(endpoint), self.ec.run_id)
+
+    def initiate_connection(self, endpoint, remote_endpoint):
+        # Step 1 of the handshake: prepare endpoint's side of the tunnel
+        # and return connection data for the remote side (subclass hook).
+        raise NotImplementedError
+
+    def establish_connection(self, endpoint, remote_endpoint, data):
+        # Step 2: complete endpoint's side using the data returned by the
+        # remote side's initiate_connection (subclass hook).
+        raise NotImplementedError
+
+    def verify_connection(self, endpoint, remote_endpoint):
+        # Check that the tunnel came up correctly on 'endpoint' (subclass hook).
+        raise NotImplementedError
+
+    def terminate_connection(self, endpoint, remote_endpoint):
+        # Tear down 'endpoint's side of the tunnel (subclass hook).
+        raise NotImplementedError
+
+    def check_state_connection(self):
+        # Poll both endpoints and update the tunnel state (subclass hook).
+        # Signature fix: the only call site (LinuxTunnel.state) invokes this
+        # with no arguments, and the concrete LinuxUdpTunnel override is
+        # also defined as check_state_connection(self); the previous
+        # (endpoint, remote_endpoint) parameters were never supplied.
+        raise NotImplementedError
+
+    def do_provision(self):
+        """ Creates run directories on both endpoint nodes and performs the
+        symmetric two-step (initiate / establish) connection handshake,
+        then verifies the connection from both sides.
+        """
+        # create run dir for tunnel on each node
+        self.endpoint1.node.mkdir(self.run_home(self.endpoint1))
+        self.endpoint2.node.mkdir(self.run_home(self.endpoint2))
+
+        self.debug("Initiate the connection")
+        # Start 2 step connection
+        # Initiate connection from endpoint 1 to endpoint 2
+        data1 = self.initiate_connection(self.endpoint1, self.endpoint2)
+
+        # Initiate connection from endpoint 2 to endpoint 1
+        data2 = self.initiate_connection(self.endpoint2, self.endpoint1)
+
+        self.debug("Establish the connection")
+        # Establish connection from endpoint 1 to endpoint 2
+        # (each side receives the data produced by the OTHER side's initiation)
+        self.establish_connection(self.endpoint1, self.endpoint2, data2)
+
+        # Establish connection from endpoint 2 to endpoint 1
+        self.establish_connection(self.endpoint2, self.endpoint1, data1)
+
+        self.debug("Verify the connection")
+        # check if connection was successful on both sides
+        self.verify_connection(self.endpoint1, self.endpoint2)
+        self.verify_connection(self.endpoint2, self.endpoint1)
+
+        self.info("Provisioning finished")
+
+        self.set_provisioned()
+
+    def do_deploy(self):
+        # Deployment must wait until both endpoint RMs exist and are READY;
+        # otherwise reschedule the deploy attempt for later.
+        if (not self.endpoint1 or self.endpoint1.state < ResourceState.READY) or \
+            (not self.endpoint2 or self.endpoint2.state < ResourceState.READY):
+            self.ec.schedule(self.reschedule_delay, self.deploy)
+        else:
+            self.do_discover()
+            self.do_provision()
+
+            self.set_ready()
+
+    def do_start(self):
+        """ Marks the tunnel as started. The tunnel itself is fully
+        established during provisioning; starting only records the
+        state transition (or fails if the RM is not READY).
+        """
+        if self.state == ResourceState.READY:
+            command = self.get("command")
+            self.info("Starting command '%s'" % command)
+
+            self.set_started()
+        else:
+            # Bug fix: 'command', 'out' and 'err' were undefined on this
+            # branch, so the error path raised NameError instead of the
+            # intended RuntimeError.
+            msg = " Failed to execute command '%s'" % self.get("command")
+            self.error(msg)
+            raise RuntimeError, msg
+
+    def do_stop(self):
+        """ Stops application execution
+        """
+
+        if self.state == ResourceState.STARTED:
+            self.info("Stopping tunnel")
+
+            # Tear down both directions of the tunnel.
+            self.terminate_connection(self.endpoint1, self.endpoint2)
+            self.terminate_connection(self.endpoint2, self.endpoint1)
+
+            self.set_stopped()
+
+    @property
+    def state(self):
+        """ Returns the state of the application
+        """
+        if self._state == ResourceState.STARTED:
+            # In order to avoid overwhelming the remote host and
+            # the local processor with too many ssh queries, the state is only
+            # requested every 'state_check_delay' seconds.
+            if tdiffsec(tnow(), self._last_state_check) > state_check_delay:
+
+                # NOTE(review): invoked with no arguments, although the
+                # abstract hook is declared with (endpoint, remote_endpoint);
+                # concrete subclasses define check_state_connection(self).
+                self.check_state_connection()
+
+                self._last_state_check = tnow()
+
+        return self._state
+
+    def valid_connection(self, guid):
+        # TODO: Validate!
+        # Currently accepts any connection request unconditionally.
+        return True
+
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.attribute import Attribute, Flags, Types
-from nepi.execution.resource import clsinit_copy, ResourceState, \
- reschedule_delay
+from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.linux.application import LinuxApplication
from nepi.util.timefuncs import tnow
http://hpcbench.sourceforge.net/
"""
- _rtype = "LinuxUdpTest"
+ _rtype = "linux::UdpTest"
@classmethod
def _register_attributes(cls):
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.attribute import Attribute, Flags, Types
-from nepi.execution.resource import clsinit_copy, ResourceState, \
- reschedule_delay
-from nepi.resources.linux.application import LinuxApplication
+from nepi.execution.resource import clsinit_copy, ResourceState
+from nepi.resources.linux.tunnel import LinuxTunnel
from nepi.util.sshfuncs import ProcStatus
from nepi.util.timefuncs import tnow, tdiffsec
import time
@clsinit_copy
-class UdpTunnel(LinuxApplication):
- _rtype = "UdpTunnel"
+class LinuxUdpTunnel(LinuxTunnel):
+ _rtype = "linux::UdpTunnel"
_help = "Constructs a tunnel between two Linux endpoints using a UDP connection "
_backend = "linux"
-
@classmethod
def _register_attributes(cls):
cipher = Attribute("cipher",
cls._register_attribute(bwlimit)
def __init__(self, ec, guid):
- super(UdpTunnel, self).__init__(ec, guid)
+ super(LinuxUdpTunnel, self).__init__(ec, guid)
self._home = "udp-tunnel-%s" % self.guid
- self._pid1 = None
- self._ppid1 = None
- self._pid2 = None
- self._ppid2 = None
+ self._pids = dict()
def log_message(self, msg):
- return " guid %d - tunnel %s - %s - %s " % (self.guid,
+ return " guid %d - udptunnel %s - %s - %s " % (self.guid,
self.endpoint1.node.get("hostname"),
self.endpoint2.node.get("hostname"),
msg)
connected = []
for guid in self.connections:
rm = self.ec.get_resource(guid)
- if hasattr(rm, "udp_connect_command"):
+ if hasattr(rm, "initiate_udp_connection"):
connected.append(rm)
return connected
- @property
- def endpoint1(self):
- endpoints = self.get_endpoints()
- if endpoints: return endpoints[0]
- return None
-
- @property
- def endpoint2(self):
- endpoints = self.get_endpoints()
- if endpoints and len(endpoints) > 1: return endpoints[1]
- return None
-
- def app_home(self, endpoint):
- return os.path.join(endpoint.node.exp_home, self._home)
-
- def run_home(self, endpoint):
- return os.path.join(self.app_home(endpoint), self.ec.run_id)
-
- def udp_connect(self, endpoint, remote_ip):
- # Get udp connect command
- local_port_file = os.path.join(self.run_home(endpoint),
- "local_port")
- remote_port_file = os.path.join(self.run_home(endpoint),
- "remote_port")
- ret_file = os.path.join(self.run_home(endpoint),
- "ret_file")
+ def initiate_connection(self, endpoint, remote_endpoint):
cipher = self.get("cipher")
cipher_key = self.get("cipherKey")
bwlimit = self.get("bwLimit")
txqueuelen = self.get("txQueueLen")
- udp_connect_command = endpoint.udp_connect_command(
- remote_ip, local_port_file, remote_port_file,
- ret_file, cipher, cipher_key, bwlimit, txqueuelen)
-
- # upload command to connect.sh script
- shfile = os.path.join(self.app_home(endpoint), "udp-connect.sh")
- endpoint.node.upload(udp_connect_command,
- shfile,
- text = True,
- overwrite = False)
-
- # invoke connect script
- cmd = "bash %s" % shfile
- (out, err), proc = endpoint.node.run(cmd, self.run_home(endpoint))
-
- # check if execution errors occurred
- msg = " Failed to connect endpoints "
-
- if proc.poll():
- self.error(msg, out, err)
- raise RuntimeError, msg
-
- # Wait for pid file to be generated
- pid, ppid = endpoint.node.wait_pid(self.run_home(endpoint))
-
- # If the process is not running, check for error information
- # on the remote machine
- if not pid or not ppid:
- (out, err), proc = endpoint.node.check_errors(self.run_home(endpoint))
- # Out is what was written in the stderr file
- if err:
- msg = " Failed to start command '%s' " % command
- self.error(msg, out, err)
- raise RuntimeError, msg
-
- # wait until port is written to file
- port = self.wait_local_port(endpoint)
- return (port, pid, ppid)
-
- def do_provision(self):
- # create run dir for tunnel on each node
- self.endpoint1.node.mkdir(self.run_home(self.endpoint1))
- self.endpoint2.node.mkdir(self.run_home(self.endpoint2))
-
- # Invoke connect script in endpoint 1
- remote_ip1 = socket.gethostbyname(self.endpoint2.node.get("hostname"))
- (port1, self._pid1, self._ppid1) = self.udp_connect(self.endpoint1,
- remote_ip1)
-
- # Invoke connect script in endpoint 2
- remote_ip2 = socket.gethostbyname(self.endpoint1.node.get("hostname"))
- (port2, self._pid2, self._ppid2) = self.udp_connect(self.endpoint2,
- remote_ip2)
+ connection_app_home = self.app_home(endpoint)
+ connection_run_home = self.run_home(endpoint)
- # upload file with port 2 to endpoint 1
- self.upload_remote_port(self.endpoint1, port2)
-
- # upload file with port 1 to endpoint 2
- self.upload_remote_port(self.endpoint2, port1)
+ port = endpoint.initiate_udp_connection(
+ remote_endpoint,
+ connection_app_home,
+ connection_run_home,
+ cipher, cipher_key, bwlimit, txqueuelen)
- # check if connection was successful on both sides
- self.wait_result(self.endpoint1)
- self.wait_result(self.endpoint2)
-
- self.info("Provisioning finished")
-
- self.set_provisioned()
+ return port
- def do_deploy(self):
- if (not self.endpoint1 or self.endpoint1.state < ResourceState.READY) or \
- (not self.endpoint2 or self.endpoint2.state < ResourceState.READY):
- self.ec.schedule(reschedule_delay, self.deploy)
- else:
- self.do_discover()
- self.do_provision()
-
- self.set_ready()
+ def establish_connection(self, endpoint, remote_endpoint, port):
+ endpoint.establish_udp_connection(remote_endpoint, port)
- def do_start(self):
- if self.state == ResourceState.READY:
- command = self.get("command")
- self.info("Starting command '%s'" % command)
-
- self.set_started()
- else:
- msg = " Failed to execute command '%s'" % command
- self.error(msg, out, err)
- raise RuntimeError, msg
+ def verify_connection(self, endpoint, remote_endpoint):
+ endpoint.verify_connection()
- def do_stop(self):
- """ Stops application execution
- """
- if self.state == ResourceState.STARTED:
- self.info("Stopping tunnel")
-
- # Only try to kill the process if the pid and ppid
- # were retrieved
- if self._pid1 and self._ppid1 and self._pid2 and self._ppid2:
- (out1, err1), proc1 = self.endpoint1.node.kill(self._pid1,
- self._ppid1, sudo = True)
- (out2, err2), proc2 = self.endpoint2.node.kill(self._pid2,
- self._ppid2, sudo = True)
+ def terminate_connection(self, endpoint, remote_endpoint):
+ endpoint.terminate_connection()
- if err1 or err2 or proc1.poll() or proc2.poll():
- # check if execution errors occurred
- msg = " Failed to STOP tunnel"
- self.error(msg, err1, err2)
- raise RuntimeError, msg
+ def check_state_connection(self):
+ # Make sure the process is still running in background
+ # No execution errors occurred. Make sure the background
+ # process with the recorded pid is still running.
- self.set_stopped()
+ status1 = self.endpoint1.check_status()
+ status2 = self.endpoint2.check_status()
- @property
- def state(self):
- """ Returns the state of the application
- """
- if self._state == ResourceState.STARTED:
- # In order to avoid overwhelming the remote host and
- # the local processor with too many ssh queries, the state is only
- # requested every 'state_check_delay' seconds.
- state_check_delay = 0.5
- if tdiffsec(tnow(), self._last_state_check) > state_check_delay:
- if self._pid1 and self._ppid1 and self._pid2 and self._ppid2:
- # Make sure the process is still running in background
- # No execution errors occurred. Make sure the background
- # process with the recorded pid is still running.
- status1 = self.endpoint1.node.status(self._pid1, self._ppid1)
- status2 = self.endpoint2.node.status(self._pid2, self._ppid2)
-
- if status1 == ProcStatus.FINISHED and \
- status2 == ProcStatus.FINISHED:
-
- # check if execution errors occurred
- (out1, err1), proc1 = self.endpoint1.node.check_errors(
- self.run_home(self.endpoint1))
-
- (out2, err2), proc2 = self.endpoint2.node.check_errors(
- self.run_home(self.endpoint2))
+ if status1 == ProcStatus.FINISHED and \
+ status2 == ProcStatus.FINISHED:
- if err1 or err2:
- msg = "Error occurred in tunnel"
- self.error(msg, err1, err2)
- self.fail()
- else:
- self.set_stopped()
+ # check if execution errors occurred
+ (out1, err1), proc1 = self.endpoint1.node.check_errors(
+ self.run_home(self.endpoint1))
- self._last_state_check = tnow()
+ (out2, err2), proc2 = self.endpoint2.node.check_errors(
+ self.run_home(self.endpoint2))
- return self._state
+ if err1 or err2:
+ msg = "Error occurred in tunnel"
+ self.error(msg, err1, err2)
+ self.fail()
+ else:
+ self.set_stopped()
def wait_local_port(self, endpoint):
""" Waits until the local_port file for the endpoint is generated,
return result
- def upload_remote_port(self, endpoint, port):
- # upload remote port number to file
- port = "%s\n" % port
- endpoint.node.upload(port,
- os.path.join(self.run_home(endpoint), "remote_port"),
- text = True,
- overwrite = False)
-
- def valid_connection(self, guid):
- # TODO: Validate!
- return True
-
import sys
import uuid
+try:
+ import netns
+except ImportError:
+ pass
+
class NetNSWrapper(object):
def __init__(self, loglevel = logging.INFO, enable_dump = False):
super(NetNSWrapper, self).__init__()
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3wifiremotestationmanager import NS3BaseWifiRemoteStationManager
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3wifiremotestationmanager import NS3BaseWifiRemoteStationManager
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3wifimac import NS3BaseWifiMac
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3wifiremotestationmanager import NS3BaseWifiRemoteStationManager
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3wifimac import NS3BaseWifiMac
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3wifiremotestationmanager import NS3BaseWifiRemoteStationManager
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3arpl3protocol import NS3BaseArpL3Protocol
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3errormodel import NS3BaseErrorModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3errormodel import NS3BaseErrorModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3channel import NS3BaseChannel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3application import NS3BaseApplication
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3errormodel import NS3BaseErrorModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3wifiremotestationmanager import NS3BaseWifiRemoteStationManager
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3mobilitymodel import NS3BaseMobilityModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3mobilitymodel import NS3BaseMobilityModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3wifiremotestationmanager import NS3BaseWifiRemoteStationManager
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3propagationdelaymodel import NS3BasePropagationDelayModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3mobilitymodel import NS3BaseMobilityModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3propagationlossmodel import NS3BasePropagationLossModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3channel import NS3BaseChannel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3queue import NS3BaseQueue
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3base import NS3Base
@clsinit_copy
attr_linkacknowledgment = Attribute("LinkAcknowledgment",
"Enable Link layer acknowledgment mechanism",
type = Types.Bool,
- default = "False",
+ default = "True",
allowed = None,
range = None,
flags = Flags.Reserved | Flags.Construct)
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3channel import NS3BaseChannel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3channel import NS3BaseChannel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
-from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
+ ResourceState
+from nepi.resources.ns3.ns3fdnetdevice import NS3BaseFdNetDevice
@clsinit_copy
-class NS3FdNetDevice(NS3BaseNetDevice):
+class NS3FdNetDevice(NS3BaseFdNetDevice):
_rtype = "ns3::FdNetDevice"
@classmethod
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3propagationlossmodel import NS3BasePropagationLossModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3propagationlossmodel import NS3BasePropagationLossModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3mobilitymodel import NS3BaseMobilityModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3mobilitymodel import NS3BaseMobilityModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3propagationlossmodel import NS3BasePropagationLossModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3icmpv4l4protocol import NS3BaseIcmpv4L4Protocol
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3base import NS3Base
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3wifiremotestationmanager import NS3BaseWifiRemoteStationManager
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3ipv4l3protocol import NS3BaseIpv4L3Protocol
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3propagationlossmodel import NS3BasePropagationLossModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3propagationlossmodel import NS3BasePropagationLossModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3propagationlossmodel import NS3BasePropagationLossModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3propagationlossmodel import NS3BasePropagationLossModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3errormodel import NS3BaseErrorModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3propagationlossmodel import NS3BasePropagationLossModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
-from nepi.resources.ns3.ns3dceapplication import NS3BaseDceApplication
+ ResourceState
+from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
@clsinit_copy
-class NS3DceApplication(NS3BaseDceApplication):
- _rtype = "ns3::DceApplication"
+class NS3LrWpanNetDevice(NS3BaseNetDevice):
+ _rtype = "ns3::LrWpanNetDevice"
@classmethod
def _register_attributes(cls):
- attr_starttime = Attribute("StartTime",
- "Time at which the application will start",
- type = Types.String,
- default = "+0.0ns",
+ attr_useacks = Attribute("UseAcks",
+ "Request acknowledgments for data frames.",
+ type = Types.Bool,
+ default = "True",
allowed = None,
range = None,
flags = Flags.Reserved | Flags.Construct)
- cls._register_attribute(attr_starttime)
-
- attr_stoptime = Attribute("StopTime",
- "Time at which the application will stop",
- type = Types.String,
- default = "+0.0ns",
- allowed = None,
- range = None,
- flags = Flags.Reserved | Flags.Construct)
-
- cls._register_attribute(attr_stoptime)
+ cls._register_attribute(attr_useacks)
@classmethod
def _register_traces(cls):
-
- processstarted = Trace("ProcessStarted", "notify when the dce is started")
-
- cls._register_trace(processstarted)
-
-
+ pass
def __init__(self, ec, guid):
- super(NS3DceApplication, self).__init__(ec, guid)
- self._home = "ns3-dce-application-%s" % self.guid
+ super(NS3LrWpanNetDevice, self).__init__(ec, guid)
+ self._home = "ns3-lr-wpan-net-device-%s" % self.guid
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3propagationlossmodel import NS3BasePropagationLossModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3wifimac import NS3BaseWifiMac
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3wifiremotestationmanager import NS3BaseWifiRemoteStationManager
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3channel import NS3BaseChannel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3propagationlossmodel import NS3BasePropagationLossModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3errorratemodel import NS3BaseErrorRateModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3node import NS3BaseNode
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3wifimac import NS3BaseWifiMac
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3propagationlossmodel import NS3BasePropagationLossModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3propagationlossmodel import NS3BasePropagationLossModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3application import NS3BaseApplication
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3wifiremotestationmanager import NS3BaseWifiRemoteStationManager
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3application import NS3BaseApplication
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3application import NS3BaseApplication
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3channel import NS3BaseChannel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3channel import NS3BaseChannel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3application import NS3BaseApplication
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3mobilitymodel import NS3BaseMobilityModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3propagationdelaymodel import NS3BasePropagationDelayModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3propagationlossmodel import NS3BasePropagationLossModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3mobilitymodel import NS3BaseMobilityModel
@clsinit_copy
attr_bounds = Attribute("Bounds",
"Bounds of the area to cruise.",
type = Types.String,
- default = "0|0|100|100",
+ default = "0|100|0|100",
allowed = None,
range = None,
flags = Flags.Reserved | Flags.Construct)
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3mobilitymodel import NS3BaseMobilityModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3propagationlossmodel import NS3BasePropagationLossModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3errormodel import NS3BaseErrorModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3errormodel import NS3BaseErrorModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3queue import NS3BaseQueue
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3wifiremotestationmanager import NS3BaseWifiRemoteStationManager
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3channel import NS3BaseChannel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3channel import NS3BaseChannel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3wifimac import NS3BaseWifiMac
@clsinit_copy
cls._register_attribute(attr_maxmissedbeacons)
+ attr_activeprobing = Attribute("ActiveProbing",
+ "If true, we send probe requests. If false, we don\'t. NOTE: if more than one STA in your simulation is using active probing, you should enable it at a different simulation time for each STA, otherwise all the STAs will start sending probes at the same time resulting in collisions. See bug 1060 for more info.",
+ type = Types.Bool,
+ default = "False",
+ allowed = None,
+ range = None,
+ flags = Flags.Reserved | Flags.Construct)
+
+ cls._register_attribute(attr_activeprobing)
+
attr_qossupported = Attribute("QosSupported",
"This Boolean attribute is set to enable 802.11e/WMM-style QoS support at this STA",
type = Types.Bool,
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3mobilitymodel import NS3BaseMobilityModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3base import NS3Base
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3propagationlossmodel import NS3BasePropagationLossModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3propagationlossmodel import NS3BasePropagationLossModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3channel import NS3BaseChannel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3application import NS3BaseApplication
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3application import NS3BaseApplication
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3application import NS3BaseApplication
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3base import NS3Base
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3application import NS3BaseApplication
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3application import NS3BaseApplication
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3application import NS3BaseApplication
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3mobilitymodel import NS3BaseMobilityModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3wifinetdevice import NS3BaseWifiNetDevice
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3errorratemodel import NS3BaseErrorRateModel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3wifichannel import NS3BaseWifiChannel
@clsinit_copy
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.ns3.ns3wifiphy import NS3BaseWifiPhy
@clsinit_copy
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
-from nepi.execution.resource import clsinit_copy, ResourceState, \
- reschedule_delay
+from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.ns3.ns3base import NS3Base
@clsinit_copy
class NS3BaseApplication(NS3Base):
_rtype = "abstract::ns3::Application"
+ def __init__(self, ec, guid):
+ super(NS3BaseApplication, self).__init__(ec, guid)
+ self._node = None
+
@property
def node(self):
- from nepi.resources.ns3.ns3node import NS3BaseNode
- nodes = self.get_connected(NS3BaseNode.get_rtype())
+ if not self._node:
+ from nepi.resources.ns3.ns3node import NS3BaseNode
+ nodes = self.get_connected(NS3BaseNode.get_rtype())
- if not nodes:
- msg = "Application not connected to node"
- self.error(msg)
- raise RuntimeError, msg
+ if not nodes:
+ msg = "Application not connected to node"
+ self.error(msg)
+ raise RuntimeError, msg
- return nodes[0]
+ self._node = nodes[0]
+
+ return self._node
@property
def _rms_to_wait(self):
def do_start(self):
if self.simulation.state < ResourceState.STARTED:
self.debug("---- RESCHEDULING START ----" )
- self.ec.schedule(reschedule_delay, self.start)
+ self.ec.schedule(self.reschedule_delay, self.start)
else:
super(NS3BaseApplication, self).do_start()
self._start_time = self.simulation.start_time
@property
def state(self):
if self._state == ResourceState.STARTED:
- is_running = self.simulation.invoke(self.uuid, "isAppRunning")
-
- if not is_running:
- self.set_stopped()
+ try:
+ is_running = self.simulation.invoke(self.uuid, "isAppRunning")
+
+ if not is_running:
+ self.set_stopped()
+ except:
+ msg = "Application failed. Can not retrieve state"
+ out = ""
+
+ import traceback
+ err = traceback.format_exc()
+ self.error(msg, out, err)
+ self.do_fail()
return self._state
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.execution.attribute import Flags
from nepi.execution.trace import TraceAttr
self._uuid = None
self._connected = set()
self._trace_filename = dict()
+ self._node = None
@property
def connected(self):
@property
def node(self):
- from nepi.resources.ns3.ns3node import NS3BaseNode
- nodes = self.get_connected(NS3BaseNode.get_rtype())
- if nodes: return nodes[0]
- return None
+ if not self._node:
+ from nepi.resources.ns3.ns3node import NS3BaseNode
+ nodes = self.get_connected(NS3BaseNode.get_rtype())
+ if nodes: self._node = nodes[0]
+
+ return self._node
def trace(self, name, attr = TraceAttr.ALL, block = 512, offset = 0):
filename = self._trace_filename.get(name)
kwargs = dict()
for attr in self._attrs.values():
- if not ( attr.has_flag(Flags.Construct) and attr.has_changed() ):
+ if not ( attr.has_flag(Flags.Construct) and attr.has_changed ):
continue
kwargs[attr.name] = attr._value
def do_deploy(self):
if self._wait_rms():
self.debug("---- RESCHEDULING DEPLOY ----" )
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
else:
self.do_discover()
self.do_provision()
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.attribute import Attribute, Flags, Types
-from nepi.execution.resource import clsinit_copy, ResourceState, reschedule_delay
+from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.ns3.ns3dceapplication import NS3BaseDceApplication
import os
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.attribute import Attribute, Flags, Types
-from nepi.execution.resource import clsinit_copy, ResourceState, reschedule_delay
+from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.ns3.ns3application import NS3BaseApplication
+from nepi.execution.trace import TraceAttr
from nepi.resources.ns3.ns3wrapper import SIMULATOR_UUID
_rtype = "abstract::ns3::DceApplication"
# Lock used to synchronize usage of DceManagerHelper
- dce_manager_lock = threading.Lock()
+ _dce_manager_lock = threading.Lock()
# Lock used to synchronize usage of DceApplicationHelper
- dce_application_lock = threading.Lock()
+ _dce_application_lock = threading.Lock()
_dce_manager_helper_uuid = None
_dce_application_helper_uuid = None
"DCE environment variables.",
flags = Flags.Design)
+ use_dlm = Attribute("useDlmLoader",
+ "Use ns3::DlmLoaderFactory as library loader",
+ type = Types.Bool,
+ flags = Flags.Design)
+
starttime = Attribute("StartTime",
"Time at which the application will start",
default = "+0.0ns",
cls._register_attribute(stack_size)
cls._register_attribute(arguments)
cls._register_attribute(environment)
+ cls._register_attribute(use_dlm)
cls._register_attribute(stoptime)
cls._register_attribute(starttime)
- @property
- def node(self):
- from nepi.resources.ns3.ns3node import NS3BaseNode
- nodes = self.get_connected(NS3BaseNode.get_rtype())
+ def __init__(self, ec, guid):
+ super(NS3BaseDceApplication, self).__init__(ec, guid)
+ self._pid = None
- if not nodes:
- msg = "DceApplication not connected to node"
- self.error(msg)
- raise RuntimeError, msg
-
- return nodes[0]
+ @property
+ def pid(self):
+ return self._pid
@property
def dce_manager_helper_uuid(self):
- if not self._dce_manager_helper_uuid:
- self._dce_manager_helper_uuid = self.simulation.create("DceManagerHelper")
- return self._dce_manager_helper_uuid
+ if not NS3BaseDceApplication._dce_manager_helper_uuid:
+ NS3BaseDceApplication._dce_manager_helper_uuid = \
+ self.simulation.create("DceManagerHelper")
+
+ if self.get("useDlmLoader"):
+ self.simulation.invoke(
+ NS3BaseDceApplication._dce_manager_helper_uuid,
+ "SetLoader",
+ "ns3::DlmLoaderFactory")
+
+ return NS3BaseDceApplication._dce_manager_helper_uuid
@property
def dce_application_helper_uuid(self):
- if not self._dce_application_helper_uuid:
- self._dce_application_helper_uuid = self.simulation.create("DceApplicationHelper")
- return self._dce_application_helper_uuid
+ if not NS3BaseDceApplication._dce_application_helper_uuid:
+ NS3BaseDceApplication._dce_application_helper_uuid = \
+ self.simulation.create("DceApplicationHelper")
+
+ return NS3BaseDceApplication._dce_application_helper_uuid
+
+ @property
+ def dce_manager_lock(self):
+ return NS3BaseDceApplication._dce_manager_lock
+
+ @property
+ def dce_application_lock(self):
+ return NS3BaseDceApplication._dce_application_lock
def _instantiate_object(self):
pass
self.dce_application_helper_uuid,
"InstallInNode", self.node.uuid)
+
"""
container_uuid = self.simulation.create("NodeContainer")
self.simulation.invoke(container_uuid, "Add", self.node.uuid)
def do_start(self):
if self.simulation.state < ResourceState.STARTED:
self.debug("---- RESCHEDULING START ----" )
- self.ec.schedule(reschedule_delay, self.start)
+ self.ec.schedule(self.reschedule_delay, self.start)
else:
- self._configure_traces()
- super(NS3BaseApplication, self).do_start()
- self._start_time = self.simulation.start_time
+ is_app_running = self.simulation.invoke(self.uuid, "isAppRunning")
- def _configure_traces(self):
- # Waiting until dce application is actually started
- is_running = False
- for i in xrange(200):
- is_running = self.simulation.invoke(self.uuid, "isAppRunning")
- is_finished = self.simulation.invoke(SIMULATOR_UUID, "isFinished")
-
- if is_running or is_finished:
- break
+ if is_app_running or self.simulation.state > ResourceState.STARTED:
+ super(NS3BaseApplication, self).do_start()
+ self._start_time = self.simulation.start_time
else:
- time.sleep(1)
- else:
- if not is_running:
- msg = " Application did not start"
- self.error(msg)
- raise RuntimeError
+ # Reschedule until dce application is actually started
+ self.debug("---- RESCHEDULING START ----" )
+ self.ec.schedule(self.reschedule_delay, self.start)
+
+ def trace(self, name, attr = TraceAttr.ALL, block = 512, offset = 0):
+ self._configure_traces()
+ return super(NS3BaseDceApplication, self).trace(name, attr = attr,
+ block = block, offset = offset)
+
+ def _configure_traces(self):
+ if self.pid is not None:
+ return
# Using lock to prevent concurrent access to the DceApplicationHelper
# from different DceApplication RMs
with self.dce_application_lock:
- pid = self.simulation.invoke(self.dce_application_helper_uuid,
+ self._pid = self.simulation.invoke(self.dce_application_helper_uuid,
"GetPid", self.uuid)
-
- node_id = self.simulation.invoke(self.node.uuid, "GetId")
- self._trace_filename["stdout"] = "files-%s/var/log/%s/stdout" % (node_id, pid)
- self._trace_filename["stderr"] = "files-%s/var/log/%s/stderr" % (node_id, pid)
- self._trace_filename["status"] = "files-%s/var/log/%s/status" % (node_id, pid)
- self._trace_filename["cmdline"] = "files-%s/var/log/%s/cmdline" % (node_id, pid)
+
+ node_id = self.node.node_id
+ self._trace_filename["stdout"] = "files-%s/var/log/%s/stdout" % (node_id, self.pid)
+ self._trace_filename["stderr"] = "files-%s/var/log/%s/stderr" % (node_id, self.pid)
+ self._trace_filename["status"] = "files-%s/var/log/%s/status" % (node_id, self.pid)
+ self._trace_filename["cmdline"] = "files-%s/var/log/%s/cmdline" % (node_id, self.pid)
--- /dev/null
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2014 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.resource import clsinit_copy
+from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
+
+@clsinit_copy
+class NS3BaseFdNetDevice(NS3BaseNetDevice):
+ _rtype = "abstract::ns3::FdNetDevice"
+
+ @property
+ def _rms_to_wait(self):
+ rms = set([self.node])
+ return rms
+
+ def _configure_mac_address(self):
+ # The wifimac is the one responsible for
+ # configuring the MAC address
+ pass
+
+ def _connect_object(self):
+ node = self.node
+ if node and node.uuid not in self.connected:
+ self.simulation.invoke(node.uuid, "AddDevice", self.uuid)
+ self._connected.add(node.uuid)
+
+ def _instantiate_object(self):
+ """ just validate that the simulator is in real time
+ mode, otherwise it is not going to work
+ """
+
+ mode = self.simulation.get("simulatorImplementationType")
+ if mode != "ns3::RealtimeSimulatorImpl":
+ msg = "The simulation must run in real time!!"
+ self.error(msg)
+ raise RuntimeError, msg
+
+ super(NS3BaseFdNetDevice, self)._instantiate_object()
+
+ def send_fd(self, fd):
+ import passfd
+ import socket
+
+ address = self.simulation.invoke(self.uuid, "recvFD")
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+ sock.connect(address)
+ passfd.sendfd(sock, fd, '0')
+
classname = "WimaxHelper"
elif rtype == "ns3::WifiNetDevice":
classname = "YansWifiPhyHelper"
+ elif rtype == "ns3::FdNetDevice":
+ classname = "FdNetDeviceHelper"
self._device_helper_uuid = self.simulation.create(classname)
class NS3BaseNode(NS3Base):
_rtype = "abstract::ns3::Node"
+ def __init__(self, ec, guid):
+ super(NS3BaseNode, self).__init__(ec, guid)
+ self._simulation = None
+ self._node_id = None
+ self._ipv4 = None
+ self._arp = None
+ self._mobility = None
+ self._devices = None
+ self._dceapplications = None
+
@classmethod
def _register_attributes(cls):
enablestack = Attribute("enableStack",
@property
def simulation(self):
- from nepi.resources.ns3.ns3simulation import NS3Simulation
- for guid in self.connections:
- rm = self.ec.get_resource(guid)
- if isinstance(rm, NS3Simulation):
- return rm
-
- msg = "Node not connected to simulation"
- self.error(msg)
- raise RuntimeError, msg
-
+ if not self._simulation:
+ from nepi.resources.ns3.ns3simulation import NS3Simulation
+ for guid in self.connections:
+ rm = self.ec.get_resource(guid)
+ if isinstance(rm, NS3Simulation):
+ self._simulation = rm
+
+ if not self._simulation:
+ msg = "Node not connected to simulation"
+ self.error(msg)
+ raise RuntimeError, msg
+
+ return self._simulation
+
@property
def ipv4(self):
- from nepi.resources.ns3.ns3ipv4l3protocol import NS3BaseIpv4L3Protocol
- ipv4s = self.get_connected(NS3BaseIpv4L3Protocol.get_rtype())
- if ipv4s: return ipv4s[0]
- return None
+ if not self._ipv4:
+ from nepi.resources.ns3.ns3ipv4l3protocol import NS3BaseIpv4L3Protocol
+ ipv4s = self.get_connected(NS3BaseIpv4L3Protocol.get_rtype())
+ if ipv4s:
+ self._ipv4 = ipv4s[0]
+
+ return self._ipv4
@property
def arp(self):
- from nepi.resources.ns3.ns3arpl3protocol import NS3BaseArpL3Protocol
- arps = self.get_connected(NS3BaseArpL3Protocol.get_rtype())
- if arps: return arps[0]
- return None
+ if not self._arp:
+ from nepi.resources.ns3.ns3arpl3protocol import NS3BaseArpL3Protocol
+ arps = self.get_connected(NS3BaseArpL3Protocol.get_rtype())
+ if arps:
+ self._arp = arps[0]
+
+ return self._arp
@property
def mobility(self):
- from nepi.resources.ns3.ns3mobilitymodel import NS3BaseMobilityModel
- mobility = self.get_connected(NS3BaseMobilityModel.get_rtype())
- if mobility: return mobility[0]
- return None
+ if not self._mobility:
+ from nepi.resources.ns3.ns3mobilitymodel import NS3BaseMobilityModel
+ mobility = self.get_connected(NS3BaseMobilityModel.get_rtype())
+ if mobility:
+ self._mobility = mobility[0]
+
+ return self._mobility
@property
def devices(self):
- from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
- devices = self.get_connected(NS3BaseNetDevice.get_rtype())
+ if not self._devices:
+ from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
+ devices = self.get_connected(NS3BaseNetDevice.get_rtype())
- if not devices:
- msg = "Node not connected to devices"
- self.error(msg)
- raise RuntimeError, msg
+ if not devices:
+ msg = "Node not connected to devices"
+ self.error(msg)
+ raise RuntimeError, msg
- return devices
+ self._devices = devices
+
+ return self._devices
+
+ @property
+ def node_id(self):
+ return self._node_id
@property
def dceapplications(self):
- from nepi.resources.ns3.ns3dceapplication import NS3BaseDceApplication
- dceapplications = self.get_connected(NS3BaseDceApplication.get_rtype())
+ if not self._dceapplications:
+ from nepi.resources.ns3.ns3dceapplication import NS3BaseDceApplication
+ self._dceapplications = self.get_connected(NS3BaseDceApplication.get_rtype())
- return dceapplications
+ return self._dceapplications
@property
def _rms_to_wait(self):
if self.get("enableStack"):
uuid_stack_helper = self.simulation.create("InternetStackHelper")
self.simulation.invoke(uuid_stack_helper, "Install", self.uuid)
+
+ # Retrieve IPV4 object
+ ipv4_uuid = self.simulation.invoke(self.uuid, "retrieveObject",
+ "ns3::Ipv4L3Protocol")
+
+ # Add IPv4 RM to the node
+ ipv4 = self.ec.register_resource("ns3::Ipv4L3Protocol")
+ self.ec.register_connection(self.guid, ipv4)
+ ipv4rm = self.ec.get_resource(ipv4)
+ ipv4rm._uuid = ipv4_uuid
+ ipv4rm.set_started()
else:
### node.AggregateObject(PacketSocketFactory())
uuid_packet_socket_factory = self.simulation.create("PacketSocketFactory")
self.simulation.invoke(self.uuid, "AggregateObject", uuid_packet_socket_factory)
+ self._node_id = self.simulation.invoke(self.uuid, "GetId")
+
dceapplications = self.dceapplications
if dceapplications:
self._add_dce(dceapplications)
def _connect_object(self):
- ipv4 = self.ipv4
- if ipv4:
- self.simulation.invoke(self.uuid, "AggregateObject", ipv4.uuid)
- self._connected.add(ipv4.uuid)
- ipv4._connected.add(self.uuid)
-
- arp = self.arp
- if arp:
- self.simulation.invoke(self.uuid, "AggregateObject", arp.uuid)
- self._connected.add(arp.uuid)
- arp._connected.add(self.uuid)
+ if not self.get("enableStack"):
+ ipv4 = self.ipv4
+ if ipv4:
+ self.simulation.invoke(self.uuid, "AggregateObject", ipv4.uuid)
+ self._connected.add(ipv4.uuid)
+ ipv4._connected.add(self.uuid)
+
+ arp = self.arp
+ if arp:
+ self.simulation.invoke(self.uuid, "AggregateObject", arp.uuid)
+ self._connected.add(arp.uuid)
+ arp._connected.add(self.uuid)
mobility = self.mobility
if mobility:
--- /dev/null
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2014 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.attribute import Attribute, Flags, Types
+from nepi.execution.resource import clsinit_copy
+from nepi.resources.ns3.ns3base import NS3Base
+
+import socket
+
+@clsinit_copy
+class NS3BasePipeChannel(NS3Base):
+ """ Interconnects two FdNetDevices with a PIPE
+ """
+ _rtype = "ns3::PipeChannel"
+
+ def __init__(self, ec, guid):
+ super(NS3BasePipeChannel, self).__init__(ec, guid)
+ self._devices = None
+
+ @property
+ def devices(self):
+ if not self._devices:
+ from nepi.resources.ns3.ns3fdnetdevice import NS3BaseFdNetDevice
+ devices = self.get_connected(NS3BaseFdNetDevice.get_rtype())
+ if not devices or len(devices) != 2:
+                msg = "PipeChannel must be connected to exactly two FdNetDevices"
+ self.error(msg)
+ raise RuntimeError, msg
+
+ self._devices = devices
+
+ return self._devices
+
+ @property
+ def node(self):
+ return self.devices[0].node
+
+ @property
+ def _rms_to_wait(self):
+ rms = set(self.devices)
+ return rms
+
+ def _instantiate_object(self):
+        """ The pipe channel does not really exist as an ns-3 object.
+ Do nothing.
+ """
+ pass
+
+ def _connect_object(self):
+ dev1 = self.devices[0]
+ dev2 = self.devices[1]
+
+ if dev1.uuid not in self.connected and dev2.uuid not in self.connected:
+ (s0, s1) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM, 0)
+
+ dev1.send_fd(s0)
+
+ self._connected.add(dev1.uuid)
+ dev1._connected.add(self.uuid)
+
+ dev2.send_fd(s1)
+
+ self._connected.add(dev2.uuid)
+ dev2._connected.add(self.uuid)
+
+
return "FLUSHED"
-def create_socket(socket_name):
+def open_socket(socket_name):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind(socket_name)
return sock
+def close_socket(sock):
+ try:
+ sock.close()
+ except:
+ pass
+
def recv_msg(conn):
msg = []
chunk = ''
ns3_wrapper.logger.info("STARTING...")
# create unix socket to receive instructions
- sock = create_socket(socket_name)
+ sock = open_socket(socket_name)
sock.listen(0)
# wait for messages to arrive and process them
(msg_type, args, kwargs) = recv_msg(conn)
except socket.timeout, e:
# Ingore time-out
+ close_socket(conn)
continue
if not msg_type:
# Ignore - connection lost
- break
+ close_socket(conn)
+ continue
if msg_type == NS3WrapperMessage.SHUTDOWN:
stop = True
import traceback
err = traceback.format_exc()
ns3_wrapper.logger.error(err)
+ close_socket(conn)
raise
try:
send_reply(conn, reply)
except socket.error:
- break
+ import traceback
+ err = traceback.format_exc()
+ ns3_wrapper.logger.error(err)
+ close_socket(conn)
+ raise
+ close_socket(conn)
+
+ close_socket(sock)
+
ns3_wrapper.logger.info("EXITING...")
if __name__ == '__main__':
@property
def _rms_to_wait(self):
- rms = set()
-
- node = self.node
- rms.add(node)
-
- ipv4 = node.ipv4
- if node.ipv4:
- rms.add(ipv4)
-
+ rms = set([self.node, self.node.ipv4])
return rms
def _configure_mac_address(self):
@property
def is_running(self):
- return self._started and not self.ns3.Simulator.IsFinished()
+ return self.is_started and not self.ns3.Simulator.IsFinished()
+
+ @property
+ def is_started(self):
+ if not self._started:
+ now = self.ns3.Simulator.Now()
+ if not now.IsZero():
+ self._started = True
+
+ return self._started
@property
def is_finished(self):
### DEBUG
self.logger.debug("FACTORY %s( %s )" % (type_name, str(kwargs)))
+ ### DUMP
self.debuger.dump_factory(uuid, type_name, kwargs)
- ########
factory = self.ns3.ObjectFactory()
factory.SetTypeId(type_name)
### DEBUG
self.logger.debug("RET FACTORY ( uuid %s ) %s = %s( %s )" % (
str(uuid), str(obj), type_name, str(kwargs)))
- ########
return uuid
### DEBUG
self.logger.debug("CREATE %s( %s )" % (clazzname, str(args)))
+ ### DUMP
self.debuger.dump_create(uuid, clazzname, args)
- ########
clazz = getattr(self.ns3, clazzname)
### DEBUG
self.logger.debug("RET CREATE ( uuid %s ) %s = %s( %s )" % (str(uuid),
str(obj), clazzname, str(args)))
- ########
return uuid
if operation == "isRunning":
result = self.is_running
+
+ elif operation == "isStarted":
+ result = self.is_started
+
elif operation == "isFinished":
result = self.is_finished
+
elif operation == "isAppRunning":
result = self._is_app_running(uuid)
- elif operation == "addStaticRoute":
- ### DEBUG
- self.debuger.dump_add_static_route(uuid, args)
- ########
+ elif operation == "recvFD":
+            ### passFD operation binds to a different random socket
+            ### in every execution, so the socket name that could be
+            ### dumped to the debug script using dump_invoke would
+            ### not be valid across debug executions.
+ result = self._recv_fd(uuid, *args, **kwargs)
+
+ elif operation == "addStaticRoute":
result = self._add_static_route(uuid, *args)
+
+ ### DUMP - result is static, so will be dumped as plain text
+ self.debuger.dump_invoke(result, uuid, operation, args, kwargs)
+
+ elif operation == "retrieveObject":
+ result = self._retrieve_object(uuid, *args, **kwargs)
+
+ ### DUMP - result is static, so will be dumped as plain text
+ self.debuger.dump_invoke(result, uuid, operation, args, kwargs)
+
else:
newuuid = self.make_uuid()
- ### DEBUG
+            ### DUMP - result is a uuid that encodes a dynamically generated
+            ### object
self.debuger.dump_invoke(newuuid, uuid, operation, args, kwargs)
- ########
if uuid.startswith(SINGLETON):
obj = self._singleton(uuid)
### DEBUG
self.logger.debug("SET %s %s %s" % (uuid, name, str(value)))
+ ### DUMP
self.debuger.dump_set(uuid, name, value)
- ########
obj = self.get_object(uuid)
type_name = obj.GetInstanceTypeId().GetName()
### DEBUG
self.logger.debug("RET SET %s = %s -> set(%s, %s)" % (str(value), uuid, name,
str(value)))
- ########
return value
### DEBUG
self.logger.debug("GET %s %s" % (uuid, name))
+ ### DUMP
self.debuger.dump_get(uuid, name)
- ########
obj = self.get_object(uuid)
type_name = obj.GetInstanceTypeId().GetName()
### DEBUG
self.logger.debug("RET GET %s = %s -> get(%s)" % (str(result), uuid, name))
- ########
return result
def start(self):
- ### DEBUG
+ ### DUMP
self.debuger.dump_start()
- ########
# Launch the simulator thread and Start the
# simulator in that thread
args = [self._condition])
self._simulator_thread.setDaemon(True)
self._simulator_thread.start()
- self._started = True
-
+
### DEBUG
self.logger.debug("START")
- ########
def stop(self, time = None):
- ### DEBUG
+ ### DUMP
self.debuger.dump_stop(time=time)
- ########
if time is None:
self.ns3.Simulator.Stop()
### DEBUG
self.logger.debug("STOP time=%s" % str(time))
- ########
def shutdown(self):
- ### DEBUG
+ ### DUMP
self.debuger.dump_shutdown()
- ########
while not self.ns3.Simulator.IsFinished():
#self.logger.debug("Waiting for simulation to finish")
### DEBUG
self.logger.debug("SHUTDOWN")
- ########
def _simulator_run(self, condition):
# Run simulation
return realkwargs
- def _is_app_running(self, uuid):
+ def _is_app_running(self, uuid):
now = self.ns3.Simulator.Now()
if now.IsZero():
return False
+ if self.ns3.Simulator.IsFinished():
+ return False
+
app = self.get_object(uuid)
stop_time_value = self.ns3.TimeValue()
app.GetAttribute("StopTime", stop_time_value)
app.GetAttribute("StartTime", start_time_value)
start_time = start_time_value.Get()
- if now.Compare(start_time) >= 0 and now.Compare(stop_time) < 0:
- return True
+ if now.Compare(start_time) >= 0:
+ if stop_time.IsZero() or now.Compare(stop_time) < 0:
+ return True
return False
return ifindex
return ifindex
+ def _retrieve_object(self, uuid, typeid, search = False):
+ obj = self.get_object(uuid)
+
+ type_id = self.ns3.TypeId()
+ tid = type_id.LookupByName(typeid)
+ nobj = obj.GetObject(tid)
+
+ newuuid = None
+ if search:
+ # search object
+ for ouuid, oobj in self._objects.iteritems():
+ if nobj == oobj:
+ newuuid = ouuid
+ break
+ else:
+ newuuid = self.make_uuid()
+ self._objects[newuuid] = nobj
+
+ return newuuid
+
+ def _recv_fd(self, uuid):
+ """ Waits on a local address to receive a file descriptor
+ from a local process. The file descriptor is associated
+        to a FdNetDevice to establish communication between the
+        simulation and whatever process writes on that file descriptor
+ """
+
+ def recvfd(sock, fdnd):
+ (fd, msg) = passfd.recvfd(sock)
+ # Store a reference to the endpoint to keep the socket alive
+ fdnd.SetFileDescriptor(fd)
+
+ import passfd
+ import socket
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+ sock.bind("")
+ address = sock.getsockname()
+
+ fdnd = self.get_object(uuid)
+ t = threading.Thread(target=recvfd, args=(sock,fdnd))
+ t.start()
+
+ return address
+
+
command = "wrapper.shutdown()\n\n"
self.dump_to_script(command)
- def dump_add_static_route(self, uuid, args):
- if not self.enabled:
- return
-
- command = ("args = %(args)s\n"
- "wrapper._add_static_route(%(uuid)s, *args)\n\n"
- ) % dict({
- "uuid": self.format_value(uuid),
- "args": self.format_args(args),
- })
-
- self.dump_to_script(command)
-
def format_value(self, value):
if isinstance(value, str) and value.startswith("uuid"):
return value.replace("-", "")
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+#
+# Instructions to automatically generate ns-3 ResourceManagers
#
-# Instructions. Run with:
+# Configure the ns-3 environment (e.g.):
+#
+# export PYTHONPATH=~/.nepi/nepi-usr/bin/ns-3/ns-3.20/optimized/build/lib/python/site-packages
+# export LD_LIBRARY_PATH=~/.nepi/nepi-usr/bin/ns-3/ns-3.20/optimized/build/lib
+#
+# Run the RM generator:
#
# PYTHONPATH=$PYTHONPATH:~/repos/nepi/src python src/nepi/resources/ns3/resource_manager_generator.py
#
"ns3::ErrorModel",
"ns3::ErrorRateModel",
"ns3::Application",
+ "ns3::FdNetDevice",
#"ns3::DceApplication",
"ns3::NetDevice",
"ns3::Channel",
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
<BASE_CLASS_IMPORT>
@clsinit_copy
import os
+from nepi.util.timefuncs import tnow
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
+from nepi.execution.trace import Trace, TraceAttr
from nepi.execution.attribute import Attribute, Flags
from nepi.resources.omf.omf_resource import ResourceGateway, OMFResource
from nepi.resources.omf.node import OMFNode, confirmation_counter, reschedule_check
:type guid: int
"""
- _rtype = "OMFApplication"
- _authorized_connections = ["OMFNode", "WilabtSfaNode"]
+ _rtype = "omf::Application"
+ _authorized_connections = ["omf::Node", "wilabt::sfa::Node"]
@classmethod
def _register_attributes(cls):
self.release_id = None
self._release_cnt = 0
+ # For performance tests
+ self.begin_deploy_time = None
+ self.begin_start_time = None
+ self.begin_release_time = None
+ self.dperf = True
+ self.sperf = True
+ self.rperf = True
+
self.add_set_hook()
def _init_command(self):
if not self.node or self.node.state < ResourceState.READY:
self.debug("---- RESCHEDULING DEPLOY ---- node state %s "
% self.node.state )
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
return
+ ## For performance test
+ if self.dperf:
+ self.begin_deploy_time = tnow()
+ self.dperf = False
+
self._init_command()
self.set('xmppUser',self.node.get('xmppUser'))
self.get('xmppPassword'), exp_id = self.exp_id)
if self.get('version') == "5":
+
+ self.begin_deploy_time = tnow()
+
if self.get('sources'):
gateway = ResourceGateway.AMtoGateway[self.get('xmppServer')]
user = self.get('sshUser') or self.get('xmppUser')
return uid
return False
+ def trace(self, name, attr = TraceAttr.ALL, block = 512, offset = 0):
+ self.info("Retrieving '%s' trace %s " % (name, attr))
+ if name == 'stdout' :
+ suffix = '.out'
+ elif name == 'stderr' :
+ suffix = '.err'
+ else :
+ suffix = '.misc'
+
+ trace_path = '/tmp/'+ self._topic_app + suffix
+
+ if attr == TraceAttr.PATH:
+ return trace_path
+
+ if attr == TraceAttr.ALL:
+ try:
+ f = open(trace_path ,'r')
+ except IOError:
+ print "File with traces has not been found"
+ return False
+ out = f.read()
+ f.close()
+ return out
+
+
def do_start(self):
""" Start the RM. It means : Send Xmpp Message Using OMF protocol
to execute the application.
"""
+ ## For performance test
+ if self.sperf:
+ self.begin_start_time = tnow()
+ self.sperf = False
if not self.get('env'):
self.set('env', " ")
if self.get('version') == "5":
+ self.begin_start_time = tnow()
# Some information to check the command for OMF5
msg = " " + self.get_rtype() + " ( Guid : " + str(self._guid) +") : " + \
self.get('appid') + " : " + self._path + " : " + \
State is set to STOPPED after the message is sent.
"""
+
+
if self.get('version') == 5:
self._omf_api.exit(self.node.get('hostname'),self.get('appid'))
super(OMFApplication, self).do_stop()
""" Clean the RM at the end of the experiment and release the API.
"""
+ ## For performance test
+ if self.rperf:
+ self.begin_release_time = tnow()
+ self.rperf = False
+
if self._omf_api:
if self.get('version') == "6" and self._topic_app:
if not self.release_id:
msg = "Couldn't retrieve the confirmation of the release"
self.error(msg)
+ # Remove the stdout and stderr of the application
+ try:
+ os.remove('/tmp/'+self._topic_app +'.out')
+ os.remove('/tmp/'+self._topic_app +'.err')
+ except OSError:
+ pass
OMFAPIFactory.release_api(self.get('version'),
self.get('xmppServer'), self.get('xmppUser'), self.get('xmppPort'),
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
# Julien Tribino <julien.tribino@inria.fr>
+from nepi.util.timefuncs import tnow
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.execution.attribute import Attribute, Flags
from nepi.resources.omf.omf_resource import ResourceGateway, OMFResource
:type creds: dict
"""
- _rtype = "OMFChannel"
- _authorized_connections = ["OMFWifiInterface", "OMFNode"]
+ _rtype = "omf::Channel"
+ _authorized_connections = ["omf::WifiInterface", "omf::Node"]
ChannelToFreq = dict({
"1" : "2412",
self._omf_api = None
+ # For performance tests
+ self.perf = True
+ self.begin_deploy_time = None
+
+
@property
def exp_id(self):
return self.ec.exp_id
rm_iface = self.ec.get_resource(elt)
for conn in rm_iface.connections:
rm_node = self.ec.get_resource(conn)
- if rm_node.get_rtype() == "OMFNode" and rm_node.get('hostname'):
+ if rm_node.get_rtype() == "omf::Node" and rm_node.get('hostname'):
if rm_iface.state < ResourceState.PROVISIONED or \
rm_node.state < ResourceState.READY:
return "reschedule"
using OMF 5.4 or 6 protocol to configure the channel.
"""
+
+ ## For performance test
+ if self.perf:
+ self.begin_deploy_time = tnow()
+ self.perf = False
+
if not self.get('channel'):
msg = "Channel's value is not initialized"
self.error(msg)
super(OMFChannel, self).do_deploy()
return
-
if not self.get('xmppServer'):
msg = "XmppServer is not initialzed. XMPP Connections impossible"
self.error(msg)
self._nodes_guid = self._get_target(self._connections)
-
-
if self._nodes_guid == "reschedule" :
self.ec.schedule("1s", self.deploy)
else:
# Julien Tribino <julien.tribino@inria.fr>
import os, time
+from nepi.util.timefuncs import tnow
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.execution.attribute import Attribute, Flags
from nepi.resources.omf.node import OMFNode, confirmation_counter, reschedule_check
:type guid: int
"""
- _rtype = "OMFWifiInterface"
- _authorized_connections = ["OMFNode" , "OMFChannel", "WilabtSfaNode"]
+ _rtype = "omf::WifiInterface"
+ _authorized_connections = ["omf::Node" , "omf::Channel", "wilabt::sfa::Node"]
@classmethod
def _register_attributes(cls):
self._omf_api = None
self._type = ""
+ # For performance tests
+ self.perf = True
+ self.begin_deploy_time = None
def valid_connection(self, guid):
""" Check if the connection with the guid in parameter is possible.
"""
if self.node.state < ResourceState.READY:
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
return False
for attrname in ["mode", "type", "essid"]:
"""
if self.channel.state < ResourceState.READY:
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
return False
attrval = self.get("ip")
if not self.node or self.node.state < ResourceState.READY:
self.debug("---- RESCHEDULING DEPLOY ---- node state %s "
% self.node.state )
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
return
if not self.channel or self.channel.state < ResourceState.READY:
self.debug("---- RESCHEDULING DEPLOY ---- channel state %s "
% self.channel.state )
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
return
+ ## For performance test
+ if self.perf:
+ self.begin_deploy_time = tnow()
+ self.perf = False
+
self.set('xmppUser',self.node.get('xmppUser'))
self.set('xmppServer',self.node.get('xmppServer'))
self.set('xmppPort',self.node.get('xmppPort'))
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
# Julien Tribino <julien.tribino@inria.fr>
+from nepi.util.timefuncs import tnow
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.execution.attribute import Attribute, Flags
from nepi.resources.omf.omf_resource import ResourceGateway, OMFResource
from nepi.resources.omf.omf_api_factory import OMFAPIFactory
import time
-reschedule_check = "0.2s"
-confirmation_counter = 600
+reschedule_check = "1s"
+confirmation_counter = 3600
@clsinit_copy
class OMFNode(OMFResource):
:type creds: dict
"""
- _rtype = "OMFNode"
- _authorized_connections = ["OMFApplication" , "OMFWifiInterface"]
+ _rtype = "omf::Node"
+ _authorized_connections = ["omf::Application" , "omf::WifiInterface"]
@classmethod
def _register_attributes(cls):
self._omf_api = None
+ # For performance tests
+ self.perf = True
+ self.begin_deploy_time = None
+
+
@property
def exp_id(self):
return self.ec.exp_id
to enroll the node into the experiment.
"""
+ ## For performance test
+ if self.perf:
+ self.begin_deploy_time = tnow()
+ self.perf = False
+
if not self.get('xmppServer'):
msg = "XmppServer is not initialzed. XMPP Connections impossible"
self.error(msg)
if rm_list:
for rm in rm_list:
if rm.state < ResourceState.RELEASED:
- self.ec.schedule(reschedule_delay, self.release)
+ self.ec.schedule(self.reschedule_delay, self.release)
return
from nepi.resources.omf.interface import OMFWifiInterface
if rm_list:
for rm in rm_list:
if rm.state < ResourceState.RELEASED:
- self.ec.schedule(reschedule_delay, self.release)
+ self.ec.schedule(self.reschedule_delay, self.release)
return
if self._omf_api:
def check_ready(self, xmpp):
delay = 1.0
- for i in xrange(10):
+ for i in xrange(15):
if xmpp.ready:
break
else:
""" Delete the session and logger topics. Then disconnect
"""
+ # To receive the last messages
+ time.sleep(2)
+
self._client.delete(self._nepi_topic)
# Wait the send queue to be empty before disconnect
from nepi.util.logger import Logger
+import os
import traceback
import xml.etree.ElementTree as ET
"""
super(OMF6Parser, self).__init__("OMF6API")
self.mailbox={}
+ self.traces={}
+ self.trace='NULL'
self.init_mailbox()
+
def init_mailbox(self):
self.mailbox['create'] = []
self.mailbox['started'] = []
"""
props = self._check_for_props(root, namespaces)
uid = self._check_for_tag(root, namespaces, "uid")
- msg = "STATUS -- "
+ event = self._check_for_tag(root, namespaces, "event")
+
+ log = "STATUS -- "
for elt in props.keys():
ns, tag = elt.split('}')
if tag == "it":
- msg = msg + "membership : " + props[elt]+" -- "
+ log = log + "membership : " + props[elt]+" -- "
elif tag == "event":
self.mailbox['started'].append(uid)
- msg = msg + "event : " + props[elt]+" -- "
+ log = log + "event : " + props[elt]+" -- "
+ elif tag == "msg":
+ if event == "STDOUT" :
+ filename = os.path.join("/tmp", "%s.out" % uid)
+ f = open(filename,'a+')
+ # XXX: Adding fake \n for visual formatting
+ msg = props[elt] + "\n"
+ f.write(msg)
+ f.close()
+ elif event == "STDERR" :
+ filename = os.path.join("/tmp", "%s.err" % uid)
+ f = open(filename,'a+')
+ # XXX: Adding fake \n for visual formatting
+ msg = props[elt] + "\n"
+ f.write(msg)
+ f.close()
+ log = log + tag +" : " + props[elt]+" -- "
else:
- msg = msg + tag +" : " + props[elt]+" -- "
- msg = msg + " STATUS "
- self.info(msg)
+ log = log + tag +" : " + props[elt]+" -- "
+ log = log + " STATUS "
+ self.info(log)
def _inform_released(self, root, namespaces):
""" Parse and Display RELEASED message
if cls._apis[key]['cnt'] == 0:
omf_api = cls._apis[key]['api']
omf_api.disconnect()
-
+ del cls._apis[key]
@classmethod
def _make_key(cls, *args):
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
class ResourceGateway:
Generic resource gathering XMPP credential information and common methods
for OMF nodes, channels, applications, etc.
"""
- _rtype = "OMFResource"
+ _rtype = "abstract::omf::Resource"
@classmethod
def _register_attributes(cls):
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.omf.node import OMFNode
from nepi.util.sfaapi import SFAAPIFactory
from nepi.util.execfuncs import lexec
@clsinit_copy
class WilabtSfaNode(OMFNode):
- _rtype = "WilabtSfaNode"
+ _rtype = "wilabt::sfa::Node"
_help = "Controls a Wilabt host accessible using a SSH key " \
"and provisioned using SFA"
_backend = "omf"
host = Attribute("host", "Name of the physical machine",
flags = Flags.Design)
- #disk_image = Attribute("disk_image", "Specify a specific disk image for a node",
- # flags = Flags.Design)
+ disk_image = Attribute("disk_image", "Specify a specific disk image for a node",
+ flags = Flags.Design)
cls._register_attribute(username)
cls._register_attribute(identity)
cls._register_attribute(gateway_user)
cls._register_attribute(gateway)
cls._register_attribute(host)
- #cls._register_attribute(disk_image)
+ cls._register_attribute(disk_image)
def __init__(self, ec, guid):
super(WilabtSfaNode, self).__init__(ec, guid)
Based on the attributes defined by the user, discover the suitable
node for provision.
"""
- if self._skip_provision():
- super(WilabtSfaNode, self).do_discover()
- return
-
nodes = self.sfaapi.get_resources_hrn()
host = self._get_host()
Add node to user's slice and verifing that the node is functioning
correctly. Check ssh, omf rc running, hostname, file system.
"""
- if self._skip_provision():
- super(WilabtSfaNode, self).do_provision()
- return
-
provision_ok = False
ssh_ok = False
proc_ok = False
while not provision_ok:
node = self._node_to_provision
- if self._slicenode:
- self._delete_from_slice()
- self.debug("Waiting 300 sec for re-adding to slice")
- time.sleep(300) # Timout for the testbed to allow a new reservation
+ #if self._slicenode:
+ # self._delete_from_slice()
+ # self.debug("Waiting 480 sec for re-adding to slice")
+ # time.sleep(480) # Timout for the testbed to allow a new reservation
self._add_node_to_slice(node)
t = 0
while not self._check_if_in_slice([node]) and t < timeout \
"""
self.info(" Adding node to slice ")
slicename = self.get("slicename")
- #disk_image = self.get("disk_image")
- #if disk_image is not None:
- # properties = {'disk_image': disk_image}
- #else: properties = None
- properties = None
+ disk_image = self.get("disk_image")
+ if disk_image is not None:
+ properties = {'disk_image': disk_image}
+ else: properties = None
+ #properties = None
self.sfaapi.add_resource_to_slice_batch(slicename, host_hrn, properties=properties)
def _delete_from_slice(self):
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.linux.node import LinuxNode
from nepi.resources.planetlab.plcapi import PLCAPIFactory
from nepi.util.execfuncs import lexec
@clsinit_copy
class PlanetlabNode(LinuxNode):
- _rtype = "PlanetlabNode"
+ _rtype = "planetlab::Node"
_help = "Controls a PlanetLab host accessible using a SSH key " \
"associated to a PlanetLab user account"
_backend = "planetlab"
default = False,
flags = Flags.Global)
-
cls._register_attribute(ip)
cls._register_attribute(pl_url)
cls._register_attribute(pl_ptn)
from nepi.resources.linux.application import LinuxApplication
import os
-reschedule_delay = "0.5s"
-
@clsinit_copy
class OVSSwitch(LinuxApplication):
"""
"""
- _rtype = "OVSSwitch"
+ _rtype = "planetlab::OVSSwitch"
_help = "Runs an OpenVSwitch on a PlanetLab host"
_backend = "planetlab"
- _authorized_connections = ["PlanetlabNode", "OVSPort", "LinuxNode"]
+    _authorized_connections = ["planetlab::Node", "planetlab::OVSPort", "linux::Node"]
@classmethod
def _register_attributes(cls):
"""
if not self.node or self.node.state < ResourceState.READY:
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
return
self.do_discover()
"""
from nepi.resources.planetlab.openvswitch.ovsport import OVSPort
- rm = self.get_connected(OVSPort.get_rtype())
+ rms = self.get_connected(OVSPort.get_rtype())
- if rm[0].state < ResourceState.RELEASED:
- self.ec.schedule(reschedule_delay, self.release)
- return
+ for rm in rms :
+ if rm.state < ResourceState.RELEASED:
+ self.ec.schedule(self.reschedule_delay, self.release)
+ return
cmd = "sliver-ovs del-bridge %s" % self.get('bridge_name')
(out, err), proc = self.node.run(cmd, self.ovs_checks,
from nepi.resources.planetlab.node import PlanetlabNode
from nepi.resources.linux.application import LinuxApplication
-reschedule_delay = "0.5s"
+import os
@clsinit_copy
class OVSPort(LinuxApplication):
"""
- _rtype = "OVSPort"
+ _rtype = "planetlab::OVSPort"
_help = "Runs an OpenVSwitch on a PlanetLab host"
_backend = "planetlab"
- _authorized_connections = ["OVSSwitch", "OVSTunnel"]
+ _authorized_connections = ["planetlab::OVSSwitch", "linux::UdpTunnel", "linux::Tunnel"]
@classmethod
def _register_attributes(cls):
"""
port_name = Attribute("port_name", "Name of the port",
flags = Flags.Design)
+ ip = Attribute("ip", "IP of the endpoint. This is the attribute "
+ "you should use to establish a tunnel or a remote "
+ "connection between endpoint",
+ flags = Flags.Design)
+ network = Attribute("network", "Network used by the port",
+ flags = Flags.Design)
cls._register_attribute(port_name)
+ cls._register_attribute(ip)
+ cls._register_attribute(network)
def __init__(self, ec, guid):
"""
"""
super(OVSPort, self).__init__(ec, guid)
+
+
self._port_number = None
- self.port_info = []
+ # in case of connection by tunnel
+ self._remote_ip = None
def log_message(self, msg):
return " guid %d - OVSPort - %s " % (self.guid, msg)
if ovsswitch: return ovsswitch[0]
return None
+ @property
+ def remote_ip(self):
+ return self._remote_ip
+
@property
def port_number(self):
return self._port_number
self.info("Created the port %s on switch %s" % (self.get('port_name'),
self.ovsswitch.get('bridge_name')))
- def get_local_end(self):
+ def initiate_udp_connection(self, remote_endpoint, connection_app_home,
+ connection_run_home, cipher, cipher_key, bwlimit, txqueuelen):
""" Get the local_endpoint of the port
"""
+ self._remote_ip = remote_endpoint.node.get("ip")
+
msg = "Discovering the number of the port %s" % self.get('port_name')
- self.debug(msg)
+ self.info(msg)
command = "sliver-ovs get-local-endpoint %s" % self.get('port_name')
out = err = ""
self.info("The number of the %s is %s" % (self.get('port_name'),
self.port_number))
-
- def set_port_info(self):
- """ Set all the information about the port inside a list
- """
- info = []
- info.append(self.node.get('hostname'))
+ if remote_endpoint.is_rm_instance("planetlab::Tap"):
+ self._vroute = self.ec.register_resource("planetlab::Vroute")
+ self.ec.set(self._vroute, "action", "add")
+ self.ec.set(self._vroute, "network", self.get("network"))
+
+ print "Vroute Guid :" + str(self._vroute)
+
+ self.ec.register_connection(self._vroute, remote_endpoint.guid)
+ self.ec.deploy(guids=[self._vroute], group = self.deployment_group)
+
+ # For debugging
+ msg = "Route for the tap configured"
+ self.debug(msg)
+
+ return self.port_number
+
- #Return the ip of the node
- import socket
- ip = socket.gethostbyname(self.node.get('hostname'))
- info.append(ip)
+ def establish_udp_connection(self, remote_endpoint, port):
+ establish_connection_command = self._establish_connection_command(port)
- info.append(self.get('port_name'))
- info.append(self.ovsswitch.get('virtual_ip_pref'))
- info.append(self.port_number)
- return info
+ # upload command to connect.sh script
+ shfile = os.path.join(self.app_home, "sw-connect.sh")
+ self.node.upload_command(establish_connection_command,
+ shfile = shfile,
+ overwrite = False)
- def switch_connect_command(self, local_port_name,
- remote_ip, remote_port_num):
+ # invoke connect script
+ cmd = "bash %s" % shfile
+ (out, err), proc = self.node.run(cmd, self.run_home,
+ sudo = True,
+ stdout = "sw_stdout",
+ stderr = "sw_stderr")
+
+ # check if execution errors occurred
+ msg = "Failed to connect endpoints "
+ if proc.poll():
+ self.error(msg, out, err)
+ raise RuntimeError, msg
+
+ # Wait for pid file to be generated
+ self._pid, self._ppid = self.node.wait_pid(self.run_home)
+
+ # If the process is not running, check for error information
+ # on the remote machine
+ if not self._pid or not self._ppid:
+ (out, err), proc = self.node.check_errors(self.run_home)
+ # Out is what was written in the stderr file
+ if err:
+ msg = " Failed to start command '%s' " % cmd
+ self.error(msg, out, err)
+ raise RuntimeError, msg
+
+ # For debugging
+ msg = "Connection on port configured"
+ self.debug(msg)
+
+
+ def _establish_connection_command(self, port):
""" Script to create the connection from a switch to a
remote endpoint
"""
+ local_port_name = self.get('port_name')
command = ["sliver-ovs"]
command.append("set-remote-endpoint ")
command.append("%s " % local_port_name)
- command.append("%s " % remote_ip)
- command.append("%s " % remote_port_num)
+ command.append("%s " % self.remote_ip)
+ command.append("%s " % port)
command = " ".join(command)
command = self.replace_paths(command)
return command
+ def verify_connection(self):
+ self.ovsswitch.ovs_status()
+
+ def terminate_connection(self):
+ return True
+
+ def check_status(self):
+ return self.node.status(self._pid, self._ppid)
+
def do_deploy(self):
""" Deploy the OVS port after the OVS Switch
"""
if not self.ovsswitch or self.ovsswitch.state < ResourceState.READY:
self.debug("---- RESCHEDULING DEPLOY ---- OVSwitch state %s " % self.ovsswitch.state )
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
return
self.do_discover()
self.do_provision()
self.create_port()
- self.get_local_end()
+ end_ip = self.ovsswitch.get('virtual_ip_pref').split('/')
+ self.set("ip", end_ip[0])
#Check the status of the OVS Switch
self.ovsswitch.ovs_status()
- # Save all the information inside a list
- self.port_info = self.set_port_info()
-
super(OVSPort, self).do_deploy()
def do_release(self):
""" Delete the port on the OVSwitch. It needs to wait for the tunnel
to be released.
"""
+ from nepi.resources.linux.udptunnel import LinuxUdpTunnel
+ rm = self.get_connected(LinuxUdpTunnel.get_rtype())
- from nepi.resources.planetlab.openvswitch.tunnel import OVSTunnel
- rm = self.get_connected(OVSTunnel.get_rtype())
-
- if rm and rm[0].state < ResourceState.RELEASED:
- self.ec.schedule(reschedule_delay, self.release)
+ if rm and rm[0].state < ResourceState.STOPPED:
+ self.ec.schedule(self.reschedule_delay, self.release)
return
cmd = "sliver-ovs del_port %s" % self.get('port_name')
+++ /dev/null
-#
-# NEPI, a framework to manage network experiments
-# Copyright (C) 2013 INRIA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-# Authors: Alina Quereilhac <alina.quereilhac@inria.fr>
-# Alexandros Kouvakas <alexandros.kouvakas@inria.fr>
-# Julien Tribino <julien.tribino@inria.fr>
-
-
-from nepi.execution.attribute import Attribute, Flags, Types
-from nepi.execution.resource import ResourceManager, ResourceFactory, clsinit_copy, \
- ResourceState
-from nepi.resources.linux.application import LinuxApplication
-from nepi.resources.planetlab.node import PlanetlabNode
-from nepi.resources.planetlab.openvswitch.ovs import OVSSwitch
-from nepi.util.timefuncs import tnow, tdiffsec
-from nepi.resources.planetlab.vroute import PlanetlabVroute
-from nepi.resources.planetlab.tap import PlanetlabTap
-
-import os
-import time
-import socket
-
-reschedule_delay = "0.5s"
-
-@clsinit_copy
-class OVSTunnel(LinuxApplication):
- """
- .. class:: Class Args :
-
- :param ec: The Experiment controller
- :type ec: ExperimentController
- :param guid: guid of the RM
- :type guid: int
- :param creds: Credentials to communicate with the rm
- :type creds: dict
-
- """
-
- _rtype = "OVSTunnel"
- _authorized_connections = ["OVSPort", "PlanetlabTap"]
-
- @classmethod
- def _register_attributes(cls):
- """ Register the attributes of OVSTunnel RM
-
- """
- network = Attribute("network", "IPv4 Network Address",
- flags = Flags.Design)
-
- cipher = Attribute("cipher",
- "Cipher to encript communication. "
- "One of PLAIN, AES, Blowfish, DES, DES3. ",
- default = None,
- allowed = ["PLAIN", "AES", "Blowfish", "DES", "DES3"],
- type = Types.Enumerate,
- flags = Flags.Design)
-
- cipher_key = Attribute("cipherKey",
- "Specify a symmetric encryption key with which to protect "
- "packets across the tunnel. python-crypto must be installed "
- "on the system." ,
- flags = Flags.Design)
-
- txqueuelen = Attribute("txQueueLen",
- "Specifies the interface's transmission queue length. "
- "Defaults to 1000. ",
- type = Types.Integer,
- flags = Flags.Design)
-
- bwlimit = Attribute("bwLimit",
- "Specifies the interface's emulated bandwidth in bytes "
- "per second.",
- type = Types.Integer,
- flags = Flags.Design)
-
- cls._register_attribute(network)
- cls._register_attribute(cipher)
- cls._register_attribute(cipher_key)
- cls._register_attribute(txqueuelen)
- cls._register_attribute(bwlimit)
-
- def __init__(self, ec, guid):
- """
- :param ec: The Experiment controller
- :type ec: ExperimentController
- :param guid: guid of the RM
- :type guid: int
-
- """
- super(OVSTunnel, self).__init__(ec, guid)
- self._home = "tunnel-%s" % self.guid
- self.port_info_tunl = []
- self._pid = None
- self._ppid = None
- self._vroute = None
- self._node_endpoint1 = None
- self._node_endpoint2 = None
-
- def log_message(self, msg):
- return " guid %d - Tunnel - %s " % (self.guid, msg)
-
- def app_home(self, node):
- return os.path.join(node.exp_home, self._home)
-
- def run_home(self, node):
- return os.path.join(self.app_home(node), self.ec.run_id)
-
- @property
- def tap(self):
- """ Return the Tap RM if it exists """
- rclass = ResourceFactory.get_resource_type(PlanetlabTap.get_rtype())
- for guid in self.connections:
- rm = self.ec.get_resource(guid)
- if isinstance(rm, rclass):
- return rm
-
- @property
- def ovsswitch(self):
- """ Return the 1st switch """
- for guid in self.connections:
- rm_port = self.ec.get_resource(guid)
- if hasattr(rm_port, "create_port"):
- rm_list = rm_port.get_connected(OVSSwitch.get_rtype())
- if rm_list:
- return rm_list[0]
-
- @property
- def check_switch_host_link(self):
- """ Check if the links are between switches
- or switch-host. Return False for the latter.
- """
- if self.tap :
- return True
- return False
-
-
- def endpoints(self):
- """ Return the list with the two connected elements.
- Either Switch-Switch or Switch-Host
- """
- connected = [1, 1]
- position = 0
- for guid in self.connections:
- rm = self.ec.get_resource(guid)
- if hasattr(rm, "create_port"):
- connected[position] = rm
- position += 1
- elif hasattr(rm, "udp_connect_command"):
- connected[1] = rm
- return connected
-
- def get_node(self, endpoint):
- """ Get the nodes of the endpoint
- """
- rm = []
- if hasattr(endpoint, "create_port"):
- rm_list = endpoint.get_connected(OVSSwitch.get_rtype())
- if rm_list:
- rm = rm_list[0].get_connected(PlanetlabNode.get_rtype())
- else:
- rm = endpoint.get_connected(PlanetlabNode.get_rtype())
-
- if rm :
- return rm[0]
-
- @property
- def endpoint1(self):
- """ Return the first endpoint : Always a Switch
- """
- endpoint = self.endpoints()
- return endpoint[0]
-
- @property
- def endpoint2(self):
- """ Return the second endpoint : Either a Switch or a TAP
- """
- endpoint = self.endpoints()
- return endpoint[1]
-
- def get_port_info(self, endpoint1, endpoint2):
- #TODO : Need to change it. Really bad to have method that return different type of things !!!!!
- """ Retrieve the port_info list for each port
-
- """
- if self.check_switch_host_link :
- host0, ip0, pname0, virt_ip0, pnumber0 = endpoint1.port_info
- return pnumber0
-
- host0, ip0, pname0, virt_ip0, pnumber0 = endpoint1.port_info
- host1, ip1, pname1, virt_ip1, pnumber1 = endpoint2.port_info
-
- return pname0, ip1, pnumber1
-
- def wait_local_port(self, node_endpoint):
- """ Waits until the if_name file for the command is generated,
- and returns the if_name for the device """
-
- local_port = None
- delay = 1.0
-
- #TODO : Need to change it with reschedule to avoid the problem
- # of the order of connection
- for i in xrange(10):
- (out, err), proc = node_endpoint.check_output(self.run_home(node_endpoint), 'local_port')
- if out:
- local_port = int(out)
- break
- else:
- time.sleep(delay)
- delay = delay * 1.5
- else:
- msg = "Couldn't retrieve local_port"
- self.error(msg, out, err)
- raise RuntimeError, msg
-
- return local_port
-
- def connection(self, local_endpoint, rm_endpoint):
- """ Create the connect command for each case :
- - Host - Switch,
- - Switch - Switch,
- - Switch - Host
- """
- local_node = self.get_node(local_endpoint)
- local_node.mkdir(self.run_home(local_node))
-
- rm_node = self.get_node(rm_endpoint)
- rm_node.mkdir(self.run_home(rm_node))
-
- # Host to switch
- if self.check_switch_host_link and local_endpoint == self.endpoint2 :
- # Collect info from rem_endpoint
- remote_ip = socket.gethostbyname(rm_node.get("hostname"))
-
- # Collect info from endpoint
- local_port_file = os.path.join(self.run_home(local_node), "local_port")
- rem_port_file = os.path.join(self.run_home(local_node), "remote_port")
- ret_file = os.path.join(self.run_home(local_node), "ret_file")
- cipher = self.get("cipher")
- cipher_key = self.get("cipherKey")
- bwlimit = self.get("bwLimit")
- txqueuelen = self.get("txQueueLen")
-
- rem_port = str(self.get_port_info(rm_endpoint,local_endpoint))
-
- # Upload the remote port in a file
- local_node.upload(rem_port, rem_port_file,
- text = True,
- overwrite = False)
-
- connect_command = local_endpoint.udp_connect_command(
- remote_ip, local_port_file, rem_port_file,
- ret_file, cipher, cipher_key, bwlimit, txqueuelen)
-
- self.connection_command(connect_command, local_node, rm_node)
-
- # Wait for pid file to be generated
- self._pid, self._ppid = local_node.wait_pid(self.run_home(local_node))
-
- if not self._pid or not self._ppid:
- (out, err), proc = local_node.check_errors(self.run_home(local_node))
- # Out is what was written in the stderr file
- if err:
- msg = " Failed to start connection of the OVS Tunnel "
- self.error(msg, out, err)
- raise RuntimeError, msg
- return
-
- # Switch to Host
- if self.check_switch_host_link and local_endpoint == self.endpoint1:
- local_port_name = local_endpoint.get('port_name')
- remote_port_num = self.wait_local_port(rm_node)
- remote_ip = socket.gethostbyname(rm_node.get("hostname"))
-
- # Switch to Switch
- if not self.check_switch_host_link :
- local_port_name, remote_ip, remote_port_num = self.get_port_info(local_endpoint, rm_endpoint)
-
- connect_command = local_endpoint.switch_connect_command(
- local_port_name, remote_ip, remote_port_num)
-
- self.connection_command(connect_command, local_node, rm_node)
-
- def connection_command(self, command, node_endpoint, rm_node_endpoint):
- """ Execute the connection command on the node and check if the processus is
- correctly running on the node.
- """
- shfile = os.path.join(self.app_home(node_endpoint), "sw_connect.sh")
- node_endpoint.upload(command,
- shfile,
- text = True,
- overwrite = False)
-
- # Invoke connect script
- out = err= ''
- cmd = "bash %s" % shfile
- (out, err), proc = node_endpoint.run(cmd, self.run_home(node_endpoint),
- sudo = True,
- stdout = "sw_stdout",
- stderr = "sw_stderr")
-
- # Check if execution errors occured
-
- if proc.poll():
- msg = "Failed to connect endpoints"
- self.error(msg, out, err)
- raise RuntimeError, msg
-
- # For debugging
- msg = "Connection on port configured"
- self.debug(msg)
-
- def do_provision(self):
- """ Provision the tunnel
- """
-
- #TODO : The order of the connection is important for now !
- # Need to change the code of wait local port
- self.connection(self.endpoint2, self.endpoint1)
- self.connection(self.endpoint1, self.endpoint2)
-
- def configure_route(self):
- """ Configure the route for the tap device
-
- .. note : In case of a conection between a switch and a host, a route
- was missing on the node with the Tap Device. This method create
- the missing route.
- """
-
- if self.check_switch_host_link:
- self._vroute = self.ec.register_resource("PlanetlabVroute")
- self.ec.set(self._vroute, "action", "add")
- self.ec.set(self._vroute, "network", self.get("network"))
-
- self.ec.register_connection(self._vroute, self.tap.guid)
- self.ec.deploy(guids=[self._vroute], group = self.deployment_group)
-
- def do_deploy(self):
- """ Deploy the tunnel after the endpoint get ready
- """
- if (not self.endpoint1 or self.endpoint1.state < ResourceState.READY) or \
- (not self.endpoint2 or self.endpoint2.state < ResourceState.READY):
- self.ec.schedule(reschedule_delay, self.deploy)
- return
-
- self.do_discover()
- self.do_provision()
- self.configure_route()
-
- # Cannot call the deploy of the linux application
- # because of a log error.
- # Need to investigate if it is right that the tunnel
- # inherits from the linux application
- # super(OVSTunnel, self).do_deploy()
- self.set_ready()
-
- def do_release(self):
- """ Release the tunnel by releasing the Tap Device if exists
- """
- if self.check_switch_host_link:
- # TODO: Make more generic Release method of PLTAP
- tap_node = self.get_node(self.endpoint2)
- if self._pid and self._ppid:
- (out, err), proc = tap_node.kill(self._pid,
- self._ppid, sudo = True)
-
- if err or proc.poll():
- msg = " Failed to delete TAP device"
- self.error(msg, out, err)
-
- super(OVSTunnel, self).do_release()
-
import passfd
import socket
import vsys
-from optparse import OptionParser, SUPPRESS_HELP
-
-# TODO: GRE OPTION!! CONFIGURE THE VIF-UP IN GRE MODE!!
+from optparse import OptionParser
STOP_MSG = "STOP"
PASSFD_MSG = "PASSFD"
return "STOP-ACK"
def passfd_action(fd, args):
+ """ Sends the file descriptor associated to the TAP device
+ to another process through a unix socket.
+ """
address = args.pop(0)
print address
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
def get_options():
usage = ("usage: %prog -t <vif-type> -a <ip4-address> -n <net-prefix> "
- "-s <snat> -p <pointopoint> -f <if-name-file> -S <socket-name>")
+ "-s <snat> -p <pointopoint> -q <txqueuelen> -f <vif-name-file> "
+ "-S <socket-name>")
parser = OptionParser(usage = usage)
help = "Peer end point for the interface ", default = None,
type="str")
- parser.add_option("-f", "--if-name-file", dest="if_name_file",
- help = "File to store the interface name assigned by the OS",
- default = "if_name", type="str")
+ parser.add_option("-q", "--txqueuelen", dest="txqueuelen",
+ help = "Size of transmission queue. Defaults to 0.",
+ default = 0,
+ type="int")
+
+ parser.add_option("-f", "--vif-name-file", dest="vif_name_file",
+ help = "File to store the virtual interface name assigned by the OS",
+ default = "vif_name", type="str")
parser.add_option("-S", "--socket-name", dest="socket_name",
help = "Name for the unix socket used to interact with this process",
- default = "tap.sock", type="str")
+ type="str")
(options, args) = parser.parse_args()
if options.vif_type and options.vif_type == "IFF_TUN":
vif_type = vsys.IFF_TUN
- return (vif_type, options.ip4_address, options.net_prefix, options.snat,
- options.pointopoint, options.if_name_file, options.socket_name)
+ return (vif_type, options.ip4_address, options.net_prefix,
+ options.snat, options.pointopoint, options.txqueuelen,
+ options.vif_name_file, options.socket_name)
if __name__ == '__main__':
- (vif_type, ip4_address, net_prefix, snat, pointopoint,
- if_name_file, socket_name) = get_options()
-
- (fd, if_name) = vsys.fd_tuntap(vif_type)
- vsys.vif_up(if_name, ip4_address, net_prefix, snat, pointopoint)
-
- # Saving interface name to 'if_name_file
- f = open(if_name_file, 'w')
- f.write(if_name)
+ (vif_type, ip4_address, net_prefix, snat, pointopoint,
+ txqueuelen, vif_name_file, socket_name) = get_options()
+
+ (fd, vif_name) = vsys.fd_tuntap(vif_type)
+
+ vsys.vif_up(vif_name, ip4_address, net_prefix, snat = snat,
+ pointopoint = pointopoint, txqueuelen = txqueuelen)
+
+ # Saving interface name to vif_name_file
+ f = open(vif_name_file, 'w')
+ f.write(vif_name)
f.close()
# create unix socket to receive instructions
--- /dev/null
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+import base64
+import socket
+import vsys
+
+from optparse import OptionParser
+
+STOP_MSG = "STOP"
+
+def get_options():
+ usage = ("usage: %prog -u <slicename> -N <vif-name> -t <vif-type> "
+ "-D <delete> -S <socket-name>")
+
+ parser = OptionParser(usage = usage)
+
+ parser.add_option("-u", "--slicename", dest="slicename",
+ help = "The name of the PlanetLab slice ",
+ type="str")
+
+ parser.add_option("-N", "--vif-name", dest="vif_name",
+ help = "The name of the virtual interface, or a "
+ "unique numeric identifier to name the interface "
+ "if GRE mode is used.",
+ type="str")
+
+ parser.add_option("-t", "--vif-type", dest="vif_type",
+ help = "Virtual interface type. Either IFF_TAP or IFF_TUN. "
+ "Defaults to IFF_TAP. ", type="str")
+
+ parser.add_option("-D", "--delete", dest="delete",
+ action="store_true",
+ default = False,
+ help="Removes virtual interface if GRE mode was used")
+
+ parser.add_option("-S", "--socket-name", dest="socket_name",
+ help = "Name for the unix socket used to interact with this process",
+ type="str")
+
+ (options, args) = parser.parse_args()
+
+ vif_type = vsys.IFF_TAP
+ if options.vif_type and options.vif_type == "IFF_TUN":
+ vif_type = vsys.IFF_TUN
+
+ return (options.socket_name, options.vif_name, options.slicename,
+ vif_type, options.delete)
+
+if __name__ == '__main__':
+
+ (socket_name, vif_name, slicename, vif_type, delete) = get_options()
+
+ # If a socket name is sent, send the STOP message and wait for a reply
+ if socket_name:
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sock.connect(socket_name)
+ encoded = base64.b64encode(STOP_MSG)
+ sock.send("%s\n" % encoded)
+ reply = sock.recv(1024)
+ reply = base64.b64decode(reply)
+ print reply
+
+ # If a slicename is provided, use it to remove a GRE device
+ elif slicename:
+ import pwd
+ import getpass
+
+ sliceid = pwd.getpwnam(slicename).pw_uid
+
+ if vif_type == vsys.IFF_TAP:
+ vif_prefix = "tap"
+ else:
+ vif_prefix = "tun"
+
+ # if_name should be a unique numeric vif id
+ vif_name = "%s%s-%s" % (vif_prefix, sliceid, vif_name)
+
+ vsys.vif_down(vif_name, delete = True)
+
+ # Else, use the vsys interface to set the virtual interface down
+ else:
+ vsys.vif_down(vif_name)
+
+
import tunchannel
import vsys
-from optparse import OptionParser, SUPPRESS_HELP
+from optparse import OptionParser
PASSFD_MSG = "PASSFD"
# xxx: There seems to be a weird behavior where
# even if the file exists and had the port number,
# the read operation returns empty string!
- # Maybe a raise condition?
+ # Maybe a race condition?
for i in xrange(10):
f = open(remote_port_file, 'r')
remote_port = f.read()
--- /dev/null
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+import vsys
+
+from optparse import OptionParser
+
+def get_options():
+ usage = ("usage: %prog -u <slicename> -N <vif-name> -t <vif-type> -a <ip4-address> "
+ "-n <net-prefix> -S <snat> -p <pointopoint> -q <txqueuelen> "
+ "-g <gre_key> -G <gre_remote> -f <vif-name-file> ")
+
+ parser = OptionParser(usage = usage)
+
+ parser.add_option("-u", "--slicename", dest="slicename",
+ help = "The name of the PlanetLab slice ",
+ type="str")
+
+ parser.add_option("-N", "--vif-name", dest="vif_name",
+ help = "The name of the virtual interface, or a "
+ "unique numeric identifier to name the interface "
+ "if GRE mode is used.",
+ type="str")
+
+ parser.add_option("-t", "--vif-type", dest="vif_type",
+ help = "Virtual interface type. Either IFF_TAP or IFF_TUN. "
+ "Defaults to IFF_TAP. ", type="str")
+
+ parser.add_option("-a", "--ip4-address", dest="ip4_address",
+ help = "IPv4 address to assign to interface. It must belong to the "
+ "network segment owned by the slice, given by the vsys_vnet tag. ",
+ type="str")
+
+ parser.add_option("-n", "--net-prefix", dest="net_prefix",
+ help = "IPv4 network prefix for the interface. It must be the one "
+ "given by the slice's vsys_vnet tag. ",
+ type="int")
+
+ parser.add_option("-s", "--snat", dest="snat",
+ action="store_true",
+ default = False,
+ help="Enable SNAT for the interface")
+
+ parser.add_option("-p", "--pointopoint", dest="pointopoint",
+ help = "Peer end point for the interface. ",
+ default = None,
+ type="str")
+
+ parser.add_option("-q", "--txqueuelen", dest="txqueuelen",
+ help = "Size of transmission queue. Defaults to 0.",
+ default = 0,
+ type="int")
+
+ parser.add_option("-g", "--gre-key", dest="gre_key",
+ help = "When set, enables GRE mode with the corresponding GRE key.",
+ default = None,
+ type="str")
+
+ parser.add_option("-G", "--gre-remote", dest="gre_remote",
+ help = "Remote endpoint (public IP) for the GRE tunnel.",
+ default = None,
+ type="str")
+
+ parser.add_option("-f", "--vif-name-file", dest="vif_name_file",
+ help = "File to store the virtual interface name assigned by the OS",
+ default = "vif_name", type="str")
+
+ (options, args) = parser.parse_args()
+
+ vif_type = vsys.IFF_TAP
+ if options.vif_type and options.vif_type == "IFF_TUN":
+ vif_type = vsys.IFF_TUN
+
+ return (options.slicename, options.vif_name, vif_type, options.ip4_address,
+ options.net_prefix, options.snat, options.pointopoint,
+ options.txqueuelen, options.gre_key, options.gre_remote,
+ options.vif_name_file)
+
+if __name__ == '__main__':
+
+ (slicename, vif_name, vif_type, ip4_address, net_prefix, snat, pointopoint,
+ txqueuelen, gre_key, gre_remote, vif_name_file) = get_options()
+
+ if (gre_key):
+ import pwd
+ import getpass
+
+ sliceid = pwd.getpwnam(slicename).pw_uid
+
+ if vif_type == vsys.IFF_TAP:
+ vif_prefix = "tap"
+ else:
+ vif_prefix = "tun"
+
+ # if_name should be a unique numeric vif id
+ vif_name = "%s%s-%s" % (vif_prefix, sliceid, vif_name)
+
+ try:
+ vsys.vif_up(vif_name, ip4_address, net_prefix, snat = snat,
+ pointopoint = pointopoint, txqueuelen = txqueuelen,
+ gre_key = gre_key, gre_remote = gre_remote)
+
+ except RuntimeError as e:
+ import sys
+ import traceback
+ traceback.print_exc(file=sys.stderr)
+
+ # Ignore warnings
+ if e.message.find("WARNING:") < 0:
+ sys.exit(1)
+
+ # Saving interface name to vif_name_file
+ f = open(vif_name_file, 'w')
+ f.write(vif_name)
+ f.close()
+
+
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.resource import ResourceManager, clsinit_copy, \
- ResourceState, reschedule_delay
+ ResourceState
from nepi.resources.linux.node import LinuxNode
from nepi.util.sfaapi import SFAAPIFactory
from nepi.util.execfuncs import lexec
@clsinit_copy
class PlanetlabSfaNode(LinuxNode):
- _rtype = "PlanetlabSfaNode"
+ _rtype = "planetlab::sfa::Node"
_help = "Controls a PlanetLab host accessible using a SSH key " \
"and provisioned using SFA"
_backend = "planetlab"
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.attribute import Attribute, Flags, Types
-from nepi.execution.resource import clsinit_copy, ResourceState, \
- reschedule_delay
+from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.linux.application import LinuxApplication
from nepi.resources.planetlab.node import PlanetlabNode
from nepi.util.timefuncs import tnow, tdiffsec
import os
import time
-# TODO: - routes!!!
-# - CREATE GRE - PlanetlabGRE - it only needs to set the gre and remote
-# properties when configuring the vif_up
-
PYTHON_VSYS_VERSION = "1.0"
@clsinit_copy
class PlanetlabTap(LinuxApplication):
- _rtype = "PlanetlabTap"
+ _rtype = "planetlab::Tap"
_help = "Creates a TAP device on a PlanetLab host"
_backend = "planetlab"
@classmethod
def _register_attributes(cls):
- ip4 = Attribute("ip4", "IPv4 Address",
+ ip = Attribute("ip", "IP of the endpoint. This is the attribute "
+ "you should use to establish a tunnel or a remote "
+ "connection between endpoints",
flags = Flags.Design)
mac = Attribute("mac", "MAC Address",
flags = Flags.Design)
- prefix4 = Attribute("prefix4", "IPv4 network prefix",
- type = Types.Integer,
+ prefix = Attribute("prefix", "IPv4 network prefix of the endpoint",
flags = Flags.Design)
mtu = Attribute("mtu", "Maximum transmition unit for device",
pointopoint = Attribute("pointopoint", "Peer IP address",
flags = Flags.Design)
- tear_down = Attribute("tearDown", "Bash script to be executed before " + \
- "releasing the resource",
+ txqueuelen = Attribute("txqueuelen", "Length of transmission queue",
+ flags = Flags.Design)
+
+ gre_key = Attribute("greKey",
+ "GRE key to be used to configure GRE tunnel",
+ default = "1",
+ flags = Flags.Design)
+
+ gre_remote = Attribute("greRemote",
+ "Public IP of remote endpoint for GRE tunnel",
+ flags = Flags.Design)
+
+ tear_down = Attribute("tearDown",
+ "Bash script to be executed before releasing the resource",
flags = Flags.Design)
- cls._register_attribute(ip4)
+ cls._register_attribute(ip)
cls._register_attribute(mac)
- cls._register_attribute(prefix4)
+ cls._register_attribute(prefix)
cls._register_attribute(mtu)
cls._register_attribute(devname)
cls._register_attribute(up)
cls._register_attribute(snat)
cls._register_attribute(pointopoint)
+ cls._register_attribute(txqueuelen)
+ cls._register_attribute(gre_key)
+ cls._register_attribute(gre_remote)
cls._register_attribute(tear_down)
def __init__(self, ec, guid):
super(PlanetlabTap, self).__init__(ec, guid)
self._home = "tap-%s" % self.guid
+ self._gre_enabled = False
@property
def node(self):
node = self.get_connected(PlanetlabNode.get_rtype())
if node: return node[0]
- return None
+ raise RuntimeError, "TAP/TUN devices must be connected to Node"
+
+ @property
+ def gre_enabled(self):
+ if not self._gre_enabled:
+ from nepi.resources.linux.gretunnel import LinuxGRETunnel
+ gre = self.get_connected(LinuxGRETunnel.get_rtype())
+ if gre: self._gre_enabled = True
+
+ return self._gre_enabled
def upload_sources(self):
- # upload vif-creation python script
+ scripts = []
+
+ # vif-creation python script
pl_vif_create = os.path.join(os.path.dirname(__file__), "scripts",
"pl-vif-create.py")
- self.node.upload(pl_vif_create,
- os.path.join(self.node.src_dir, "pl-vif-create.py"),
- overwrite = False)
-
- # upload vif-stop python script
- pl_vif_stop = os.path.join(os.path.dirname(__file__), "scripts",
- "pl-vif-stop.py")
+ scripts.append(pl_vif_create)
+
+ # vif-up python script
+ pl_vif_up = os.path.join(os.path.dirname(__file__), "scripts",
+ "pl-vif-up.py")
+
+ scripts.append(pl_vif_up)
- self.node.upload(pl_vif_stop,
- os.path.join(self.node.src_dir, "pl-vif-stop.py"),
- overwrite = False)
+ # vif-down python script
+ pl_vif_down = os.path.join(os.path.dirname(__file__), "scripts",
+ "pl-vif-down.py")
+
+ scripts.append(pl_vif_down)
- # upload vif-connect python script
+ # udp-connect python script
pl_vif_connect = os.path.join(os.path.dirname(__file__), "scripts",
"pl-vif-udp-connect.py")
+
+ scripts.append(pl_vif_connect)
- self.node.upload(pl_vif_connect,
- os.path.join(self.node.src_dir, "pl-vif-udp-connect.py"),
- overwrite = False)
-
- # upload tun-connect python script
+ # tunnel creation python script
tunchannel = os.path.join(os.path.dirname(__file__), "..", "linux",
"scripts", "tunchannel.py")
- self.node.upload(tunchannel,
- os.path.join(self.node.src_dir, "tunchannel.py"),
+ scripts.append(tunchannel)
+
+ # Upload scripts
+ scripts = ";".join(scripts)
+
+ self.node.upload(scripts,
+ os.path.join(self.node.src_dir),
overwrite = False)
# upload stop.sh script
stop_command = self.replace_paths(self._stop_command)
- self.node.upload(stop_command,
- os.path.join(self.app_home, "stop.sh"),
- text = True,
+
+ self.node.upload_command(stop_command,
+ shfile = os.path.join(self.app_home, "stop.sh"),
# Overwrite file every time.
- # The stop.sh has the path to the socket, wich should change
+ # The stop.sh has the path to the socket, which should change
# on every experiment run.
overwrite = True)
def upload_start_command(self):
- # Overwrite file every time.
- # The stop.sh has the path to the socket, wich should change
- # on every experiment run.
- super(PlanetlabTap, self).upload_start_command(overwrite = True)
-
- # We want to make sure the device is up and running
- # before the deploy finishes (so things will be ready
- # before other stuff starts running).
- # Run the command as a bash script in background,
- # in the host ( but wait until the command has
- # finished to continue )
- self._run_in_background()
-
- # Retrive if_name
- if_name = self.wait_if_name()
- self.set("deviceName", if_name)
+ # If GRE mode is enabled, TAP creation is delayed until the
+ # tunnel is established
+ if not self.gre_enabled:
+ # Overwrite file every time.
+ # The start.sh has the path to the socket, which should change
+ # on every experiment run.
+ super(PlanetlabTap, self).upload_start_command(overwrite = True)
+
+ # We want to make sure the device is up and running
+ # before the deploy finishes, so we execute now the
+ # start script. We run it in background, because the
+ # TAP will live for as long as the process that
+ # created it is running, and wait until the TAP
+ # is created.
+ self._run_in_background()
+
+ # After creating the TAP, the pl-vif-create.py script
+ # will write the name of the TAP to a file. We wait until
+ # we can read the interface name from the file.
+ vif_name = self.wait_vif_name()
+ self.set("deviceName", vif_name)
def do_deploy(self):
if not self.node or self.node.state < ResourceState.PROVISIONED:
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
else:
if not self.get("command"):
self.set("command", self._start_command)
(out, err), proc = self.execute_command(command,
blocking = True)
+ if err:
+ msg = " Failed to stop command '%s' " % command
+ self.error(msg, out, err)
+
self.set_stopped()
@property
def state(self):
- # First check if the ccnd has failed
state_check_delay = 0.5
if self._state == ResourceState.STARTED and \
tdiffsec(tnow(), self._last_state_check) > state_check_delay:
def do_release(self):
# Node needs to wait until all associated RMs are released
# to be released
- from nepi.resources.linux.udptunnel import UdpTunnel
- rms = self.get_connected(UdpTunnel.get_rtype())
+ from nepi.resources.linux.tunnel import LinuxTunnel
+ rms = self.get_connected(LinuxTunnel.get_rtype())
+
for rm in rms:
if rm.state < ResourceState.STOPPED:
- self.ec.schedule(reschedule_delay, self.release)
+ self.ec.schedule(self.reschedule_delay, self.release)
return
super(PlanetlabTap, self).do_release()
- def wait_if_name(self):
- """ Waits until the if_name file for the command is generated,
- and returns the if_name for the device """
- if_name = None
- delay = 1.0
+ def wait_vif_name(self, exec_run_home = None):
+ """ Waits until the vif_name file for the command is generated,
+ and returns the vif_name for the device """
+ vif_name = None
+ delay = 0.5
+
+ # The vif_name file will be created in the tap-home, while the
+ # current execution home might be elsewhere to check for errors
+ # (e.g. could be a tunnel-home)
+ if not exec_run_home:
+ exec_run_home = self.run_home
for i in xrange(20):
- (out, err), proc = self.node.check_output(self.run_home, "if_name")
+ (out, err), proc = self.node.check_output(self.run_home, "vif_name")
+
+ if proc.poll() > 0:
+ (out, err), proc = self.node.check_errors(exec_run_home)
+
+ if err.strip():
+ raise RuntimeError, err
if out:
- if_name = out.strip()
+ vif_name = out.strip()
break
else:
time.sleep(delay)
delay = delay * 1.5
else:
- msg = "Couldn't retrieve if_name"
+ msg = "Couldn't retrieve vif_name"
self.error(msg, out, err)
raise RuntimeError, msg
- return if_name
+ return vif_name
- def udp_connect_command(self, remote_ip, local_port_file,
- remote_port_file, ret_file, cipher, cipher_key,
- bwlimit, txqueuelen):
- command = ["sudo -S "]
+ def gre_connect(self, remote_endpoint, connection_app_home,
+ connection_run_home):
+ gre_connect_command = self._gre_connect_command(
+ remote_endpoint, connection_run_home)
+
+ # upload command to connect.sh script
+ shfile = os.path.join(connection_app_home, "gre-connect.sh")
+ self.node.upload_command(gre_connect_command,
+ shfile = shfile,
+ overwrite = False)
+
+ # invoke connect script
+ cmd = "bash %s" % shfile
+ (out, err), proc = self.node.run(cmd, connection_run_home)
+
+ # check if execution errors occurred
+ msg = " Failed to connect endpoints "
+
+ if proc.poll() or err:
+ self.error(msg, out, err)
+ raise RuntimeError, msg
+
+ # Wait for pid file to be generated
+ pid, ppid = self.node.wait_pid(connection_run_home)
+
+ # If the process is not running, check for error information
+ # on the remote machine
+ if not pid or not ppid:
+ (out, err), proc = self.node.check_errors(connection_run_home)
+ # Out is what was written in the stderr file
+ if err:
+ msg = " Failed to start command '%s' " % command
+ self.error(msg, out, err)
+ raise RuntimeError, msg
+
+ # After creating the TAP, the pl-vif-create.py script
+ # will write the name of the TAP to a file. We wait until
+ # we can read the interface name from the file.
+ vif_name = self.wait_vif_name(exec_run_home = connection_run_home)
+ self.set("deviceName", vif_name)
+
+ return True
+
+ def initiate_udp_connection(self, remote_endpoint, connection_app_home,
+ connection_run_home, cipher, cipher_key, bwlimit, txqueuelen):
+ port = self.udp_connect(remote_endpoint, connection_app_home,
+ connection_run_home, cipher, cipher_key, bwlimit, txqueuelen)
+ return port
+
+ def udp_connect(self, remote_endpoint, connection_app_home,
+ connection_run_home, cipher, cipher_key, bwlimit, txqueuelen):
+ udp_connect_command = self._udp_connect_command(
+ remote_endpoint, connection_run_home,
+ cipher, cipher_key, bwlimit, txqueuelen)
+
+ # upload command to connect.sh script
+ shfile = os.path.join(self.app_home, "udp-connect.sh")
+ self.node.upload_command(udp_connect_command,
+ shfile = shfile,
+ overwrite = False)
+
+ # invoke connect script
+ cmd = "bash %s" % shfile
+ (out, err), proc = self.node.run(cmd, self.run_home)
+
+ # check if execution errors occurred
+ msg = "Failed to connect endpoints "
+
+ if proc.poll():
+ self.error(msg, out, err)
+ raise RuntimeError, msg
+
+ # Wait for pid file to be generated
+ self._pid, self._ppid = self.node.wait_pid(self.run_home)
+
+ # If the process is not running, check for error information
+ # on the remote machine
+ if not self._pid or not self._ppid:
+ (out, err), proc = self.node.check_errors(self.run_home)
+ # Out is what was written in the stderr file
+ if err:
+ msg = " Failed to start command '%s' " % command
+ self.error(msg, out, err)
+ raise RuntimeError, msg
+
+ port = self.wait_local_port()
+
+ return port
+
+ def _udp_connect_command(self, remote_endpoint, connection_run_home,
+ cipher, cipher_key, bwlimit, txqueuelen):
+
+ # Set the remote endpoint, (private) IP of the device
+ self.set("pointopoint", remote_endpoint.get("ip"))
+
+ # Public IP of the node
+ remote_ip = remote_endpoint.node.get("ip")
+
+ local_port_file = os.path.join(self.run_home,
+ "local_port")
+
+ remote_port_file = os.path.join(self.run_home,
+ "remote_port")
+
+ ret_file = os.path.join(self.run_home,
+ "ret_file")
+
+ # Generate UDP connect command
+ # Use pl-vif-up.py script to configure TAP with peer info
+ vif_up_command = self._vif_up_command
+
+ command = ["( "]
+ command.append(vif_up_command)
+
+ # Use pl-vif-udp-connect.py to establish the tunnel between endpoints
+ command.append(") & (")
+ command.append("sudo -S")
command.append("PYTHONPATH=$PYTHONPATH:${SRC}")
command.append("python ${SRC}/pl-vif-udp-connect.py")
command.append("-t %s" % self.vif_type)
if bwlimit:
command.append("-b %s " % bwlimit)
+ command.append(")")
+
command = " ".join(command)
command = self.replace_paths(command)
+
+ return command
+
+ def establish_udp_connection(self, remote_endpoint, port):
+ # upload remote port number to file
+ rem_port = "%s\n" % port
+ self.node.upload(rem_port,
+ os.path.join(self.run_home, "remote_port"),
+ text = True,
+ overwrite = False)
+
+ def verify_connection(self):
+ self.wait_result()
+
+ def terminate_connection(self):
+ if self._pid and self._ppid:
+ (out, err), proc = self.node.kill(self._pid, self._ppid,
+ sudo = True)
+
+ # check if execution errors occurred
+ if proc.poll() and err:
+ msg = " Failed to Kill the Tap"
+ self.error(msg, out, err)
+ raise RuntimeError, msg
+
+ def check_status(self):
+ return self.node.status(self._pid, self._ppid)
+
+ def wait_local_port(self):
+ """ Waits until the local_port file for the endpoint is generated,
+ and returns the port number
+
+ """
+ return self.wait_file("local_port")
+
+ def wait_result(self):
+ """ Waits until the return code file for the endpoint is generated
+
+ """
+ return self.wait_file("ret_file")
+
+ def wait_file(self, filename):
+ """ Waits until file on endpoint is generated """
+ result = None
+ delay = 1.0
+
+ for i in xrange(20):
+ (out, err), proc = self.node.check_output(
+ self.run_home, filename)
+ if out:
+ result = out.strip()
+ break
+ else:
+ time.sleep(delay)
+ delay = delay * 1.5
+ else:
+ msg = "Couldn't retrieve %s" % filename
+ self.error(msg, out, err)
+ raise RuntimeError, msg
+
+ return result
+
+ def _gre_connect_command(self, remote_endpoint, connection_run_home):
+ # Set the remote endpoint, (private) IP of the device
+ self.set("pointopoint", remote_endpoint.get("ip"))
+ # Public IP of the node
+ self.set("greRemote", remote_endpoint.node.get("ip"))
+
+ # Generate GRE connect command
+
+ # Use vif_down command to first kill existing TAP in GRE mode
+ vif_down_command = self._vif_down_command
+
+ # Use pl-vif-up.py script to configure TAP with peer info
+ vif_up_command = self._vif_up_command
+
+ command = ["("]
+ command.append(vif_down_command)
+ command.append(") ; (")
+ command.append(vif_up_command)
+ command.append(")")
+
+ command = " ".join(command)
+ command = self.replace_paths(command)
+
return command
@property
def _start_command(self):
- command = ["sudo -S python ${SRC}/pl-vif-create.py"]
-
+ if self.gre_enabled:
+ command = []
+ else:
+ command = ["sudo -S python ${SRC}/pl-vif-create.py"]
+
+ command.append("-t %s" % self.vif_type)
+ command.append("-a %s" % self.get("ip"))
+ command.append("-n %s" % self.get("prefix"))
+ command.append("-f %s " % self.vif_name_file)
+ command.append("-S %s " % self.sock_name)
+
+ if self.get("snat") == True:
+ command.append("-s")
+
+ if self.get("pointopoint"):
+ command.append("-p %s" % self.get("pointopoint"))
+
+ if self.get("txqueuelen"):
+ command.append("-q %s" % self.get("txqueuelen"))
+
+ return " ".join(command)
+
+ @property
+ def _stop_command(self):
+ if self.gre_enabled:
+ command = self._vif_down_command
+ else:
+ command = ["sudo -S "]
+ command.append("PYTHONPATH=$PYTHONPATH:${SRC}")
+ command.append("python ${SRC}/pl-vif-down.py")
+ command.append("-S %s " % self.sock_name)
+ command = " ".join(command)
+
+ return command
+
+ @property
+ def _vif_up_command(self):
+ if self.gre_enabled:
+ device_name = "%s" % self.guid
+ else:
+ device_name = self.get("deviceName")
+
+ # Use pl-vif-up.py script to configure TAP
+ command = ["sudo -S "]
+ command.append("PYTHONPATH=$PYTHONPATH:${SRC}")
+ command.append("python ${SRC}/pl-vif-up.py")
+ command.append("-u %s" % self.node.get("username"))
+ command.append("-N %s" % device_name)
command.append("-t %s" % self.vif_type)
- command.append("-a %s" % self.get("ip4"))
- command.append("-n %d" % self.get("prefix4"))
- command.append("-f %s " % self.if_name_file)
- command.append("-S %s " % self.sock_name)
+ command.append("-a %s" % self.get("ip"))
+ command.append("-n %s" % self.get("prefix"))
+
if self.get("snat") == True:
command.append("-s")
+
if self.get("pointopoint"):
command.append("-p %s" % self.get("pointopoint"))
+
+ if self.get("txqueuelen"):
+ command.append("-q %s" % self.get("txqueuelen"))
+
+ if self.gre_enabled:
+ command.append("-g %s" % self.get("greKey"))
+ command.append("-G %s" % self.get("greRemote"))
+
+ command.append("-f %s " % self.vif_name_file)
return " ".join(command)
@property
- def _stop_command(self):
- command = ["sudo -S python ${SRC}/pl-vif-stop.py"]
+ def _vif_down_command(self):
+ if self.gre_enabled:
+ device_name = "%s" % self.guid
+ else:
+ device_name = self.get("deviceName")
+
+ command = ["sudo -S "]
+ command.append("PYTHONPATH=$PYTHONPATH:${SRC}")
+ command.append("python ${SRC}/pl-vif-down.py")
+ command.append("-N %s " % device_name)
- command.append("-S %s " % self.sock_name)
+ if self.gre_enabled:
+ command.append("-u %s" % self.node.get("username"))
+ command.append("-t %s" % self.vif_type)
+ command.append("-D")
+
return " ".join(command)
@property
return "IFF_TAP"
@property
- def if_name_file(self):
- return os.path.join(self.run_home, "if_name")
+ def vif_name_file(self):
+ return os.path.join(self.run_home, "vif_name")
@property
def sock_name(self):
@clsinit_copy
class PlanetlabTun(PlanetlabTap):
- _rtype = "PlanetlabTun"
+ _rtype = "planetlab::Tun"
_help = "Creates a TUN device on a PlanetLab host"
_backend = "planetlab"
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.attribute import Attribute, Flags, Types
-from nepi.execution.resource import clsinit_copy, ResourceState, \
- reschedule_delay
+from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.linux.application import LinuxApplication
from nepi.resources.planetlab.node import PlanetlabNode
from nepi.resources.planetlab.tap import PlanetlabTap
@clsinit_copy
class PlanetlabVroute(LinuxApplication):
- _rtype = "PlanetlabVroute"
+ _rtype = "planetlab::Vroute"
_help = "Creates a Vroute on a PlanetLab host"
_backend = "planetlab"
def do_deploy(self):
if not self.tap or self.tap.state < ResourceState.PROVISIONED:
- self.ec.schedule(reschedule_delay, self.deploy)
+ self.ec.schedule(self.reschedule_delay, self.deploy)
else:
if not self.get("command"):
self.set("command", self._start_command)
command = ["sudo -S python ${SRC}/pl-vroute.py"]
command.append("-a %s" % self.get("action"))
command.append("-n %s" % self.get("network"))
- command.append("-p %d" % self.tap.get("prefix4"))
+ command.append("-p %s" % self.tap.get("prefix"))
command.append("-g %s" % self.tap.get("pointopoint"))
command.append("-f %s" % self.tap.get("deviceName"))
return " ".join(command)
command = ["sudo -S python ${SRC}/pl-vroute.py"]
command.append("-a %s" % "del")
command.append("-n %s" % self.get("network"))
- command.append("-p %d" % self.tap.get("prefix4"))
+ command.append("-p %s" % self.tap.get("prefix"))
command.append("-g %s" % self.tap.get("pointopoint"))
command.append("-f %s" % self.tap.get("deviceName"))
return " ".join(command)
# Should it be made thread-safe?
class GuidGenerator(object):
def __init__(self):
- self._guids = list()
+ self._last_guid = 0
def next(self, guid = None):
- if guid != None:
- return guid
- else:
- last_guid = 0 if len(self._guids) == 0 else self._guids[-1]
- guid = last_guid + 1
- self._guids.append(guid)
- self._guids.sort()
+ if guid == None:
+ guid = self._last_guid + 1
+
+ self._last_guid = self._last_guid if guid <= self._last_guid else guid
+
return guid
--- /dev/null
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+import ipaddr
+import networkx
+import math
+import random
+
+class TopologyType:
+ LINEAR = "linear"
+ LADDER = "ladder"
+ MESH = "mesh"
+ TREE = "tree"
+ STAR = "star"
+ ADHOC = "adhoc"
+
+## TODO:
+## - AQ: Add support for hypergraphs (to be able to add hyper edges to
+## model CSMA or wireless networks)
+
+class NetGraph(object):
+ """ NetGraph represents a network topology.
+ Network graphs are internally using the networkx library.
+
+ """
+
+ def __init__(self, **kwargs):
+ """ A graph can be generated using a specified pattern
+ (LADDER, MESH, TREE, etc), or provided as an argument.
+
+ :param topology: Undirected graph to use as internal representation
+ :type topology: networkx.Graph
+
+ :param topo_type: One of TopologyType.{LINEAR,LADDER,MESH,TREE,STAR}
+ used to automatically generate the topology graph.
+ :type topo_type: TopologyType
+
+ :param node_count: Number of nodes in the topology to be generated.
+ :type node_count: int
+
+ :param branches: Number of branches (arms) for the STAR topology.
+ :type branches: int
+
+
+ :param assign_ips: Automatically assign IP addresses to each node.
+ :type assign_ips: bool
+
+ :param network: Base network segment for IP address assignment.
+ :type network: str
+
+ :param prefix: Base network prefix for IP address assignment.
+ :type prefix: int
+
+ :param version: IP version for IP address assignment.
+ :type version: int
+
+ :param assign_st: Select source and target nodes on the graph.
+ :type assign_st: bool
+
+ :param sources_targets: dictionary with the list of sources (key =
+ "sources") and list of targets (key = "targets") if defined, ignore
+ assign_st
+ :type sources_targets: dictionary of lists
+
+ :param leaf_source: if True, random sources will be selected only
+ from leaf nodes.
+ :type leaf_source: bool
+
+ NOTE: Only point-to-point like network topologies are supported for now.
+ (Wireless and Ethernet networks where several nodes share the same
+ edge (hyperedge) cannot be modeled for the moment).
+
+ """
+ self._topology = kwargs.get("topology")
+ self._topo_type = kwargs.get("topo_type", TopologyType.ADHOC)
+
+ if not self.topology:
+ if kwargs.get("node_count"):
+ node_count = kwargs["node_count"]
+ branches = kwargs.get("branches")
+
+ self._topology = self.generate_topology(self.topo_type,
+ node_count, branches = branches)
+ else:
+ self._topology = networkx.Graph()
+
+ if kwargs.get("assign_ips"):
+ network = kwargs.get("network", "10.0.0.0")
+ prefix = kwargs.get("prefix", 8)
+ version = kwargs.get("version", 4)
+
+ self.assign_p2p_ips(network = network, prefix = prefix,
+ version = version)
+
+ sources_targets = kwargs.get("sources_targets")
+ if sources_targets:
+ [self.set_source(n) for n in sources_targets["sources"]]
+ [self.set_target(n) for n in sources_targets["targets"]]
+ elif kwargs.get("assign_st"):
+ self.select_target_zero()
+ self.select_random_source(is_leaf = kwargs.get("leaf_source"))
+
+ @property
+ def topology(self):
+ return self._topology
+
+ @property
+ def topo_type(self):
+ return self._topo_type
+
+ @property
+ def order(self):
+ return self.topology.order()
+
+ def nodes(self):
+ return self.topology.nodes()
+
+ def edges(self):
+ return self.topology.edges()
+
+ def generate_topology(self, topo_type, node_count, branches = None):
+ if topo_type == TopologyType.LADDER:
+ total_nodes = node_count/2
+ graph = networkx.ladder_graph(total_nodes)
+
+ elif topo_type == TopologyType.LINEAR:
+ graph = networkx.path_graph(node_count)
+
+ elif topo_type == TopologyType.MESH:
+ graph = networkx.complete_graph(node_count)
+
+ elif topo_type == TopologyType.TREE:
+ h = math.log(node_count + 1)/math.log(2) - 1
+ graph = networkx.balanced_tree(2, h)
+
+ elif topo_type == TopologyType.STAR:
+ graph = networkx.Graph()
+ graph.add_node(0)
+
+ nodesinbranch = (node_count - 1)/ BRANCHES
+ c = 1
+
+ for i in xrange(BRANCHES):
+ prev = 0
+ for n in xrange(1, nodesinbranch + 1):
+ graph.add_node(c)
+ graph.add_edge(prev, c)
+ prev = c
+ c += 1
+
+ return graph
+
+ def add_node(self, nid):
+ if nid not in self.topology:
+ self.topology.add_node(nid)
+
+ def add_edge(self, nid1, nid2):
+ self.add_node(nid1)
+ self.add_node( nid2)
+
+ if nid1 not in self.topology[nid2]:
+ self.topology.add_edge(nid2, nid1)
+
+ def annotate_node_ip(self, nid, ip):
+ if "ips" not in self.topology.node[nid]:
+ self.topology.node[nid]["ips"] = list()
+
+ self.topology.node[nid]["ips"].append(ip)
+
+ def node_ip_annotations(self, nid):
+ return self.topology.node[nid].get("ips", [])
+
+ def annotate_node(self, nid, name, value):
+ if not isinstance(value, str) and not isinstance(value, int) and \
+ not isinstance(value, float) and not isinstance(value, bool):
+ raise RuntimeError, "Non-serializable annotation"
+
+ self.topology.node[nid][name] = value
+
+ def node_annotation(self, nid, name):
+ return self.topology.node[nid].get(name)
+
+ def node_annotations(self, nid):
+ return self.topology.node[nid].keys()
+
+ def del_node_annotation(self, nid, name):
+ del self.topology.node[nid][name]
+
+ def annotate_edge(self, nid1, nid2, name, value):
+ if not isinstance(value, str) and not isinstance(value, int) and \
+ not isinstance(value, float) and not isinstance(value, bool):
+ raise RuntimeError, "Non-serializable annotation"
+
+ self.topology.edge[nid1][nid2][name] = value
+
+ def annotate_edge_net(self, nid1, nid2, ip1, ip2, mask, network,
+ prefixlen):
+ self.topology.edge[nid1][nid2]["net"] = dict()
+ self.topology.edge[nid1][nid2]["net"][nid1] = ip1
+ self.topology.edge[nid1][nid2]["net"][nid2] = ip2
+ self.topology.edge[nid1][nid2]["net"]["mask"] = mask
+ self.topology.edge[nid1][nid2]["net"]["network"] = network
+ self.topology.edge[nid1][nid2]["net"]["prefix"] = prefixlen
+
+ def edge_net_annotation(self, nid1, nid2):
+ return self.topology.edge[nid1][nid2].get("net", dict())
+
+ def edge_annotation(self, nid1, nid2, name):
+ return self.topology.edge[nid1][nid2].get(name)
+
+ def edge_annotations(self, nid1, nid2):
+ return self.topology.edge[nid1][nid2].keys()
+
+ def del_edge_annotation(self, nid1, nid2, name):
+ del self.topology.edge[nid1][nid2][name]
+
+ def assign_p2p_ips(self, network = "10.0.0.0", prefix = 8, version = 4):
+ """ Assign IP addresses to each end of each edge of the network graph,
+ computing all the point to point subnets and addresses in the network
+ representation.
+
+ :param network: Base network address used for subnetting.
+ :type network: str
+
+ :param prefix: Prefix for the base network address used for subnetting.
+ :type prefix: int
+
+ :param version: IP version (either 4 or 6).
+ :type version: int
+
+ """
+ if networkx.number_connected_components(self.topology) > 1:
+ raise RuntimeError("Disconnected graph!!")
+
+ # Assign IP addresses to host
+ netblock = "%s/%d" % (network, prefix)
+ if version == 4:
+ net = ipaddr.IPv4Network(netblock)
+ new_prefix = 30
+ elif version == 6:
+ net = ipaddr.IPv6Network(netblock)
+ new_prefix = 30
+ else:
+ raise RuntimeError, "Invalid IP version %d" % version
+
+ ## Clear all previously assigned IPs
+ for nid in self.topology.nodes():
+ self.topology.node[nid]["ips"] = list()
+
+ ## Generate and assign new IPs
+ sub_itr = net.iter_subnets(new_prefix = new_prefix)
+
+ for nid1, nid2 in self.topology.edges():
+ #### Compute subnets for each link
+
+ # get a subnet of base_add with prefix /30
+ subnet = sub_itr.next()
+ mask = subnet.netmask.exploded
+ network = subnet.network.exploded
+ prefixlen = subnet.prefixlen
+
+ # get host addresses in that subnet
+ i = subnet.iterhosts()
+ addr1 = i.next()
+ addr2 = i.next()
+
+ ip1 = addr1.exploded
+ ip2 = addr2.exploded
+ self.annotate_edge_net(nid1, nid2, ip1, ip2, mask, network,
+ prefixlen)
+
+ self.annotate_node_ip(nid1, ip1)
+ self.annotate_node_ip(nid2, ip2)
+
+ def get_p2p_info(self, nid1, nid2):
+ net = self.topology.edge[nid1][nid2]["net"]
+ return ( net[nid1], net[nid2], net["mask"], net["network"],
+ net["prefixlen"] )
+
+ def set_source(self, nid):
+ self.topology.node[nid]["source"] = True
+
+ def is_source(self, nid):
+ return self.topology.node[nid].get("source")
+
+ def set_target(self, nid):
+ self.topology.node[nid]["target"] = True
+
+ def is_target(self, nid):
+ return self.topology.node[nid].get("target")
+
+ def targets(self):
+ """ Returns the nodes that are targets """
+ return [nid for nid in self.topology.nodes() \
+ if self.topology.node[nid].get("target")]
+
+ def sources(self):
+ """ Returns the nodes that are sources """
+ return [nid for nid in self.topology.nodes() \
+ if self.topology.node[nid].get("source")]
+
+ def select_target_zero(self):
+ """ Mark the node 0 as target
+ """
+ nid = 0 if 0 in self.topology.nodes() else "0"
+ self.set_target(nid)
+
+ def select_random_source(self, **kwargs):
+ """ Mark a random node as source.
+ """
+
+ # The ladder is a special case because it is not symmetric.
+ if self.topo_type == TopologyType.LADDER:
+ total_nodes = self.order/2
+ leaf1 = total_nodes
+ leaf2 = total_nodes - 1
+ leaves = [leaf1, leaf2]
+ source = leaves.pop(random.randint(0, len(leaves) - 1))
+ else:
+ # options must not be already sources or targets
+ options = [ k for k,v in self.topology.degree().iteritems() \
+ if (not kwargs.get("is_leaf") or v == 1) \
+ and not self.topology.node[k].get("source") \
+ and not self.topology.node[k].get("target")]
+ source = options.pop(random.randint(0, len(options) - 1))
+
+ self.set_source(source)
+
+++ /dev/null
-#
-# NEPI, a framework to manage network experiments
-# Copyright (C) 2013 INRIA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
-
-from nepi.design.box import Box
-
-from xml.dom import minidom
-import sys
-
-STRING = "string"
-BOOL = "bool"
-INTEGER = "integer"
-DOUBLE = "float"
-
-def xmlencode(s):
- if isinstance(s, str):
- rv = s.decode("latin1")
- elif not isinstance(s, unicode):
- rv = unicode(s)
- else:
- rv = s
- return rv.replace(u'\x00',u'�')
-
-def xmldecode(s):
- return s.replace(u'�',u'\x00').encode("utf8")
-
-def from_type(value):
- if isinstance(value, str):
- return STRING
- if isinstance(value, bool):
- return BOOL
- if isinstance(value, int):
- return INTEGER
- if isinstance(value, float):
- return DOUBLE
-
-def to_type(type, value):
- if type == STRING:
- return str(value)
- if type == BOOL:
- return value == "True"
- if type == INTEGER:
- return int(value)
- if type == DOUBLE:
- return float(value)
-
-class XMLParser(object):
- def to_xml(self, box):
- doc = minidom.Document()
-
- root = doc.createElement("boxes")
- doc.appendChild(root)
-
- traversed = dict()
- self._traverse_boxes(doc, traversed, box)
-
- # Keep the order
- for guid in sorted(traversed.keys()):
- bnode = traversed[guid]
- root.appendChild(bnode)
-
- try:
- xml = doc.toprettyxml(indent=" ", encoding="UTF-8")
- except:
- print >>sys.stderr, "Oops: generating XML from %s" % (data,)
- raise
-
- return xml
-
- def _traverse_boxes(self, doc, traversed, box):
- bnode = doc.createElement("box")
- bnode.setAttribute("guid", xmlencode(box.guid))
- bnode.setAttribute("label", xmlencode(box.label))
- bnode.setAttribute("x", xmlencode(box.x))
- bnode.setAttribute("y", xmlencode(box.y))
- bnode.setAttribute("width", xmlencode(box.width))
- bnode.setAttribute("height", xmlencode(box.height))
-
- traversed[box.guid] = bnode
-
- anode = doc.createElement("attributes")
- bnode.appendChild(anode)
- for name in sorted(box.attributes):
- value = getattr(box.a, name)
- aanode = doc.createElement("attribute")
- anode.appendChild(aanode)
- aanode.setAttribute("name", xmlencode(name))
- aanode.setAttribute("value", xmlencode(value))
- aanode.setAttribute("type", from_type(value))
-
- tnode = doc.createElement("tags")
- bnode.appendChild(tnode)
- for tag in sorted(box.tags):
- ttnode = doc.createElement("tag")
- tnode.appendChild(ttnode)
- ttnode.setAttribute("name", xmlencode(tag))
-
- cnode = doc.createElement("connections")
- bnode.appendChild(cnode)
- for b in sorted(box.connections):
- ccnode = doc.createElement("connection")
- cnode.appendChild(ccnode)
- ccnode.setAttribute("guid", xmlencode(b.guid))
- if b.guid not in traversed:
- self._traverse_boxes(doc, traversed, b)
-
- def from_xml(self, xml):
- doc = minidom.parseString(xml)
- bnode_list = doc.getElementsByTagName("box")
-
- boxes = dict()
- connections = dict()
-
- for bnode in bnode_list:
- if bnode.nodeType == doc.ELEMENT_NODE:
- guid = int(bnode.getAttribute("guid"))
- label = xmldecode(bnode.getAttribute("label"))
- x = float(bnode.getAttribute("x"))
- y = float(bnode.getAttribute("y"))
- height = float(bnode.getAttribute("height"))
- width = float(bnode.getAttribute("width"))
- box = Box(label=label, guid=guid)
- boxes[guid] = box
-
- anode_list = bnode.getElementsByTagName("attribute")
- for anode in anode_list:
- name = xmldecode(anode.getAttribute("name"))
- value = xmldecode(anode.getAttribute("value"))
- type = xmldecode(anode.getAttribute("type"))
- value = to_type(type, value)
- setattr(box.a, name, value)
-
- tnode_list = bnode.getElementsByTagName("tag")
- for tnode in tnode_list:
- value = xmldecode(tnode.getAttribute("name"))
- box.tadd(value)
-
- connections[box] = set()
- cnode_list = bnode.getElementsByTagName("connection")
- for cnode in cnode_list:
- guid = int(cnode.getAttribute("guid"))
- connections[box].add(guid)
-
- for box, conns in connections.iteritems():
- for guid in conns:
- b = boxes[guid]
- box.connect(b)
-
- return box
-
--- /dev/null
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.util.netgraph import NetGraph, TopologyType
+from nepi.util.timefuncs import stformat, tsformat
+
+from xml.dom import minidom
+
+import datetime
+import sys
+import os
+
+STRING = "string"
+BOOL = "bool"
+INTEGER = "integer"
+DOUBLE = "float"
+
+def xmlencode(s):
+ if isinstance(s, str):
+ rv = s.decode("latin1")
+ if isinstance(s, datetime.datetime):
+ rv = tsformat(s)
+ elif not isinstance(s, unicode):
+ rv = unicode(s)
+ else:
+ rv = s
+ return rv.replace(u'\x00',u'�')
+
+def xmldecode(s, cast = str):
+ ret = s.replace(u'�',u'\x00').encode("ascii")
+ ret = cast(ret)
+ if s == "None":
+ return None
+ return ret
+
+def from_type(value):
+ if isinstance(value, bool):
+ return BOOL
+ if isinstance(value, int):
+ return INTEGER
+ if isinstance(value, float):
+ return DOUBLE
+
+ return STRING
+
+def to_type(type, value):
+ if not value:
+ return value
+
+ if type == STRING:
+ return str(value)
+ if type == BOOL:
+ return value == "True"
+ if type == INTEGER:
+ return int(value)
+ if type == DOUBLE:
+ return float(value)
+
+class ECXMLParser(object):
+ def to_xml(self, ec):
+
+ doc = minidom.Document()
+
+ self._ec_to_xml(doc, ec)
+
+ try:
+ xml = doc.toprettyxml(indent=" ", encoding="UTF-8")
+ except:
+ print >>sys.stderr, "Oops: generating XML from %s" % (ec,)
+ raise
+
+ return xml
+
+ def _ec_to_xml(self, doc, ec):
+ ecnode = doc.createElement("experiment")
+ ecnode.setAttribute("exp_id", xmlencode(ec.exp_id))
+ ecnode.setAttribute("run_id", xmlencode(ec.run_id))
+ ecnode.setAttribute("nthreads", xmlencode(ec.nthreads))
+ ecnode.setAttribute("local_dir", xmlencode(ec.local_dir))
+ doc.appendChild(ecnode)
+
+ if ec.netgraph != None:
+ self._netgraph_to_xml(doc, ecnode, ec)
+
+ rmsnode = doc.createElement("rms")
+ ecnode.appendChild(rmsnode)
+
+ for guid, rm in ec._resources.iteritems():
+ self._rm_to_xml(doc, rmsnode, ec, guid, rm)
+
+ return doc
+
+ def _netgraph_to_xml(self, doc, ecnode, ec):
+ ngnode = doc.createElement("topology")
+ ngnode.setAttribute("topo-type", xmlencode(ec.netgraph.topo_type))
+ ecnode.appendChild(ngnode)
+
+ self. _netgraph_nodes_to_xml(doc, ngnode, ec)
+ self. _netgraph_edges_to_xml(doc, ngnode, ec)
+
+ def _netgraph_nodes_to_xml(self, doc, ngnode, ec):
+ ngnsnode = doc.createElement("nodes")
+ ngnode.appendChild(ngnsnode)
+
+ for nid in ec.netgraph.nodes():
+ ngnnode = doc.createElement("node")
+ ngnnode.setAttribute("nid", xmlencode(nid))
+ ngnnode.setAttribute("nid-type", from_type(nid))
+ ngnsnode.appendChild(ngnnode)
+
+ # Mark sources and targets
+ if ec.netgraph.is_source(nid):
+ ngnnode.setAttribute("source", xmlencode(True))
+
+ if ec.netgraph.is_target(nid):
+ ngnnode.setAttribute("target", xmlencode(True))
+
+ # Node annotations
+ annosnode = doc.createElement("node-annotations")
+ add_annotations = False
+ for name in ec.netgraph.node_annotations(nid):
+ add_annotations = True
+ value = ec.netgraph.node_annotation(nid, name)
+ annonode = doc.createElement("node-annotation")
+ annonode.setAttribute("name", xmlencode(name))
+ annonode.setAttribute("value", xmlencode(value))
+ annonode.setAttribute("type", from_type(value))
+ annosnode.appendChild(annonode)
+
+ if add_annotations:
+ ngnnode.appendChild(annosnode)
+
+ def _netgraph_edges_to_xml(self, doc, ngnode, ec):
+ ngesnode = doc.createElement("edges")
+ ngnode.appendChild(ngesnode)
+
+ for nid1, nid2 in ec.netgraph.edges():
+ ngenode = doc.createElement("edge")
+ ngenode.setAttribute("nid1", xmlencode(nid1))
+ ngenode.setAttribute("nid1-type", from_type(nid1))
+ ngenode.setAttribute("nid2", xmlencode(nid2))
+ ngenode.setAttribute("nid2-type", from_type(nid2))
+ ngesnode.appendChild(ngenode)
+
+ # Edge annotations
+ annosnode = doc.createElement("edge-annotations")
+ add_annotations = False
+ for name in ec.netgraph.edge_annotations(nid1, nid2):
+ add_annotations = True
+ value = ec.netgraph.edge_annotation(nid1, nid2, name)
+ annonode = doc.createElement("edge-annotation")
+ annonode.setAttribute("name", xmlencode(name))
+ annonode.setAttribute("value", xmlencode(value))
+ annonode.setAttribute("type", from_type(value))
+ annosnode.appendChild(annonode)
+
+ if add_annotations:
+ ngenode.appendChild(annosnode)
+
+ def _rm_to_xml(self, doc, rmsnode, ec, guid, rm):
+ rmnode = doc.createElement("rm")
+ rmnode.setAttribute("guid", xmlencode(guid))
+ rmnode.setAttribute("rtype", xmlencode(rm._rtype))
+ rmnode.setAttribute("state", xmlencode(rm._state))
+ if rm._start_time:
+ rmnode.setAttribute("start_time", xmlencode(rm._start_time))
+ if rm._stop_time:
+ rmnode.setAttribute("stop_time", xmlencode(rm._stop_time))
+ if rm._discover_time:
+ rmnode.setAttribute("discover_time", xmlencode(rm._discover_time))
+ if rm._provision_time:
+ rmnode.setAttribute("provision_time", xmlencode(rm._provision_time))
+ if rm._ready_time:
+ rmnode.setAttribute("ready_time", xmlencode(rm._ready_time))
+ if rm._release_time:
+ rmnode.setAttribute("release_time", xmlencode(rm._release_time))
+ if rm._failed_time:
+ rmnode.setAttribute("failed_time", xmlencode(rm._failed_time))
+ rmsnode.appendChild(rmnode)
+
+ anode = doc.createElement("attributes")
+ attributes = False
+
+ for attr in rm._attrs.values():
+ if attr.has_changed:
+ attributes = True
+ aanode = doc.createElement("attribute")
+ aanode.setAttribute("name", xmlencode(attr.name))
+ aanode.setAttribute("value", xmlencode(attr.value))
+ aanode.setAttribute("type", from_type(attr.value))
+ anode.appendChild(aanode)
+
+ if attributes:
+ rmnode.appendChild(anode)
+
+ cnode = doc.createElement("connections")
+ connections = False
+
+ for guid in rm._connections:
+ connections = True
+ ccnode = doc.createElement("connection")
+ ccnode.setAttribute("guid", xmlencode(guid))
+ cnode.appendChild(ccnode)
+
+ if connections:
+ rmnode.appendChild(cnode)
+
+ cnnode = doc.createElement("conditions")
+ conditions = False
+
+ for action, conds in rm._conditions.iteritems():
+ conditions = True
+ for (group, state, time) in conds:
+ ccnnode = doc.createElement("condition")
+ ccnnode.setAttribute("action", xmlencode(action))
+ ccnnode.setAttribute("group", xmlencode(group))
+ ccnnode.setAttribute("state", xmlencode(state))
+ ccnnode.setAttribute("time", xmlencode(time))
+ cnnode.appendChild(ccnnode)
+
+ if conditions:
+ rmnode.appendChild(cnnode)
+
+ tnode = doc.createElement("traces")
+ traces = False
+
+ for trace in rm._trcs.values():
+ if trace.enabled:
+ traces = True
+ ttnode = doc.createElement("trace")
+ ttnode.setAttribute("name", xmlencode(trace.name))
+ tnode.appendChild(ttnode)
+
+ if traces:
+ rmnode.appendChild(tnode)
+
+ def from_xml(self, xml):
+ doc = minidom.parseString(xml)
+ return self._ec_from_xml(doc)
+
+ def _ec_from_xml(self, doc):
+ from nepi.execution.ec import ExperimentController
+ ec = None
+
+ ecnode_list = doc.getElementsByTagName("experiment")
+ for ecnode in ecnode_list:
+ if ecnode.nodeType == doc.ELEMENT_NODE:
+ exp_id = xmldecode(ecnode.getAttribute("exp_id"))
+ run_id = xmldecode(ecnode.getAttribute("run_id"))
+ local_dir = xmldecode(ecnode.getAttribute("local_dir"))
+
+ # Configure number of processing threads
+ nthreads = xmldecode(ecnode.getAttribute("nthreads"))
+ os.environ["NEPI_NTHREADS"] = nthreads
+
+ # Deserialize netgraph
+ topology = None
+ topo_type = None
+
+ netgraph = self._netgraph_from_xml(doc, ecnode)
+
+ if netgraph:
+ topo_type = netgraph.topo_type
+ topology = netgraph.topology
+
+ # Instantiate EC
+ ec = ExperimentController(exp_id = exp_id, local_dir = local_dir,
+ topology = topology, topo_type = topo_type)
+
+ connections = set()
+
+ rmsnode_list = ecnode.getElementsByTagName("rms")
+ if rmsnode_list:
+ rmnode_list = rmsnode_list[0].getElementsByTagName("rm")
+ for rmnode in rmnode_list:
+ if rmnode.nodeType == doc.ELEMENT_NODE:
+ self._rm_from_xml(doc, rmnode, ec, connections)
+
+ for (guid1, guid2) in connections:
+ ec.register_connection(guid1, guid2)
+
+ break
+
+ return ec
+
+ def _netgraph_from_xml(self, doc, ecnode):
+ netgraph = None
+
+ topology = ecnode.getElementsByTagName("topology")
+ if topology:
+ topology = topology[0]
+ topo_type = xmldecode(topology.getAttribute("topo-type"))
+
+ netgraph = NetGraph(topo_type = topo_type)
+
+ ngnsnode_list = topology.getElementsByTagName("nodes")
+ if ngnsnode_list:
+ ngnsnode = ngnsnode_list[0].getElementsByTagName("node")
+ for ngnnode in ngnsnode:
+ nid = xmldecode(ngnnode.getAttribute("nid"))
+ tipe = xmldecode(ngnnode.getAttribute("nid-type"))
+ nid = to_type(tipe, nid)
+ netgraph.add_node(nid)
+
+ if ngnnode.hasAttribute("source"):
+ netgraph.set_source(nid)
+ if ngnnode.hasAttribute("target"):
+ netgraph.set_target(nid)
+
+ annosnode_list = ngnnode.getElementsByTagName("node-annotations")
+
+ if annosnode_list:
+ annosnode = annosnode_list[0].getElementsByTagName("node-annotation")
+ for annonode in annosnode:
+ name = xmldecode(annonode.getAttribute("name"))
+
+ if name == "ips":
+ ips = xmldecode(annonode.getAttribute("value"), eval) # list
+ for ip in ips:
+ netgraph.annotate_node_ip(nid, ip)
+ else:
+ value = xmldecode(annonode.getAttribute("value"))
+ tipe = xmldecode(annonode.getAttribute("type"))
+ value = to_type(tipe, value)
+ netgraph.annotate_node(nid, name, value)
+
+ ngesnode_list = topology.getElementsByTagName("edges")
+ if ngesnode_list:
+ ngesnode = ngesnode_list[0].getElementsByTagName("edge")
+ for ngenode in ngesnode:
+ nid1 = xmldecode(ngenode.getAttribute("nid1"))
+ tipe1 = xmldecode(ngenode.getAttribute("nid1-type"))
+ nid1 = to_type(tipe1, nid1)
+
+ nid2 = xmldecode(ngenode.getAttribute("nid2"))
+ tipe2 = xmldecode(ngenode.getAttribute("nid2-type"))
+ nid2 = to_type(tipe2, nid2)
+
+ netgraph.add_edge(nid1, nid2)
+
+ annosnode_list = ngenode.getElementsByTagName("edge-annotations")
+ if annosnode_list:
+ annosnode = annosnode_list[0].getElementsByTagName("edge-annotation")
+ for annonode in annosnode:
+ name = xmldecode(annonode.getAttribute("name"))
+
+ if name == "net":
+ net = xmldecode(annonode.getAttribute("value"), eval) # dict
+ netgraph.annotate_edge_net(nid1, nid2, net[nid1], net[nid2],
+ net["mask"], net["network"], net["prefix"])
+ else:
+ value = xmldecode(annonode.getAttribute("value"))
+ tipe = xmldecode(annonode.getAttribute("type"))
+ value = to_type(tipe, value)
+ netgraph.annotate_edge(nid1, nid2, name, value)
+ return netgraph
+
+ def _rm_from_xml(self, doc, rmnode, ec, connections):
+ start_time = None
+ stop_time = None
+ discover_time = None
+ provision_time = None
+ ready_time = None
+ release_time = None
+ failed_time = None
+
+ guid = xmldecode(rmnode.getAttribute("guid"), int)
+ rtype = xmldecode(rmnode.getAttribute("rtype"))
+
+ # FOR NOW ONLY STATE NEW IS ALLOWED
+ state = 0
+ """
+ state = xmldecode(rmnode.getAttribute("state"), int)
+
+ if rmnode.hasAttribute("start_time"):
+ start_time = xmldecode(rmnode.getAttribute("start_time"),
+ datetime.datetime)
+ if rmnode.hasAttribute("stop_time"):
+ stop_time = xmldecode(rmnode.getAttribute("stop_time"),
+ datetime.datetime)
+ if rmnode.hasAttribute("discover_time"):
+ discover_time = xmldecode(rmnode.getAttribute("discover_time"),
+ datetime.datetime)
+ if rmnode.hasAttribute("provision_time"):
+ provision_time = xmldecode(rmnode.getAttribute("provision_time"),
+ datetime.datetime)
+ if rmnode.hasAttribute("ready_time"):
+ ready_time = xmldecode(rmnode.getAttribute("ready_time"),
+ datetime.datetime)
+ if rmnode.hasAttribute("release_time"):
+ release_time = xmldecode(rmnode.getAttribute("release_time"),
+ datetime.datetime)
+ if rmnode.hasAttribute("failed_time"):
+ failed_time = xmldecode(rmnode.getAttribute("failed_time"),
+ datetime.datetime)
+ """
+
+ ec.register_resource(rtype, guid = guid)
+ rm = ec.get_resource(guid)
+ rm.set_state_time(state, "_start_time", start_time)
+ rm.set_state_time(state, "_stop_time", stop_time)
+ rm.set_state_time(state, "_discover_time", discover_time)
+ rm.set_state_time(state, "_provision_time", provision_time)
+ rm.set_state_time(state, "_ready_time", ready_time)
+ rm.set_state_time(state, "_release_time", release_time)
+ rm.set_state_time(state, "_failed_time", failed_time)
+
+ anode_list = rmnode.getElementsByTagName("attributes")
+ if anode_list:
+ aanode_list = anode_list[0].getElementsByTagName("attribute")
+ for aanode in aanode_list:
+ name = xmldecode(aanode.getAttribute("name"))
+ value = xmldecode(aanode.getAttribute("value"))
+ tipe = xmldecode(aanode.getAttribute("type"))
+ value = to_type(tipe, value)
+ rm.set(name, value)
+
+ cnode_list = rmnode.getElementsByTagName("connections")
+ if cnode_list:
+ ccnode_list = cnode_list[0].getElementsByTagName("connection")
+ for ccnode in ccnode_list:
+ guid2 = xmldecode(ccnode.getAttribute("guid"), int)
+ connections.add((guid, guid2))
+
+ tnode_list = rmnode.getElementsByTagName("traces")
+ if tnode_list:
+ ttnode_list = tnode_list[0].getElementsByTagName("trace")
+ for ttnode in ttnode_list:
+ name = xmldecode(ttnode.getAttribute("name"))
+ ec.enable_trace(guid, name)
+
+ cnnode_list = rmnode.getElementsByTagName("conditions")
+ if cnnode_list:
+ ccnnode_list = cnnode_list[0].getElementsByTagName("condition")
+ for ccnnode in ccnnode_list:
+ action = xmldecode(ccnnode.getAttribute("action"), int)
+ group = xmldecode(ccnnode.getAttribute("group"), eval) # list
+ state = xmldecode(ccnnode.getAttribute("state"), int)
+ time = xmldecode(ccnnode.getAttribute("time"))
+ time = to_type(STRING, time)
+ ec.register_condition(guid, action, group, state, time = time)
+
+++ /dev/null
-#
-# NEPI, a framework to manage network experiments
-# Copyright (C) 2013 INRIA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
-
-import networkx
-import tempfile
-
-class Plotter(object):
- def __init__(self, box):
- self._graph = networkx.Graph(graph = dict(overlap = "false"))
-
- traversed = set()
- self._traverse_boxes(traversed, box)
-
- def _traverse_boxes(self, traversed, box):
- traversed.add(box.guid)
-
- self._graph.add_node(box.label,
- width = 50/72.0, # 1 inch = 72 points
- height = 50/72.0,
- shape = "circle")
-
- for b in box.connections:
- self._graph.add_edge(box.label, b.label)
- if b.guid not in traversed:
- self._traverse_boxes(traversed, b)
-
- def plot(self):
- f = tempfile.NamedTemporaryFile(delete=False)
- networkx.draw_graphviz(self._graph)
- networkx.write_dot(self._graph, f.name)
- f.close()
- return f.name
-
--- /dev/null
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+import logging
+import os
+
+try:
+ import networkx
+except ImportError:
+ msg = "Networkx library is not installed, you will not be able to plot."
+ logger = logging.Logger("Plotter")
+ logger.debug(msg)
+
+try:
+ import matplotlib.pyplot as plt
+except:
+ msg = ("Matplotlib library is not installed or X11 is not enabled. "
+ "You will not be able to generate PNG plots.")
+ logger = logging.Logger("Plotter")
+ logger.debug(msg)
+
+class PFormats:
+ DOT = "dot"
+ FIGURE = "figure"
+
+class ECPlotter(object):
+ def plot(self, ec, dirpath = None, format= PFormats.FIGURE,
+ show = False):
+ graph, labels = self._ec2graph(ec)
+
+ add_extension = False
+
+ if not dirpath:
+ import tempfile
+ dirpath = tempfile.mkdtemp()
+
+ fpath = os.path.join(dirpath, "%s_%s" % (ec.exp_id, ec.run_id))
+
+ if format == PFormats.FIGURE:
+ pos = networkx.graphviz_layout(graph, prog="neato")
+ networkx.draw(graph, pos = pos, node_color="white",
+ node_size = 500, with_labels=True)
+
+ label = "\n".join(map(lambda v: "%s: %s" % (v[0], v[1]), labels.iteritems()))
+ plt.annotate(label, xy=(0.05, 0.95), xycoords='axes fraction')
+
+ fpath += ".png"
+
+ plt.savefig(fpath, bbox_inches="tight")
+
+ if show:
+ plt.show()
+
+ elif format == PFormats.DOT:
+ fpath += ".dot"
+
+ networkx.write_dot(graph, fpath)
+
+ if show:
+ import subprocess
+ subprocess.call(["dot", "-Tps", fpath, "-o", "%s.ps" % fpath])
+ subprocess.call(["evince","%s.ps" % fpath])
+
+ return fpath
+
+ def _ec2graph(self, ec):
+ graph = networkx.Graph(graph = dict(overlap = "false"))
+
+ labels = dict()
+ connections = set()
+
+ for guid, rm in ec._resources.iteritems():
+ label = rm.get_rtype()
+
+ graph.add_node(guid,
+ label = "%d %s" % (guid, label),
+ width = 50/72.0, # 1 inch = 72 points
+ height = 50/72.0,
+ shape = "circle")
+
+ labels[guid] = label
+
+ for guid2 in rm.connections:
+ # Avoid adding a same connection twice
+ if (guid2, guid) not in connections:
+ connections.add((guid, guid2))
+
+ for (guid1, guid2) in connections:
+ graph.add_edge(guid1, guid2)
+
+ return graph, labels
--- /dev/null
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+import datetime
+import os
+
+class SFormats:
+ XML = "xml"
+
+class ECSerializer(object):
+ def load(self, filepath, format = SFormats.XML):
+ if format == SFormats.XML:
+ from nepi.util.parsers.xml_parser import ECXMLParser
+
+ parser = ECXMLParser()
+ f = open(filepath, "r")
+ xml = f.read()
+ f.close()
+
+ ec = parser.from_xml(xml)
+
+ return ec
+
+ def serialize(self, ec, format = SFormats.XML):
+ if format == SFormats.XML:
+ from nepi.util.parsers.xml_parser import ECXMLParser
+
+ parser = ECXMLParser()
+ sec = parser.to_xml(ec)
+
+ return sec
+
+ def save(self, ec, dirpath, format = SFormats.XML):
+ date = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+ filename = "%s_%s" % (ec.exp_id, date)
+
+ if format == SFormats.XML:
+ filepath = os.path.join(dirpath, "%s.xml" % filename)
+ sec = self.serialize(ec, format = format)
+ f = open(filepath, "w")
+ f.write(sec)
+ f.close()
+
+ return filepath
+
import hashlib
import re
import os
+import time
from nepi.util.logger import Logger
self._slice_resources_batch.append(resource_hrn)
resources_hrn_new = list()
if self._count == len(self._total):
+ check_all_inslice = self._check_all_inslice(self._slice_resources_batch, slicename)
+ if check_all_inslice == True:
+ return True
for resource_hrn in self._slice_resources_batch:
resource_parts = resource_hrn.split('.')
resource_hrn = '.'.join(resource_parts[:2]) + '.' + '\\.'.join(resource_parts[2:])
resources_hrn_new.append(resource_hrn)
with self.lock_slice:
- self._sfi_exec_method('delete', slicename)
+ if check_all_inslice != 0:
+ self._sfi_exec_method('delete', slicename)
+ time.sleep(480)
+
# Re implementing urn from hrn because the library sfa-common doesn't work for wilabt
resources_urn = self._get_urn(resources_hrn_new)
rspec = self.rspec_proc.build_sfa_rspec(slicename, resources_urn, properties, leases)
else:
self._log.debug(" Waiting for more nodes to add the batch to the slice ")
+ def _check_all_inslice(self, resources_hrn, slicename):
+ slice_res = self.get_slice_resources(slicename)['resource']
+ if slice_res:
+ if len(slice_res[0]['services']) != 0:
+ slice_res_hrn = self.get_resources_hrn(slice_res).values()
+ if self._compare_lists(slice_res_hrn, resources_hrn):
+ return True
+ else: return len(slice_res_hrn)
+ return 0
+
+ def _compare_lists(self, list1, list2):
+ if len(list1) != len(list2):
+ return False
+ for item in list1:
+ if item not in list2:
+ return False
+ return True
+
def _get_urn(self, resources_hrn):
"""
Get urn from hrn.
agent = None,
identity = None,
server_key = None,
- tty = False):
+ tty = False,
+ strict_host_checking = True):
"""
Spawn a remote command such that it will continue working asynchronously in
background.
agent = agent,
identity = identity,
server_key = server_key,
- tty = tty ,
+ tty = tty,
+ strict_host_checking = strict_host_checking ,
)
if proc.wait():
gw = None,
agent = None,
identity = None,
- server_key = None):
+ server_key = None,
+ strict_host_checking = True):
"""
Returns the pid and ppid of a process from a remote file where the
information was stored.
gw = gw,
agent = agent,
identity = identity,
- server_key = server_key
+ server_key = server_key,
+ strict_host_checking = strict_host_checking
)
if proc.wait():
gw = None,
agent = None,
identity = None,
- server_key = None):
+ server_key = None,
+ strict_host_checking = True):
"""
Returns a code representing the the status of a remote process
gw = gw,
agent = agent,
identity = identity,
- server_key = server_key
+ server_key = server_key,
+ strict_host_checking = strict_host_checking
)
if proc.wait():
sudo = False,
identity = None,
server_key = None,
- nowait = False):
+ nowait = False,
+ strict_host_checking = True):
"""
Sends a kill signal to a remote process.
gw = gw,
agent = agent,
identity = identity,
- server_key = server_key
+ server_key = server_key,
+ strict_host_checking = strict_host_checking
)
# wait, don't leave zombies around
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
-import base64
-import errno
-import vsys
-import socket
-from optparse import OptionParser, SUPPRESS_HELP
+import math
+import numpy
+from scipy import stats
-STOP_MSG = "STOP"
+def compute_mean(sample):
+ # TODO: Discard outliers !!!!
-def get_options():
- usage = ("usage: %prog -S <socket-name>")
-
- parser = OptionParser(usage = usage)
-
- parser.add_option("-S", "--socket-name", dest="socket_name",
- help = "Name for the unix socket used to interact with this process",
- default = "tap.sock", type="str")
-
- (options, args) = parser.parse_args()
-
- return (options.socket_name)
+ if not sample:
+ print " CANNOT COMPUTE STATS for ", sample
+ return (0, 0, 0, 0)
-if __name__ == '__main__':
-
- (socket_name) = get_options()
-
- sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- sock.connect(socket_name)
- encoded = base64.b64encode(STOP_MSG)
- sock.send("%s\n" % encoded)
- reply = sock.recv(1024)
- reply = base64.b64decode(reply)
-
- print reply
+ x = numpy.array(sample)
+ # sample mean and standard deviation
+ n, min_max, mean, var, skew, kurt = stats.describe(x)
+ std = math.sqrt(var)
+ # for the population mean and std ...
+ # mean = x.mean()
+ # std = x.std()
+
+ # Calculate confidence interval t-distribution
+ ## BUG: should use the quantile of the NORMAL distribution, not the t-student quantile
+ ci = stats.t.interval(0.95, n-1, loc = mean, scale = std/math.sqrt(n))
+ return (mean, std, ci[0], ci[1])
return None
+def compute_delay_ms(timestamp2, timestamp1):
+ d1 = datetime.datetime.fromtimestamp(float(timestamp1))
+ d2 = datetime.datetime.fromtimestamp(float(timestamp2))
+ delay = d2 - d1
+
+ # round up resolution - round up to milliseconds
+ return delay.total_seconds() * 1000
+
+++ /dev/null
-#!/usr/bin/env python
-#
-# NEPI, a framework to manage network experiments
-# Copyright (C) 2013 INRIA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
-
-
-from nepi.design.box import Box
-
-import unittest
-
-class BoxDesignTestCase(unittest.TestCase):
- def test_simple_design(self):
- node1 = Box()
- node2 = Box()
-
- node1.label = "uno"
- node2.label = "dos"
-
- node1.tadd('nodo')
- node2.tadd('mynodo')
-
- self.assertEquals(node1.tags, set(['nodo']))
- self.assertEquals(node2.tags, set(['mynodo']))
-
- node1.a.hola = "chau"
- node2.a.hello = "bye"
-
- self.assertEquals(node1.a.hola, "chau")
- self.assertEquals(node2.a.hello, "bye")
-
- node1.connect(node2)
-
- self.assertEquals(node1.connections, set([node2]))
- self.assertEquals(node2.connections, set([node1]))
- self.assertTrue(node1.is_connected(node2))
- self.assertTrue(node2.is_connected(node1))
-
- self.assertEquals(node1.c.dos.a.hello, "bye")
- self.assertEquals(node2.c.uno.a.hola, "chau")
-
- node2.disconnect(node1)
-
- self.assertEquals(node1.connections, set([]))
- self.assertEquals(node2.connections, set([]))
- self.assertFalse(node1.is_connected(node2))
- self.assertFalse(node2.is_connected(node1))
-
- self.assertRaises(AttributeError, node1.c.dos)
- self.assertRaises(AttributeError, node2.c.uno)
-
-
-if __name__ == '__main__':
- unittest.main()
-
ResourceFactory.register_type(MyResource)
ResourceFactory.register_type(AnotherResource)
+ # Take into account default 'Critical' attribute
self.assertEquals(MyResource.get_rtype(), "MyResource")
- self.assertEquals(len(MyResource._attributes), 2)
+ self.assertEquals(len(MyResource._attributes), 3)
self.assertEquals(ResourceManager.get_rtype(), "Resource")
- self.assertEquals(len(ResourceManager._attributes), 1)
+ self.assertEquals(len(ResourceManager._attributes), 2)
self.assertEquals(AnotherResource.get_rtype(), "AnotherResource")
- self.assertEquals(len(AnotherResource._attributes), 1)
+ self.assertEquals(len(AnotherResource._attributes), 2)
self.assertEquals(len(ResourceFactory.resource_types()), 2)
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.resource import ResourceManager, ResourceState, \
+ clsinit_copy, ResourceAction, ResourceFactory
+from nepi.execution.runner import ExperimentRunner
+
+import functools
+import os
+import shutil
+import tempfile
+import time
+import unittest
+
+reschedule_delay = "0.5s"
+deploy_time = 0
+run_time = 0
+
+class Link(ResourceManager):
+ _rtype = "dummy::Link"
+ def do_deploy(self):
+ time.sleep(deploy_time)
+ super(Link, self).do_deploy()
+ self.logger.debug(" -------- DEPLOYED ------- ")
+
+class Interface(ResourceManager):
+ _rtype = "dummy::Interface"
+
+ def do_deploy(self):
+ node = self.get_connected(Node.get_rtype())[0]
+ link = self.get_connected(Link.get_rtype())[0]
+
+ if node.state < ResourceState.READY or \
+ link.state < ResourceState.READY:
+ self.ec.schedule(reschedule_delay, self.deploy)
+ self.logger.debug(" -------- RESCHEDULING ------- ")
+ else:
+ time.sleep(deploy_time)
+ super(Interface, self).do_deploy()
+ self.logger.debug(" -------- DEPLOYED ------- ")
+
+class Node(ResourceManager):
+ _rtype = "dummy::Node"
+
+ def do_deploy(self):
+ self.logger.debug(" -------- DO_DEPLOY ------- ")
+ time.sleep(deploy_time)
+ super(Node, self).do_deploy()
+ self.logger.debug(" -------- DEPLOYED ------- ")
+
+class Application(ResourceManager):
+ _rtype = "dummy::Application"
+
+ def do_deploy(self):
+ node = self.get_connected(Node.get_rtype())[0]
+
+ if node.state < ResourceState.READY:
+ self.ec.schedule(reschedule_delay, self.deploy)
+ self.logger.debug(" -------- RESCHEDULING ------- ")
+ else:
+ time.sleep(deploy_time)
+ super(Application, self).do_deploy()
+ self.logger.debug(" -------- DEPLOYED ------- ")
+
+ def do_start(self):
+ super(Application, self).do_start()
+ time.sleep(run_time)
+ self.ec.schedule("0s", self.stop)
+
+ResourceFactory.register_type(Application)
+ResourceFactory.register_type(Node)
+ResourceFactory.register_type(Interface)
+ResourceFactory.register_type(Link)
+
+class RunnerTestCase(unittest.TestCase):
+ def test_runner_max_runs(self):
+ node_count = 4
+ app_count = 2
+
+ ec = ExperimentController(exp_id = "max-runs-test")
+
+ # Add simulated nodes and applications
+ nodes = list()
+ apps = list()
+ ifaces = list()
+
+ for i in xrange(node_count):
+ node = ec.register_resource("dummy::Node")
+ nodes.append(node)
+
+ iface = ec.register_resource("dummy::Interface")
+ ec.register_connection(node, iface)
+ ifaces.append(iface)
+
+ for i in xrange(app_count):
+ app = ec.register_resource("dummy::Application")
+ ec.register_connection(node, app)
+ apps.append(app)
+
+ link = ec.register_resource("dummy::Link")
+
+ for iface in ifaces:
+ ec.register_connection(link, iface)
+
+ rnr = ExperimentRunner()
+ runs = rnr.run(ec, min_runs = 5, max_runs = 10, wait_guids = apps,
+ wait_time = 0)
+
+ self.assertEquals(runs, 10)
+
+ def test_runner_convergence(self):
+ node_count = 4
+ app_count = 2
+
+ ec = ExperimentController(exp_id = "convergence-test")
+
+ # Add simulated nodes and applications
+ nodes = list()
+ apps = list()
+ ifaces = list()
+
+ for i in xrange(node_count):
+ node = ec.register_resource("dummy::Node")
+ nodes.append(node)
+
+ iface = ec.register_resource("dummy::Interface")
+ ec.register_connection(node, iface)
+ ifaces.append(iface)
+
+ for i in xrange(app_count):
+ app = ec.register_resource("dummy::Application")
+ ec.register_connection(node, app)
+ apps.append(app)
+
+ link = ec.register_resource("dummy::Link")
+
+ for iface in ifaces:
+ ec.register_connection(link, iface)
+
+ samples = [10, 10, 10, 10, 12, 10, 12, 10, 10, 11]
+
+ def compute_metric_callback(samples, ec, run):
+ return samples[run-1]
+
+ metric_callback = functools.partial(compute_metric_callback, samples)
+
+ rnr = ExperimentRunner()
+ runs = rnr.run(ec, min_runs = 5,
+ compute_metric_callback = metric_callback,
+ wait_guids = apps,
+ wait_time = 0)
+
+ self.assertEquals(runs, 10)
+
+if __name__ == '__main__':
+ unittest.main()
+
def create_node(hostname, username = None, identity = None):
ec = DummyEC()
node = LinuxNode(ec, 1)
+
node.set("hostname", hostname)
+
if username:
node.set("username", username)
+
if identity:
node.set("identity", identity)
def skipIfNotAlive(func):
name = func.__name__
def wrapped(*args, **kwargs):
- host = args[1]
- if host != "localhost":
- user = None
+ hostname = args[1]
+ if hostname != "localhost":
+ username = None
identity = None
if len(args) >= 3:
- user = args[2]
+ username = args[2]
if len(args) >= 4:
identity = args[3]
- node, ec = create_node(host, user, identity)
+ node, ec = create_node(hostname, username, identity)
if not node.is_alive():
print "*** WARNING: Skipping test %s: Node %s is not alive\n" % (
def wrapped(*args, **kwargs):
argss = list(args)
argss.pop(0)
+
for i in xrange(len(argss)/2):
username = argss[i*2]
hostname = argss[i*2+1]
return wrapped
+def skipIfAnyNotAliveWithIdentity(func):
+ name = func.__name__
+ def wrapped(*args, **kwargs):
+ argss = list(args)
+ argss.pop(0)
+ for i in xrange(len(argss)/3):
+ username = argss[i*3]
+ hostname = argss[i*3+1]
+ identity = argss[i*3+2]
+
+ node, ec = create_node(hostname, username, identity)
+
+ if not node.is_alive():
+ print "*** WARNING: Skipping test %s: Node %s is not alive\n" % (
+ name, node.get("hostname"))
+ return
+
+ return func(*args, **kwargs)
+
+ return wrapped
+
+
def skipInteractive(func):
name = func.__name__
def wrapped(*args, **kwargs):
ec = ExperimentController(exp_id = "test-stdout")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
- app = ec.register_resource("LinuxApplication")
+ app = ec.register_resource("linux::Application")
cmd = "echo 'HOLA'"
ec.set(app, "command", cmd)
ec.register_connection(app, node)
ec = ExperimentController(exp_id = "test-ping")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
- app = ec.register_resource("LinuxApplication")
+ app = ec.register_resource("linux::Application")
cmd = "ping -c5 %s" % self.target
ec.set(app, "command", cmd)
ec = ExperimentController(exp_id = "tests-code")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
prog = """#include <stdio.h>
cmd = "${RUN_HOME}/hello"
build = "gcc -Wall -x c ${APP_HOME}/code -o hello"
- app = ec.register_resource("LinuxApplication")
+ app = ec.register_resource("linux::Application")
ec.set(app, "command", cmd)
ec.set(app, "code", prog)
ec.set(app, "depends", "gcc")
ec = ExperimentController(exp_id="test-concurrency")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
apps = list()
for i in xrange(50):
- app = ec.register_resource("LinuxApplication")
+ app = ec.register_resource("linux::Application")
cmd = "ping -c5 %s" % self.target
ec.set(app, "command", cmd)
ec.register_connection(app, node)
ec = ExperimentController(exp_id="test-condition")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
- server = ec.register_resource("LinuxApplication")
+ server = ec.register_resource("linux::Application")
cmd = "echo 'HOLA' | nc -l 3333"
ec.set(server, "command", cmd)
ec.set(server, "depends", depends)
ec.register_connection(server, node)
- client = ec.register_resource("LinuxApplication")
+ client = ec.register_resource("linux::Application")
cmd = "nc 127.0.0.1 3333"
ec.set(client, "command", cmd)
ec.register_connection(client, node)
ec = ExperimentController(exp_id="test-http-sources")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
sources = "http://yans.pl.sophia.inria.fr/code/nef/archive/tip.tar.gz;" \
"http://yans.pl.sophia.inria.fr/code/nef/raw-file/8ace577d4079/src/nef/images/menu/connect.png"
- app = ec.register_resource("LinuxApplication")
+ app = ec.register_resource("linux::Application")
ec.set(app, "sources", sources)
command = "ls ${SRC}"
ec.register_connection(app, node)
+
ec.deploy()
ec.wait_finished([app])
ec = ExperimentController(exp_id="test-xterm")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
- app = ec.register_resource("LinuxApplication")
+ app = ec.register_resource("linux::Application")
ec.set(app, "command", "xterm")
ec.set(app, "depends", "xterm")
ec.set(app, "forwardX11", True)
ec = ExperimentController(exp_id="test-copyfile")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
- app = ec.register_resource("LinuxApplication")
+ app = ec.register_resource("linux::Application")
ec.set(app, "command", "ls ${SRC}")
ec.set(app, "sources", "%s;%s" % (dirpath, f1.name))
ec.register_connection(app, node)
def test_ccnpeek(self):
ec = ExperimentController(exp_id = "test-linux-ccncat")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
ec.set(node, "hostname", self.fedora_host)
ec.set(node, "username", self.fedora_user)
ec.set(node, "identity", self.fedora_identity)
#ec.set(node, "cleanProcesses", True)
- #ec.set(node, "cleanHome", True)
+ #ec.set(node, "cleanExperiment", True)
- ccnd = ec.register_resource("LinuxCCND")
+ ccnd = ec.register_resource("linux::CCND")
ec.register_connection(ccnd, node)
# REPO file is in test/resources/linux/ns3/ccn/repoFile1
"..", "ns3", "ccn", "repoFile1")
## Register a repository in node 1
- ccnr = ec.register_resource("LinuxCCNR")
+ ccnr = ec.register_resource("linux::CCNR")
ec.set(ccnr, "repoFile1", repofile)
ec.register_connection(ccnr, ccnd)
- ccncat = ec.register_resource("LinuxCCNCat")
+ ccncat = ec.register_resource("linux::CCNCat")
ec.set(ccncat, "contentName", "ccnx:/test/bunny.ts")
ec.register_connection(ccncat, ccnd)
def test_ccnpeek(self):
ec = ExperimentController(exp_id = "test-linux-ccnpeek")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
ec.set(node, "hostname", self.fedora_host)
ec.set(node, "username", self.fedora_user)
ec.set(node, "identity", self.fedora_identity)
#ec.set(node, "cleanProcesses", True)
- #ec.set(node, "cleanHome", True)
+ #ec.set(node, "cleanExperiment", True)
- ccnd = ec.register_resource("LinuxCCND")
+ ccnd = ec.register_resource("linux::CCND")
ec.register_connection(ccnd, node)
- peek = ec.register_resource("LinuxCCNPeek")
+ peek = ec.register_resource("linux::CCNPeek")
ec.set(peek, "contentName", "ccnx:/chunk0")
ec.register_connection(peek, ccnd)
- poke = ec.register_resource("LinuxCCNPoke")
+ poke = ec.register_resource("linux::CCNPoke")
ec.set(poke, "contentName", "ccnx:/chunk0")
ec.set(poke, "content", "DATA")
ec.register_connection(poke, ccnd)
ec = ExperimentController(exp_id = "test-ccn-ping-count")
- node1 = ec.register_resource("LinuxNode")
+ node1 = ec.register_resource("linux::Node")
ec.set(node1, "hostname", host1)
ec.set(node1, "username", user1)
- ec.set(node1, "cleanHome", True)
+ ec.set(node1, "cleanExperiment", True)
ec.set(node1, "cleanProcesses", True)
- ccnd1 = ec.register_resource("LinuxCCND")
+ ccnd1 = ec.register_resource("linux::CCND")
ec.register_connection(ccnd1, node1)
- entry1 = ec.register_resource("LinuxFIBEntry")
+ entry1 = ec.register_resource("linux::FIBEntry")
ec.set(entry1, "host", host2)
ec.register_connection(entry1, ccnd1)
- server = ec.register_resource("LinuxCCNPingServer")
+ server = ec.register_resource("linux::CCNPingServer")
ec.set(server, "prefix", "ccnx:/test")
ec.register_connection(server, ccnd1)
- node2 = ec.register_resource("LinuxNode")
+ node2 = ec.register_resource("linux::Node")
ec.set(node2, "hostname", host2)
ec.set(node2, "username", user2)
- ec.set(node2, "cleanHome", True)
+ ec.set(node2, "cleanExperiment", True)
ec.set(node2, "cleanProcesses", True)
- ccnd2 = ec.register_resource("LinuxCCND")
+ ccnd2 = ec.register_resource("linux::CCND")
ec.register_connection(ccnd2, node2)
- entry2 = ec.register_resource("LinuxFIBEntry")
+ entry2 = ec.register_resource("linux::FIBEntry")
ec.set(entry2, "host", host1)
ec.register_connection(entry2, ccnd2)
- client = ec.register_resource("LinuxCCNPing")
+ client = ec.register_resource("linux::CCNPing")
ec.set(client, "c", 15)
ec.set(client, "prefix", "ccnx:/test")
ec.register_connection(client, ccnd2)
ec = ExperimentController(exp_id = "test-fib-traces")
- node1 = ec.register_resource("LinuxNode")
+ node1 = ec.register_resource("linux::Node")
ec.set(node1, "hostname", host)
ec.set(node1, "username", user)
- #ec.set(node1, "cleanHome", True)
+ ec.set(node1, "cleanExperiment", True)
ec.set(node1, "cleanProcesses", True)
- ccnd1 = ec.register_resource("LinuxCCND")
+ ccnd1 = ec.register_resource("linux::CCND")
ec.register_connection(ccnd1, node1)
- entry1 = ec.register_resource("LinuxFIBEntry")
+ entry1 = ec.register_resource("linux::FIBEntry")
ec.set(entry1, "host", self.target)
ec.enable_trace(entry1, "ping")
ec.enable_trace(entry1, "mtr")
ec.register_connection(entry1, ccnd1)
- node2 = ec.register_resource("LinuxNode")
+ node2 = ec.register_resource("linux::Node")
ec.set(node2, "hostname", self.target)
ec.set(node2, "username", self.fedora_user)
- ec.set(node2, "cleanHome", True)
+ ec.set(node2, "cleanExperiment", True)
ec.set(node2, "cleanProcesses", True)
- ccnd2 = ec.register_resource("LinuxCCND")
+ ccnd2 = ec.register_resource("linux::CCND")
ec.register_connection(ccnd2, node2)
- entry2 = ec.register_resource("LinuxFIBEntry")
+ entry2 = ec.register_resource("linux::FIBEntry")
ec.set(entry2, "host", host)
ec.register_connection(entry2, ccnd2)
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+
+from test_utils import skipIfAnyNotAliveWithIdentity
+
+import os
+import time
+import unittest
+
+## TODO: VALIDATE THIS TEST!
+
+class LinuxGRETunnelTestCase(unittest.TestCase):
+ def setUp(self):
+ self.host1 = "roseval.pl.sophia.inria.fr"
+ self.host2 = "138.96.118.11"
+ self.user1 = "inria_nepi"
+ self.user2 = "omflab"
+ self.identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
+ self.netblock = "192.168.1"
+
+ @skipIfAnyNotAliveWithIdentity
+ def t_tap_gre_tunnel(self, user1, host1, identity1, user2, host2,
+ identity2):
+
+ ec = ExperimentController(exp_id = "test-tap-gre-tunnel")
+
+ node1 = ec.register_resource("linux::Node")
+ ec.set(node1, "hostname", host1)
+ ec.set(node1, "username", user1)
+ ec.set(node1, "identity", identity1)
+ ec.set(node1, "cleanExperiment", True)
+ ec.set(node1, "cleanProcesses", True)
+
+ tap1 = ec.register_resource("linux::Tap")
+ ec.set(tap1, "ip", "%s.1" % self.netblock)
+ ec.set(tap1, "prefix", "32")
+ ec.register_connection(tap1, node1)
+
+ node2 = ec.register_resource("linux::Node")
+ ec.set(node2, "hostname", host2)
+ ec.set(node2, "username", user2)
+ ec.set(node2, "identity", identity2)
+ ec.set(node2, "cleanExperiment", True)
+ ec.set(node2, "cleanProcesses", True)
+
+ tap2 = ec.register_resource("linux::Tap")
+ ec.set(tap2, "ip", "%s.2" % self.netblock)
+ ec.set(tap2, "prefix", "32")
+ ec.register_connection(tap2, node2)
+
+ gretun = ec.register_resource("linux::GRETunnel")
+ ec.register_connection(tap1, gretun)
+ ec.register_connection(tap2, gretun)
+
+ app = ec.register_resource("linux::Application")
+ cmd = "ping -c3 %s.2" % self.netblock
+ ec.set(app, "command", cmd)
+ ec.register_connection(app, node1)
+
+ ec.deploy()
+
+ ec.wait_finished([app])
+
+ ping = ec.trace(app, 'stdout')
+ expected = """3 packets transmitted, 3 received, 0% packet loss"""
+ self.assertTrue(ping.find(expected) > -1)
+
+ if_name = ec.get(tap1, "deviceName")
+ self.assertTrue(if_name.startswith("tap"))
+
+ if_name = ec.get(tap2, "deviceName")
+ self.assertTrue(if_name.startswith("tap"))
+
+ ec.shutdown()
+
+ @skipIfAnyNotAliveWithIdentity
+ def t_tun_gre_tunnel(self, user1, host1, identity1, user2, host2,
+ identity2):
+
+ ec = ExperimentController(exp_id = "test-tun-gre-tunnel")
+
+ node1 = ec.register_resource("linux::Node")
+ ec.set(node1, "hostname", host1)
+ ec.set(node1, "username", user1)
+ ec.set(node1, "identity", identity1)
+ ec.set(node1, "cleanExperiment", True)
+ ec.set(node1, "cleanProcesses", True)
+
+ tun1 = ec.register_resource("linux::Tun")
+ ec.set(tun1, "ip", "%s.1" % self.netblock)
+ ec.set(tun1, "prefix", "32")
+ ec.register_connection(tun1, node1)
+
+ node2 = ec.register_resource("linux::Node")
+ ec.set(node2, "hostname", host2)
+ ec.set(node2, "username", user2)
+ ec.set(node2, "identity", identity2)
+ ec.set(node2, "cleanExperiment", True)
+ ec.set(node2, "cleanProcesses", True)
+
+ tun2 = ec.register_resource("linux::Tun")
+ ec.set(tun2, "ip", "%s.2" % self.netblock)
+ ec.set(tun2, "prefix", "32")
+ ec.register_connection(tun2, node2)
+
+ gretun = ec.register_resource("linux::GRETunnel")
+ ec.register_connection(tun1, gretun)
+ ec.register_connection(tun2, gretun)
+
+ app = ec.register_resource("linux::Application")
+ cmd = "ping -c3 %s.2" % self.netblock
+ ec.set(app, "command", cmd)
+ ec.register_connection(app, node1)
+
+ ec.deploy()
+
+ ec.wait_finished([app])
+
+ ping = ec.trace(app, 'stdout')
+ expected = """3 packets transmitted, 3 received, 0% packet loss"""
+ self.assertTrue(ping.find(expected) > -1)
+
+ if_name = ec.get(tun1, "deviceName")
+ self.assertTrue(if_name.startswith("tun"))
+
+ if_name = ec.get(tun2, "deviceName")
+ self.assertTrue(if_name.startswith("tun"))
+
+ ec.shutdown()
+
+ def test_tap_gre_tunnel(self):
+ self.t_tap_gre_tunnel(self.user1, self.host1, self.identity,
+ self.user2, self.host2, self.identity)
+
+ def test_tun_gre_tunnel(self):
+ self.t_tun_gre_tunnel(self.user1, self.host1, self.identity,
+ self.user2, self.host2, self.identity)
+
+if __name__ == '__main__':
+ unittest.main()
+
ec = ExperimentController()
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
- iface = ec.register_resource("LinuxInterface")
- chan = ec.register_resource("LinuxChannel")
+ iface = ec.register_resource("linux::Interface")
+ chan = ec.register_resource("linux::Channel")
ec.register_connection(iface, node)
ec.register_connection(iface, chan)
ec = ExperimentController(exp_id = "test-nodns")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
- app = ec.register_resource("LinuxMtr")
+ app = ec.register_resource("linux::Mtr")
ec.set(app, "noDns", False)
ec.set(app, "target", self.target)
ec.register_connection(app, node)
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.runner import ExperimentRunner
+
+from test_utils import skipIfNotAlive, skipInteractive
+
+import functools
+import glob
+import os
+import re
+import shutil
+import time
+import tempfile
+import unittest
+
+_ping_re = re.compile("[^/]+rtt min/avg/max/mdev = (?P<min>\d\.\d+)/(?P<avg>\d\.\d+)/(?P<max>\d\.\d+)/(?P<mdev>\d\.\d+)[^/]+", re.MULTILINE)
+
+class LinuxMultiRunTestCase(unittest.TestCase):
+ def setUp(self):
+ self.fedora_host = "nepi2.pl.sophia.inria.fr"
+ self.fedora_user = "inria_nepi"
+
+ self.ubuntu_host = "roseval.pl.sophia.inria.fr"
+ self.ubuntu_user = "inria_nepi"
+
+ self.target = "nepi5.pl.sophia.inria.fr"
+
+ @skipIfNotAlive
+ def t_simple_multirun(self, host, user, depends):
+
+ dirpath = tempfile.mkdtemp()
+
+ ec = ExperimentController(exp_id = "test-condition-multirun",
+ local_dir = dirpath)
+
+ node = ec.register_resource("linux::Node")
+ ec.set(node, "hostname", host)
+ ec.set(node, "username", user)
+ ec.set(node, "cleanExperiment", True)
+ ec.set(node, "cleanProcesses", True)
+
+ ping = ec.register_resource("linux::Application")
+ ec.set(ping, "command", "ping -c10 nepi.inria.fr")
+ ec.register_connection(ping, node)
+
+ collector = ec.register_resource("Collector")
+ ec.set(collector, "traceName", "stdout")
+ ec.register_connection(ping, collector)
+
+ def compute_metric_callback(ping, ec, run):
+ stdout = ec.trace(ping, "stdout")
+
+ m = _ping_re.match(stdout)
+ if not m:
+ return None
+
+ return float(m.groupdict()["min"])
+
+ metric_callback = functools.partial(compute_metric_callback, ping)
+
+ rnr = ExperimentRunner()
+ runs = rnr.run(ec, min_runs = 5,
+ compute_metric_callback = metric_callback,
+ wait_guids = [ping],
+ wait_time = 0)
+
+ self.assertTrue(runs >= 5)
+
+ dircount = 0
+
+ for d in os.listdir(ec.exp_dir):
+ path = os.path.join(ec.exp_dir, d)
+ if os.path.isdir(path):
+ dircount += 1
+ logs = glob.glob(os.path.join(path, "*.stdout"))
+ self.assertEqual(len(logs), 1)
+
+ self.assertEqual(runs, dircount)
+
+ shutil.rmtree(dirpath)
+
+ def test_simple_multirun_fedora(self):
+ self.t_simple_multirun(self.fedora_host, self.fedora_user, "nc")
+
+ def test_simple_multirun_ubuntu(self):
+ self.t_simple_multirun(self.ubuntu_host, self.ubuntu_user, "netcat")
+
+if __name__ == '__main__':
+ unittest.main()
+
ec = ExperimentController(exp_id = "test-nping")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
- app = ec.register_resource("LinuxNPing")
+ app = ec.register_resource("linux::NPing")
ec.set(app, "c", 1)
ec.set(app, "tcp", True)
ec.set(app, "p", 80)
def t_dce_ccn(self, host, user = None, identity = None):
ec = ExperimentController(exp_id = "test-dce-ccn-app")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
if host == "localhost":
ec.set(node, "hostname", host)
else:
ec.set(node, "cleanProcesses", True)
#ec.set(node, "cleanHome", True)
- simu = ec.register_resource("LinuxNS3Simulation")
- ec.set(simu, "verbose", True)
- ec.set(simu, "buildMode", "debug")
- ec.set(simu, "nsLog", "DceApplication")
+ simu = ec.register_resource("linux::ns3::Simulation")
+ #ec.set(simu, "verbose", True)
+ #ec.set(simu, "buildMode", "debug")
+ #ec.set(simu, "nsLog", "DceApplication")
ec.register_connection(simu, node)
nsnode1 = add_ns3_node(ec, simu)
ec.register_connection(chan, p2p2)
### create applications
- ccnd1 = ec.register_resource("ns3::LinuxDceCCND")
+ ccnd1 = ec.register_resource("linux::ns3::dce::CCND")
ec.set (ccnd1, "stackSize", 1<<20)
ec.set (ccnd1, "debug", 7)
ec.set (ccnd1, "capacity", 50000)
os.path.dirname(os.path.realpath(__file__)),
"repoFile1")
- ccnr = ec.register_resource("ns3::LinuxDceCCNR")
+ ccnr = ec.register_resource("linux::ns3::dce::CCNR")
ec.set (ccnr, "repoFile1", repofile)
ec.set (ccnr, "stackSize", 1<<20)
ec.set (ccnr, "StartTime", "2s")
ec.set (ccnr, "StopTime", "120s")
ec.register_connection(ccnr, nsnode1)
- ccndc1 = ec.register_resource("ns3::LinuxDceFIBEntry")
+ ccndc1 = ec.register_resource("linux::ns3::dce::FIBEntry")
ec.set (ccndc1, "protocol", "udp")
ec.set (ccndc1, "uri", "ccnx:/")
ec.set (ccndc1, "host", "10.0.0.2")
ec.set (ccndc1, "StopTime", "120s")
ec.register_connection(ccndc1, nsnode1)
- ccnd2 = ec.register_resource("ns3::LinuxDceCCND")
+ ccnd2 = ec.register_resource("linux::ns3::dce::CCND")
ec.set (ccnd2, "stackSize", 1<<20)
ec.set (ccnd2, "debug", 7)
ec.set (ccnd2, "capacity", 50000)
ec.set (ccnd2, "StopTime", "20s")
ec.register_connection(ccnd2, nsnode2)
- ccndc2 = ec.register_resource("ns3::LinuxDceFIBEntry")
+ ccndc2 = ec.register_resource("linux::ns3::dce::FIBEntry")
ec.set (ccndc2, "protocol", "udp")
ec.set (ccndc2, "uri", "ccnx:/")
ec.set (ccndc2, "host", "10.0.0.1")
ec.set (ccndc2, "StopTime", "120s")
ec.register_connection(ccndc2, nsnode2)
- ccnpeek = ec.register_resource("ns3::LinuxDceCCNPeek")
+ ccnpeek = ec.register_resource("linux::ns3::dce::CCNPeek")
ec.set (ccnpeek, "contentName", "ccnx:/test/bunny.ts")
ec.set (ccnpeek, "stackSize", 1<<20)
ec.set (ccnpeek, "StartTime", "4s")
ec.set (ccnpeek, "StopTime", "120s")
ec.register_connection(ccnpeek, nsnode2)
- ccncat = ec.register_resource("ns3::LinuxDceCCNCat")
+ ccncat = ec.register_resource("linux::ns3::dce::CCNCat")
ec.set (ccncat, "contentName", "ccnx:/test/bunny.ts")
ec.set (ccncat, "stackSize", 1<<20)
ec.set (ccncat, "StartTime", "4s")
def t_dce_ccnpeek(self, host, user = None, identity = None):
ec = ExperimentController(exp_id = "test-dce-ccnpeek")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
if host == "localhost":
ec.set(node, "hostname", host)
else:
ec.set(node, "cleanProcesses", True)
#ec.set(node, "cleanHome", True)
- simu = ec.register_resource("LinuxNS3Simulation")
+ simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "verbose", True)
ec.set(simu, "nsLog", "DceApplication")
ec.register_connection(simu, node)
nsnode = add_ns3_node(ec, simu)
### create applications
- ccnd = ec.register_resource("ns3::LinuxDceCCND")
+ ccnd = ec.register_resource("linux::ns3::dce::CCND")
ec.set (ccnd, "stackSize", 1<<20)
ec.set (ccnd, "StartTime", "1s")
ec.register_connection(ccnd, nsnode)
- ccnpoke = ec.register_resource("ns3::LinuxDceCCNPoke")
+ ccnpoke = ec.register_resource("linux::ns3::dce::CCNPoke")
ec.set (ccnpoke, "contentName", "ccnx:/chunk0")
ec.set (ccnpoke, "content", "DATA")
ec.set (ccnpoke, "stackSize", 1<<20)
ec.set (ccnpoke, "StartTime", "2s")
ec.register_connection(ccnpoke, nsnode)
- ccnpeek = ec.register_resource("ns3::LinuxDceCCNPeek")
+ ccnpeek = ec.register_resource("linux::ns3::dce::CCNPeek")
ec.set (ccnpeek, "contentName", "ccnx:/chunk0")
ec.set (ccnpeek, "stackSize", 1<<20)
ec.set (ccnpeek, "StartTime", "4s")
def t_dce_ccnpeek_with_stack(self, host, user = None, identity = None):
ec = ExperimentController(exp_id = "test-dce-peek-lostack")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
if host == "localhost":
ec.set(node, "hostname", host)
else:
ec.set(node, "cleanProcesses", True)
#ec.set(node, "cleanHome", True)
- simu = ec.register_resource("LinuxNS3Simulation")
+ simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "verbose", True)
ec.register_connection(simu, node)
ec.register_connection(nsnode, simu)
### create applications
- ccnd = ec.register_resource("ns3::LinuxDceCCND")
+ ccnd = ec.register_resource("linux::ns3::dce::CCND")
ec.set (ccnd, "stackSize", 1<<20)
ec.set (ccnd, "StartTime", "1s")
ec.register_connection(ccnd, nsnode)
- ccnpoke = ec.register_resource("ns3::LinuxDceCCNPoke")
+ ccnpoke = ec.register_resource("linux::ns3::dce::CCNPoke")
ec.set (ccnpoke, "contentName", "ccnx:/chunk0")
ec.set (ccnpoke, "content", "DATA")
ec.set (ccnpoke, "stackSize", 1<<20)
ec.set (ccnpoke, "StartTime", "2s")
ec.register_connection(ccnpoke, nsnode)
- ccnpeek = ec.register_resource("ns3::LinuxDceCCNPeek")
+ ccnpeek = ec.register_resource("linux::ns3::dce::CCNPeek")
ec.set (ccnpeek, "contentName", "ccnx:/chunk0")
ec.set (ccnpeek, "stackSize", 1<<20)
ec.set (ccnpeek, "StartTime", "4s")
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.resource import ResourceState, ResourceAction
+from nepi.execution.trace import TraceAttr
+
+from test_utils import skipIfNotAlive
+
+import os
+import time
+import unittest
+
+def add_ns3_node(ec, simu):
+ node = ec.register_resource("ns3::Node")
+ ec.register_connection(node, simu)
+
+ ipv4 = ec.register_resource("ns3::Ipv4L3Protocol")
+ ec.register_connection(node, ipv4)
+
+ arp = ec.register_resource("ns3::ArpL3Protocol")
+ ec.register_connection(node, arp)
+
+ icmp = ec.register_resource("ns3::Icmpv4L4Protocol")
+ ec.register_connection(node, icmp)
+
+ udp = ec.register_resource("ns3::UdpL4Protocol")
+ ec.register_connection(node, udp)
+
+ return node
+
+def add_fd_device(ec, node, ip, prefix):
+ dev = ec.register_resource("ns3::FdNetDevice")
+ ec.set(dev, "ip", ip)
+ ec.set(dev, "prefix", prefix)
+ ec.register_connection(node, dev)
+
+ return dev
+
+def add_tap_device(ec, node, ip, prefix):
+ dev = ec.register_resource("linux::Tap")
+ ec.set(dev, "ip", ip)
+ ec.set(dev, "prefix", prefix)
+ ec.register_connection(node, dev)
+
+ return dev
+
+def add_point2point_device(ec, node, ip, prefix):
+ dev = ec.register_resource("ns3::PointToPointNetDevice")
+ ec.set(dev, "ip", ip)
+ ec.set(dev, "prefix", prefix)
+ ec.register_connection(node, dev)
+
+ queue = ec.register_resource("ns3::DropTailQueue")
+ ec.register_connection(dev, queue)
+
+ return dev
+
+class LinuxNS3FdNetDeviceTest(unittest.TestCase):
+ def setUp(self):
+ self.fedora_host = "nepi2.pl.sophia.inria.fr"
+ self.fedora_user = "inria_nepi"
+ self.fedora_identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
+
+ @skipIfNotAlive
+ def t_cross_ping(self, host, user = None, identity = None):
+ ec = ExperimentController(exp_id = "test-linux-ns3-tap-fd")
+
+ node = ec.register_resource("linux::Node")
+ if host == "localhost":
+ ec.set(node, "hostname", "localhost")
+ else:
+ ec.set(node, "hostname", host)
+ ec.set(node, "username", user)
+ ec.set(node, "identity", identity)
+
+ ec.set(node, "cleanProcesses", True)
+ ec.set(node, "cleanExperiment", True)
+
+ simu = ec.register_resource("linux::ns3::Simulation")
+ ec.set(simu, "simulatorImplementationType", "ns3::RealtimeSimulatorImpl")
+ ec.set(simu, "checksumEnabled", True)
+ ec.set(simu, "verbose", True)
+ #ec.set(simu, "buildMode", "debug")
+ #ec.set(simu, "nsLog", "FdNetDevice")
+ ec.register_connection(simu, node)
+
+ nsnode1 = add_ns3_node(ec, simu)
+ dev1 = add_point2point_device(ec, nsnode1, "10.0.0.1", "30")
+
+ nsnode2 = add_ns3_node(ec, simu)
+ dev2 = add_point2point_device(ec, nsnode2, "10.0.0.2", "30")
+
+ # Add routes on the NS3 side
+ r1 = ec.register_resource("ns3::Route")
+ ec.set(r1, "network", "10.0.1.0")
+ ec.set(r1, "prefix", "30")
+ ec.set(r1, "nexthop", "10.0.0.1")
+ ec.register_connection(r1, nsnode2)
+
+ # Create channel
+ chan = ec.register_resource("ns3::PointToPointChannel")
+ ec.set(chan, "Delay", "0s")
+ ec.register_connection(chan, dev1)
+ ec.register_connection(chan, dev2)
+
+ fddev = add_fd_device(ec, nsnode1, "10.0.1.2", "30")
+ ec.enable_trace(fddev, "pcap")
+ ec.enable_trace(fddev, "promiscPcap")
+ ec.enable_trace(fddev, "ascii")
+
+ tap = add_tap_device(ec, node, "10.0.1.1", "30")
+
+ crosslink = ec.register_resource("linux::ns3::TapFdLink")
+ ec.register_connection(crosslink, tap)
+ ec.register_connection(crosslink, fddev)
+
+ # Add routes on the localhost side
+ r2 = ec.register_resource("linux::Route")
+ ec.set(r2, "network", "10.0.0.0")
+ ec.set(r2, "prefix", "30")
+ ec.set(r2, "nexthop", "10.0.1.2")
+ ec.register_connection(r2, tap)
+
+ app = ec.register_resource("linux::Application")
+ ec.set(app, "command", "ping -c3 10.0.0.1")
+ ec.register_connection(app, node)
+
+ ec.register_condition(app, ResourceAction.START, simu,
+ ResourceState.STARTED, time="5s")
+
+ ec.deploy()
+
+ ec.wait_finished([app])
+
+ stdout = ec.trace(app, "stdout")
+ expected = "3 packets transmitted, 3 received, 0% packet loss"
+ self.assertTrue(stdout.find(expected) > -1)
+
+ ## Releasing to force ns3 to flush the traces
+ ec.release()
+ pcap = ec.trace(fddev, "pcap")
+
+ self.assertTrue(len(pcap) > 4000)
+ ec.shutdown()
+
+ def ztest_cross_ping_fedora(self):
+ self.t_cross_ping(self.fedora_host, self.fedora_user, self.fedora_identity)
+
+ def test_cross_ping_local(self):
+ self.t_cross_ping("localhost")
+
+
+if __name__ == '__main__':
+ unittest.main()
+
self.fedora_user = "inria_nepi"
self.fedora_identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
+ self.ubuntu_host = "roseval.pl.sophia.inria.fr"
+ self.ubuntu_user = "inria_nepi"
+ self.ubuntu_identity = "%s/.ssh/id_rsa" % (os.environ['HOME'])
+
@skipIfNotAlive
def t_dce_ping(self, host, user = None, identity = None):
ec = ExperimentController(exp_id = "test-dce-ping")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
if host == "localhost":
ec.set(node, "hostname", host)
else:
ec.set(node, "identity", identity)
ec.set(node, "cleanProcesses", True)
- #ec.set(node, "cleanHome", True)
+ #ec.set(node, "cleanExperiment", True)
- simu = ec.register_resource("LinuxNS3Simulation")
+ simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "verbose", True)
ec.set(simu, "buildMode", "debug")
ec.set(simu, "nsLog", "DceApplication")
ec.register_connection(chan, p2p2)
### create applications
- ping = ec.register_resource("ns3::LinuxDceApplication")
+ ping = ec.register_resource("linux::ns3::dce::Application")
ec.set (ping, "sources", "http://www.skbuff.net/iputils/iputils-s20101006.tar.bz2")
ec.set (ping, "build", "tar xvjf ${SRC}/iputils-s20101006.tar.bz2 && "
"cd iputils-s20101006/ && "
def t_dce_ccn(self, host, user = None, identity = None):
ec = ExperimentController(exp_id = "test-dce-ccn")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
if host == "localhost":
ec.set(node, "hostname", host)
else:
#ec.set(node, "cleanProcesses", True)
#ec.set(node, "cleanHome", True)
- simu = ec.register_resource("LinuxNS3Simulation")
+ simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "verbose", True)
ec.set(simu, "buildMode", "debug")
ec.set(simu, "nsLog", "DceApplication")
ec.register_connection(chan, p2p2)
### create applications
- ccnd1 = ec.register_resource("ns3::LinuxCCNDceApplication")
-
- # NOTE THAT INSTALLATION MIGHT FAIL IF openjdk-6-jdk is not installed
- ec.set(ccnd1, "depends", "libpcap0.8-dev openjdk-6-jdk ant1.8 autoconf "
- "libssl-dev libexpat-dev libpcap-dev libecryptfs0 libxml2-utils auto"
- "make gawk gcc g++ git-core pkg-config libpcre3-dev openjdk-6-jre-lib")
+ ccnd1 = ec.register_resource("linux::ns3::dce::CCNApplication")
+
+ if host == self.fedora_host:
+ depends = ( " autoconf openssl-devel expat-devel libpcap-devel "
+ " ecryptfs-utils-devel libxml2-devel automake gawk "
+ " gcc gcc-c++ git pcre-devel make ")
+ else: # UBUNTU
+ # NOTE THAT INSTALLATION MIGHT FAIL IF openjdk-6-jdk is not installed
+ depends = ( "libpcap0.8-dev openjdk-6-jdk ant1.8 autoconf "
+ "libssl-dev libexpat-dev libpcap-dev libecryptfs0 libxml2-utils auto"
+ "make gawk gcc g++ git-core pkg-config libpcre3-dev openjdk-6-jre-lib")
+
+ ec.set (ccnd1, "depends", depends)
ec.set (ccnd1, "sources", "http://www.ccnx.org/releases/ccnx-0.7.2.tar.gz")
ec.set (ccnd1, "build", "tar zxf ${SRC}/ccnx-0.7.2.tar.gz && "
"cd ccnx-0.7.2 && "
os.path.dirname(os.path.realpath(__file__)), "ccn",
"repoFile1")
- ccnr = ec.register_resource("ns3::LinuxCCNDceApplication")
+ ccnr = ec.register_resource("linux::ns3::dce::CCNApplication")
ec.set (ccnr, "binary", "ccnr")
ec.set (ccnr, "environment", "CCNR_DIRECTORY=/REPO/")
ec.set (ccnr, "files", "%s=/REPO/repoFile1" % repofile)
ec.set (ccnr, "StopTime", "120s")
ec.register_connection(ccnr, nsnode1)
- ccndc1 = ec.register_resource("ns3::LinuxCCNDceApplication")
+ ccndc1 = ec.register_resource("linux::ns3::dce::CCNApplication")
ec.set (ccndc1, "binary", "ccndc")
ec.set (ccndc1, "arguments", "-v;add;ccnx:/;udp;10.0.0.2")
ec.set (ccndc1, "stackSize", 1<<20)
ec.set (ccndc1, "StopTime", "120s")
ec.register_connection(ccndc1, nsnode1)
- ccnd2 = ec.register_resource("ns3::LinuxCCNDceApplication")
+ ccnd2 = ec.register_resource("linux::ns3::dce::CCNApplication")
ec.set (ccnd2, "binary", "ccnd")
ec.set (ccnd2, "stackSize", 1<<20)
ec.set (ccnd2, "environment", "CCND_CAP=50000; CCND_DEBUG=7")
ec.set (ccnd2, "StopTime", "120s")
ec.register_connection(ccnd2, nsnode2)
- ccndc2 = ec.register_resource("ns3::LinuxCCNDceApplication")
+ ccndc2 = ec.register_resource("linux::ns3::dce::CCNApplication")
ec.set (ccndc2, "binary", "ccndc")
ec.set (ccndc2, "arguments", "-v;add;ccnx:/;udp;10.0.0.1")
ec.set (ccndc2, "stackSize", 1<<20)
ec.set (ccndc2, "StopTime", "120s")
ec.register_connection(ccndc2, nsnode2)
- ccnpeek = ec.register_resource("ns3::LinuxCCNDceApplication")
+ ccnpeek = ec.register_resource("linux::ns3::dce::CCNApplication")
ec.set (ccnpeek, "binary", "ccnpeek")
ec.set (ccnpeek, "arguments", "ccnx:/test/bunny.ts")
ec.set (ccnpeek, "stdinFile", "")
ec.set (ccnpeek, "StopTime", "120s")
ec.register_connection(ccnpeek, nsnode2)
- ccncat = ec.register_resource("ns3::LinuxCCNDceApplication")
+ ccncat = ec.register_resource("linux::ns3::dce::CCNApplication")
ec.set (ccncat, "binary", "ccncat")
ec.set (ccncat, "arguments", "ccnx:/test/bunny.ts")
ec.set (ccncat, "stdinFile", "")
ec.shutdown()
def test_dce_ping_fedora(self):
- self.t_dce_ping(self.fedora_host, self.fedora_user, self.fedora_identity)
+ self.t_dce_ping(self.fedora_host, self.fedora_user, self.fedora_identity)
+
+ def test_dce_ping_ubuntu(self):
+ self.t_dce_ping(self.ubuntu_host, self.ubuntu_user, self.ubuntu_identity)
def test_dce_ping_local(self):
self.t_dce_ping("localhost")
def test_dce_ccn_fedora(self):
self.t_dce_ccn(self.fedora_host, self.fedora_user, self.fedora_identity)
+ def test_dce_ccn_ubuntu(self):
+ self.t_dce_ccn(self.ubuntu_host, self.ubuntu_user, self.ubuntu_identity)
+
def test_dce_ccn_local(self):
self.t_dce_ccn("localhost")
def t_dce_ping(self, host, user = None, identity = None):
ec = ExperimentController(exp_id = "test-dce-ping-app")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
if host == "localhost":
ec.set(node, "hostname", host)
else:
ec.set(node, "cleanProcesses", True)
#ec.set(node, "cleanHome", True)
- simu = ec.register_resource("LinuxNS3Simulation")
+ simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "verbose", True)
ec.set(simu, "buildMode", "debug")
ec.set(simu, "nsLog", "DceApplication")
ec.register_connection(chan, p2p2)
### create applications
- ping = ec.register_resource("ns3::LinuxDcePing")
+ ping = ec.register_resource("linux::ns3::dce::Ping")
ec.set (ping, "stackSize", 1<<20)
ec.set (ping, "target", "10.0.0.2")
ec.set (ping, "count", "10")
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.trace import TraceAttr
+
+from test_utils import skipIfNotAlive
+
+import os
+import time
+import unittest
+
+def add_ns3_node(ec, simu):
+ node = ec.register_resource("ns3::Node")
+ ec.register_connection(node, simu)
+
+ ipv4 = ec.register_resource("ns3::Ipv4L3Protocol")
+ ec.register_connection(node, ipv4)
+
+ arp = ec.register_resource("ns3::ArpL3Protocol")
+ ec.register_connection(node, arp)
+
+ icmp = ec.register_resource("ns3::Icmpv4L4Protocol")
+ ec.register_connection(node, icmp)
+
+ udp = ec.register_resource("ns3::UdpL4Protocol")
+ ec.register_connection(node, udp)
+
+ return node
+
+def add_fd_device(ec, node, ip, prefix):
+ dev = ec.register_resource("ns3::FdNetDevice")
+ ec.set(dev, "ip", ip)
+ ec.set(dev, "prefix", prefix)
+ ec.register_connection(node, dev)
+
+ return dev
+
+class LinuxNS3FdNetDeviceTest(unittest.TestCase):
+ def setUp(self):
+ self.fedora_host = "nepi2.pl.sophia.inria.fr"
+ self.fedora_user = "inria_nepi"
+ self.fedora_identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
+
+ @skipIfNotAlive
+ def t_dummy(self, host, user = None, identity = None):
+ ec = ExperimentController(exp_id = "test-ns3-fd-dummy")
+
+ node = ec.register_resource("linux::Node")
+ if host == "localhost":
+ ec.set(node, "hostname", "localhost")
+ else:
+ ec.set(node, "hostname", host)
+ ec.set(node, "username", user)
+ ec.set(node, "identity", identity)
+
+ ec.set(node, "cleanProcesses", True)
+ #ec.set(node, "cleanExperiment", True)
+
+ simu = ec.register_resource("linux::ns3::Simulation")
+ ec.set(simu, "simulatorImplementationType", "ns3::RealtimeSimulatorImpl")
+ ec.set(simu, "checksumEnabled", True)
+ ec.set(simu, "verbose", True)
+ ec.register_connection(simu, node)
+
+ nsnode1 = add_ns3_node(ec, simu)
+ dev1 = add_fd_device(ec, nsnode1, "10.0.0.1", "30")
+
+ nsnode2 = add_ns3_node(ec, simu)
+ dev2 = add_fd_device(ec, nsnode2, "10.0.0.2", "30")
+
+ channel = ec.register_resource("ns3::PipeChannel")
+ ec.register_connection(channel, dev1)
+ ec.register_connection(channel, dev2)
+
+ ### create pinger
+ ping = ec.register_resource("ns3::V4Ping")
+ ec.set (ping, "Remote", "10.0.0.2")
+ ec.set (ping, "Interval", "1s")
+ ec.set (ping, "Verbose", True)
+ ec.set (ping, "StartTime", "0s")
+ ec.set (ping, "StopTime", "20s")
+ ec.register_connection(ping, nsnode1)
+
+ ec.deploy()
+
+ ec.wait_finished([ping])
+
+ stdout = ec.trace(simu, "stdout")
+
+ print stdout
+
+ expected = "20 packets transmitted, 20 received, 0% packet loss"
+ self.assertTrue(stdout.find(expected) > -1)
+
+ ec.shutdown()
+
+ def ztest_dummy_fedora(self):
+ self.t_dummy(self.fedora_host, self.fedora_user, self.fedora_identity)
+
+ def test_dummy_local(self):
+ self.t_dummy("localhost")
+
+
+if __name__ == '__main__':
+ unittest.main()
+
def t_p2p_ping(self, host, user = None, identity = None):
ec = ExperimentController(exp_id = "test-ns3-p2p-ping")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
if host == "localhost":
ec.set(node, "hostname", "localhost")
else:
ec.set(node, "cleanProcesses", True)
#ec.set(node, "cleanHome", True)
- simu = ec.register_resource("LinuxNS3Simulation")
+ simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "verbose", True)
ec.register_connection(simu, node)
def t_csma_ping(self, host, user = None, identity = None):
ec = ExperimentController(exp_id = "test-ns3-csma-ping")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
if host == "localhost":
ec.set(node, "hostname", "localhost")
else:
ec.set(node, "cleanProcesses", True)
#ec.set(node, "cleanHome", True)
- simu = ec.register_resource("LinuxNS3Simulation")
+ simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "verbose", True)
ec.register_connection(simu, node)
def t_user_sources(self, host, user = None, identity = None):
ec = ExperimentController(exp_id = "test-ns3-user-sources")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
if host == "localhost":
ec.set(node, "hostname", "localhost")
else:
ec.set(node, "cleanProcesses", True)
#ec.set(node, "cleanHome", True)
- simu = ec.register_resource("LinuxNS3Simulation")
+ simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "verbose", True)
sources = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"ns-3.18-user.tar.gz")
def t_compile_debug_mode(self, host, user = None, identity = None):
ec = ExperimentController(exp_id = "test-ns3-debug-mode")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
if host == "localhost":
ec.set(node, "hostname", host)
else:
ec.set(node, "cleanProcesses", True)
#ec.set(node, "cleanHome", True)
- simu = ec.register_resource("LinuxNS3Simulation")
+ simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "verbose", True)
ec.set(simu, "nsLog", "V4Ping:Node")
ec.set(simu, "buildMode", "debug")
def t_real_time(self, host, user = None, identity = None):
ec = ExperimentController(exp_id = "test-ns3-real-time")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
if host == "localhost":
ec.set(node, "hostname", "localhost")
else:
#ec.set(node, "cleanHome", True)
- simu = ec.register_resource("LinuxNS3Simulation")
+ simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "simulatorImplementationType", "ns3::RealtimeSimulatorImpl")
ec.set(simu, "checksumEnabled", True)
ec.set(simu, "verbose", True)
def t_traces(self, host, user = None, identity = None):
ec = ExperimentController(exp_id = "test-ns3-traces")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
if host == "localhost":
ec.set(node, "hostname", "localhost")
else:
ec.set(node, "cleanProcesses", True)
#ec.set(node, "cleanHome", True)
- simu = ec.register_resource("LinuxNS3Simulation")
+ simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "verbose", True)
ec.register_connection(simu, node)
ec = ExperimentController(exp_id = "test-ns3-wifi-ping")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
if host == "localhost":
ec.set(node, "hostname", "localhost")
else:
ec.set(node, "cleanProcesses", True)
#ec.set(node, "cleanHome", True)
- simu = ec.register_resource("LinuxNS3Simulation")
+ simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "verbose", True)
ec.register_connection(simu, node)
"""
ec = ExperimentController(exp_id = "test-ns3-routes")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
if host == "localhost":
ec.set(node, "hostname", host)
else:
ec.set(node, "cleanProcesses", True)
#ec.set(node, "cleanHome", True)
- simu = ec.register_resource("LinuxNS3Simulation")
+ simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "verbose", True)
ec.register_connection(simu, node)
"""
ec = ExperimentController(exp_id = "test-ns3-routing")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
if host == "localhost":
ec.set(node, "hostname", "localhost")
else:
ec.set(node, "cleanProcesses", True)
#ec.set(node, "cleanHome", True)
- simu = ec.register_resource("LinuxNS3Simulation")
+ simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "verbose", True)
ec.set(simu, "populateRoutingTables", True)
ec.register_connection(simu, node)
def t_dce(self, host, user = None, identity = None):
ec = ExperimentController(exp_id = "test-ns3-dce")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
if host == "localhost":
ec.set(node, "hostname", host)
else:
ec.set(node, "cleanProcesses", True)
#ec.set(node, "cleanHome", True)
- simu = ec.register_resource("LinuxNS3Simulation")
+ simu = ec.register_resource("linux::ns3::Simulation")
ec.set(simu, "verbose", True)
ec.set(simu, "buildMode", "debug")
ec.set(simu, "nsLog", "DceApplication")
ec.register_connection(chan, p2p2)
### create applications
- udp_perf = ec.register_resource("ns3::DceApplication")
+ udp_perf = ec.register_resource("linux::ns3::dce::Application")
ec.set (udp_perf, "binary", "udp-perf")
ec.set (udp_perf, "stackSize", 1<<20)
ec.set (udp_perf, "arguments", "--duration=10;--nodes=2")
ec.set (udp_perf, "StopTime", "20s")
ec.register_connection(udp_perf, nsnode1)
- udp_perf_client = ec.register_resource("ns3::DceApplication")
+ udp_perf_client = ec.register_resource("linux::ns3::dce::Application")
ec.set (udp_perf_client, "binary", "udp-perf")
ec.set (udp_perf_client, "stackSize", 1<<20)
ec.set (udp_perf_client, "arguments", "--client;--nodes=2;--host=10.0.0.1;--duration=10")
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.trace import TraceAttr
+
+from test_utils import skipIfNotAlive
+
+import os
+import shutil
+import time
+import tempfile
+import unittest
+
+def add_ns3_node(ec, simu):
+ node = ec.register_resource("ns3::Node")
+ ec.register_connection(node, simu)
+
+ ipv4 = ec.register_resource("ns3::Ipv4L3Protocol")
+ ec.register_connection(node, ipv4)
+
+ arp = ec.register_resource("ns3::ArpL3Protocol")
+ ec.register_connection(node, arp)
+
+ icmp = ec.register_resource("ns3::Icmpv4L4Protocol")
+ ec.register_connection(node, icmp)
+
+ udp = ec.register_resource("ns3::UdpL4Protocol")
+ ec.register_connection(node, udp)
+
+ return node
+
+def add_point2point_device(ec, node, ip, prefix):
+ dev = ec.register_resource("ns3::PointToPointNetDevice")
+ ec.set(dev, "ip", ip)
+ ec.set(dev, "prefix", prefix)
+ ec.register_connection(node, dev)
+
+ queue = ec.register_resource("ns3::DropTailQueue")
+ ec.register_connection(dev, queue)
+
+ return dev
+
+def add_csma_device(ec, node, ip, prefix):
+ dev = ec.register_resource("ns3::CsmaNetDevice")
+ ec.set(dev, "ip", ip)
+ ec.set(dev, "prefix", prefix)
+ ec.register_connection(node, dev)
+
+ queue = ec.register_resource("ns3::DropTailQueue")
+ ec.register_connection(dev, queue)
+
+ return dev
+
+def add_wifi_device(ec, node, ip, prefix,
+ access_point = False):
+ dev = ec.register_resource("ns3::WifiNetDevice")
+ ec.set(dev, "ip", ip)
+ ec.set(dev, "prefix", prefix)
+ ec.register_connection(node, dev)
+
+ phy = ec.register_resource("ns3::YansWifiPhy")
+ ec.set(phy, "Standard", "WIFI_PHY_STANDARD_80211a")
+ ec.register_connection(dev, phy)
+
+ error = ec.register_resource("ns3::NistErrorRateModel")
+ ec.register_connection(phy, error)
+
+ manager = ec.register_resource("ns3::ArfWifiManager")
+ ec.register_connection(dev, manager)
+
+ if access_point:
+ mac = ec.register_resource("ns3::ApWifiMac")
+ else:
+ mac = ec.register_resource("ns3::StaWifiMac")
+
+ ec.set(mac, "Standard", "WIFI_PHY_STANDARD_80211a")
+ ec.register_connection(dev, mac)
+
+ return dev, phy
+
+def add_random_mobility(ec, node, x, y, z, speed, bounds_width,
+ bounds_height):
+ position = "%d:%d:%d" % (x, y, z)
+ bounds = "0|%d|0|%d" % (bounds_width, bounds_height)
+ speed = "ns3::UniformRandomVariable[Min=%d|Max=%s]" % (speed, speed)
+ pause = "ns3::ConstantRandomVariable[Constant=1.0]"
+
+ mobility = ec.register_resource("ns3::RandomDirection2dMobilityModel")
+ ec.set(mobility, "Position", position)
+ ec.set(mobility, "Bounds", bounds)
+ ec.set(mobility, "Speed", speed)
+ ec.set(mobility, "Pause", pause)
+ ec.register_connection(node, mobility)
+ return mobility
+
+def add_constant_mobility(ec, node, x, y, z):
+ mobility = ec.register_resource("ns3::ConstantPositionMobilityModel")
+ position = "%d:%d:%d" % (x, y, z)
+ ec.set(mobility, "Position", position)
+ ec.register_connection(node, mobility)
+ return mobility
+
+def add_wifi_channel(ec):
+ channel = ec.register_resource("ns3::YansWifiChannel")
+ delay = ec.register_resource("ns3::ConstantSpeedPropagationDelayModel")
+ ec.register_connection(channel, delay)
+
+ loss = ec.register_resource("ns3::LogDistancePropagationLossModel")
+ ec.register_connection(channel, loss)
+
+ return channel
+
+class LinuxNS3SimulationSerializationTest(unittest.TestCase):
+ def setUp(self):
+ #self.fedora_host = "nepi2.pl.sophia.inria.fr"
+ self.fedora_host = "planetlab1.informatik.uni-erlangen.de"
+ self.fedora_user = "inria_nepi"
+ self.fedora_identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
+
+ @skipIfNotAlive
+ def t_wifi_serialize(self, host, user = None, identity = None):
+ bounds_width = bounds_height = 200
+ x = y = 100
+ speed = 1
+
+ dirpath = tempfile.mkdtemp()
+
+ ec = ExperimentController(exp_id = "test-ns3-wifi-ping")
+
+ node = ec.register_resource("linux::Node")
+ if host == "localhost":
+ ec.set(node, "hostname", "localhost")
+ else:
+ ec.set(node, "hostname", host)
+ ec.set(node, "username", user)
+ ec.set(node, "identity", identity)
+
+ ec.set(node, "cleanProcesses", True)
+ #ec.set(node, "cleanHome", True)
+
+ simu = ec.register_resource("linux::ns3::Simulation")
+ ec.set(simu, "verbose", True)
+ ec.register_connection(simu, node)
+
+ nsnode1 = add_ns3_node(ec, simu)
+ dev1, phy1 = add_wifi_device(ec, nsnode1, "10.0.0.1", "24", access_point = True)
+ mobility1 = add_constant_mobility(ec, nsnode1, x, y, 0)
+
+ nsnode2 = add_ns3_node(ec, simu)
+ dev2, phy2 = add_wifi_device(ec, nsnode2, "10.0.0.2", "24", access_point = False)
+ mobility1 = add_constant_mobility(ec, nsnode2, x, y, 0)
+ #mobility2 = add_random_mobility(ec, nsnode2, x, y, 0, speed, bounds_width, bounds_height)
+
+ # Create channel
+ chan = add_wifi_channel(ec)
+ ec.register_connection(chan, phy1)
+ ec.register_connection(chan, phy2)
+
+ ### create pinger
+ ping = ec.register_resource("ns3::V4Ping")
+ ec.set (ping, "Remote", "10.0.0.1")
+ ec.set (ping, "Interval", "1s")
+ ec.set (ping, "Verbose", True)
+ ec.set (ping, "StartTime", "1s")
+ ec.set (ping, "StopTime", "21s")
+ ec.register_connection(ping, nsnode2)
+
+ filepath = ec.save(dirpath)
+ print filepath
+
+ ec.deploy()
+
+ ec.wait_finished([ping])
+
+ stdout = ec.trace(simu, "stdout")
+
+ expected = "20 packets transmitted, 20 received, 0% packet loss"
+ self.assertTrue(stdout.find(expected) > -1)
+
+ ec.shutdown()
+
+ # Load serialized experiment
+ ec2 = ExperimentController.load(filepath)
+
+ ec2.deploy()
+
+ ec2.wait_finished([ping])
+
+ self.assertEquals(len(ec.resources), len(ec2.resources))
+
+ stdout = ec2.trace(simu, "stdout")
+
+ expected = "20 packets transmitted, 20 received, 0% packet loss"
+ self.assertTrue(stdout.find(expected) > -1)
+
+ ec2.shutdown()
+
+ shutil.rmtree(dirpath)
+
+ @skipIfNotAlive
+ def t_routing_serialize(self, host, user = None, identity = None):
+ """
+ network topology:
+ n4
+ |
+ n1 -- p2p -- n2 -- csma -- n5 -- p2p -- n6
+ | |
+ ping n6 n3
+
+
+ """
+ dirpath = tempfile.mkdtemp()
+
+ ec = ExperimentController(exp_id = "test-ns3-routes")
+
+ node = ec.register_resource("linux::Node")
+ if host == "localhost":
+ ec.set(node, "hostname", host)
+ else:
+ ec.set(node, "hostname", host)
+ ec.set(node, "username", user)
+ ec.set(node, "identity", identity)
+
+ ec.set(node, "cleanProcesses", True)
+ #ec.set(node, "cleanHome", True)
+
+ simu = ec.register_resource("linux::ns3::Simulation")
+ ec.set(simu, "verbose", True)
+ ec.register_connection(simu, node)
+
+ nsnode1 = add_ns3_node(ec, simu)
+ p2p12 = add_point2point_device(ec, nsnode1, "10.0.0.1", "30")
+
+ nsnode2 = add_ns3_node(ec, simu)
+ p2p21 = add_point2point_device(ec, nsnode2, "10.0.0.2", "30")
+ csma2 = add_csma_device(ec, nsnode2, "10.0.1.1", "24")
+
+ nsnode3 = add_ns3_node(ec, simu)
+ csma3 = add_csma_device(ec, nsnode3, "10.0.1.2", "24")
+
+ nsnode4 = add_ns3_node(ec, simu)
+ csma4 = add_csma_device(ec, nsnode4, "10.0.1.3", "24")
+
+ nsnode5 = add_ns3_node(ec, simu)
+ p2p56 = add_point2point_device(ec, nsnode5, "10.0.2.1", "30")
+ csma5 = add_csma_device(ec, nsnode5, "10.0.1.4", "24")
+
+ nsnode6 = add_ns3_node(ec, simu)
+ p2p65 = add_point2point_device(ec, nsnode6, "10.0.2.2", "30")
+
+ # P2P chan1
+ p2p_chan1 = ec.register_resource("ns3::PointToPointChannel")
+ ec.set(p2p_chan1, "Delay", "0s")
+ ec.register_connection(p2p_chan1, p2p12)
+ ec.register_connection(p2p_chan1, p2p21)
+
+ # CSMA chan
+ csma_chan = ec.register_resource("ns3::CsmaChannel")
+ ec.set(csma_chan, "Delay", "0s")
+ ec.register_connection(csma_chan, csma2)
+ ec.register_connection(csma_chan, csma3)
+ ec.register_connection(csma_chan, csma4)
+ ec.register_connection(csma_chan, csma5)
+
+ # P2P chan2
+ p2p_chan2 = ec.register_resource("ns3::PointToPointChannel")
+ ec.set(p2p_chan2, "Delay", "0s")
+ ec.register_connection(p2p_chan2, p2p56)
+ ec.register_connection(p2p_chan2, p2p65)
+
+ # Add routes - n1 - n6
+ r1 = ec.register_resource("ns3::Route")
+ ec.set(r1, "network", "10.0.2.0")
+ ec.set(r1, "prefix", "30")
+ ec.set(r1, "nexthop", "10.0.0.2")
+ ec.register_connection(r1, nsnode1)
+
+ # Add routes - n2 - n6
+ r2 = ec.register_resource("ns3::Route")
+ ec.set(r2, "network", "10.0.2.0")
+ ec.set(r2, "prefix", "30")
+ ec.set(r2, "nexthop", "10.0.1.4")
+ ec.register_connection(r2, nsnode2)
+
+ # Add routes - n5 - n1
+ r5 = ec.register_resource("ns3::Route")
+ ec.set(r5, "network", "10.0.0.0")
+ ec.set(r5, "prefix", "30")
+ ec.set(r5, "nexthop", "10.0.1.1")
+ ec.register_connection(r5, nsnode5)
+
+ # Add routes - n6 - n1
+ r6 = ec.register_resource("ns3::Route")
+ ec.set(r6, "network", "10.0.0.0")
+ ec.set(r6, "prefix", "30")
+ ec.set(r6, "nexthop", "10.0.2.1")
+ ec.register_connection(r6, nsnode6)
+
+ ### create pinger
+ ping = ec.register_resource("ns3::V4Ping")
+ ec.set (ping, "Remote", "10.0.2.2")
+ ec.set (ping, "Interval", "1s")
+ ec.set (ping, "Verbose", True)
+ ec.set (ping, "StartTime", "1s")
+ ec.set (ping, "StopTime", "21s")
+ ec.register_connection(ping, nsnode1)
+
+ filepath = ec.save(dirpath)
+ print filepath
+
+ ec.deploy()
+
+ ec.wait_finished([ping])
+
+ stdout = ec.trace(simu, "stdout")
+
+ expected = "20 packets transmitted, 20 received, 0% packet loss"
+ self.assertTrue(stdout.find(expected) > -1)
+
+ ec.shutdown()
+
+ # Load serialized experiment
+ ec2 = ExperimentController.load(filepath)
+
+ ec2.deploy()
+
+ ec2.wait_finished([ping])
+
+ self.assertEquals(len(ec.resources), len(ec2.resources))
+
+ stdout = ec2.trace(simu, "stdout")
+
+ expected = "20 packets transmitted, 20 received, 0% packet loss"
+ self.assertTrue(stdout.find(expected) > -1)
+
+ ec2.shutdown()
+
+ shutil.rmtree(dirpath)
+
+ @skipIfNotAlive
+ def t_dce_serialize(self, host, user = None, identity = None):
+ dirpath = tempfile.mkdtemp()
+
+ ec = ExperimentController(exp_id = "test-ns3-dce")
+
+ node = ec.register_resource("linux::Node")
+ if host == "localhost":
+ ec.set(node, "hostname", host)
+ else:
+ ec.set(node, "hostname", host)
+ ec.set(node, "username", user)
+ ec.set(node, "identity", identity)
+
+ ec.set(node, "cleanProcesses", True)
+ #ec.set(node, "cleanHome", True)
+
+ simu = ec.register_resource("linux::ns3::Simulation")
+ ec.set(simu, "verbose", True)
+ ec.register_connection(simu, node)
+
+ nsnode1 = add_ns3_node(ec, simu)
+ p2p1 = add_point2point_device(ec, nsnode1, "10.0.0.1", "30")
+ ec.set(p2p1, "DataRate", "5Mbps")
+
+ nsnode2 = add_ns3_node(ec, simu)
+ p2p2 = add_point2point_device(ec, nsnode2, "10.0.0.2", "30")
+ ec.set(p2p2, "DataRate", "5Mbps")
+
+ # Create channel
+ chan = ec.register_resource("ns3::PointToPointChannel")
+ ec.set(chan, "Delay", "2ms")
+
+ ec.register_connection(chan, p2p1)
+ ec.register_connection(chan, p2p2)
+
+ ### create applications
+ udp_perf = ec.register_resource("linux::ns3::dce::Application")
+ ec.set (udp_perf, "binary", "udp-perf")
+ ec.set (udp_perf, "stackSize", 1<<20)
+ ec.set (udp_perf, "arguments", "--duration=10;--nodes=2")
+ ec.set (udp_perf, "StartTime", "1s")
+ ec.set (udp_perf, "StopTime", "20s")
+ ec.register_connection(udp_perf, nsnode1)
+
+ udp_perf_client = ec.register_resource("linux::ns3::dce::Application")
+ ec.set (udp_perf_client, "binary", "udp-perf")
+ ec.set (udp_perf_client, "stackSize", 1<<20)
+ ec.set (udp_perf_client, "arguments", "--client;--nodes=2;--host=10.0.0.1;--duration=10")
+ ec.set (udp_perf_client, "StartTime", "2s")
+ ec.set (udp_perf_client, "StopTime", "20s")
+ ec.register_connection(udp_perf_client, nsnode2)
+
+ filepath = ec.save(dirpath)
+
+ ec.deploy()
+
+ ec.wait_finished([udp_perf_client])
+
+ # Give time to flush the streams
+ import time
+ time.sleep(5)
+
+ expected = "udp-perf --duration=10 --nodes=2"
+ cmdline = ec.trace(udp_perf, "cmdline")
+ self.assertTrue(cmdline.find(expected) > -1, cmdline)
+
+ expected = "Start Time: NS3 Time: 1s ("
+ status = ec.trace(udp_perf, "status")
+ self.assertTrue(status.find(expected) > -1, status)
+
+ expected = "received=1500 bytes, 1 reads (@1500 bytes) 1500"
+ stdout = ec.trace(udp_perf, "stdout")
+ self.assertTrue(stdout.find(expected) > -1, stdout)
+
+ ec.shutdown()
+
+ # Load serialized experiment
+ ec2 = ExperimentController.load(filepath)
+
+ ec2.deploy()
+ ec2.wait_finished([udp_perf_client])
+
+ # Give time to flush the streams
+ time.sleep(5)
+
+ self.assertEquals(len(ec.resources), len(ec2.resources))
+
+ expected = "udp-perf --duration=10 --nodes=2"
+ cmdline = ec2.trace(udp_perf, "cmdline")
+ self.assertTrue(cmdline.find(expected) > -1, cmdline)
+
+ expected = "Start Time: NS3 Time: 1s ("
+ status = ec2.trace(udp_perf, "status")
+ self.assertTrue(status.find(expected) > -1, status)
+
+ expected = "received=1500 bytes, 1 reads (@1500 bytes) 1500"
+ stdout = ec2.trace(udp_perf, "stdout")
+ self.assertTrue(stdout.find(expected) > -1, stdout)
+
+ ec2.shutdown()
+
+ shutil.rmtree(dirpath)
+
+ def test_wifi_serialize_fedora(self):
+ self.t_wifi_serialize(self.fedora_host, self.fedora_user, self.fedora_identity)
+
+ def test_wifi_serialize_local(self):
+ self.t_wifi_serialize("localhost")
+
+ def test_routing_serialize_fedora(self):
+ self.t_routing_serialize(self.fedora_host, self.fedora_user, self.fedora_identity)
+
+ def test_routing_serialize_local(self):
+ self.t_routing_serialize("localhost")
+
+ def test_dce_serialize_fedora(self):
+ self.t_dce_serialize(self.fedora_host, self.fedora_user, self.fedora_identity)
+
+ def test_dce_serialize_local(self):
+ self.t_dce_serialize("localhost")
+
+
+if __name__ == '__main__':
+ unittest.main()
+
ec = ExperimentController(exp_id = "test-ping-count")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
- app = ec.register_resource("LinuxPing")
+ app = ec.register_resource("linux::Ping")
ec.set(app, "count", "3")
ec.set(app, "target", self.target)
ec.register_connection(app, node)
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.resource import ResourceState, ResourceAction
+from nepi.execution.trace import TraceAttr
+
+from test_utils import skipIfNotAlive, skipInteractive
+
+import os
+import shutil
+import time
+import tempfile
+import unittest
+
+class LinuxSerializationTestCase(unittest.TestCase):
+ def setUp(self):
+ self.fedora_host = "nepi2.pl.sophia.inria.fr"
+ self.fedora_user = "inria_nepi"
+
+ self.ubuntu_host = "roseval.pl.sophia.inria.fr"
+ self.ubuntu_user = "inria_nepi"
+
+ self.target = "nepi5.pl.sophia.inria.fr"
+
+ @skipIfNotAlive
+ def t_condition_serialize(self, host, user, depends):
+
+ dirpath = tempfile.mkdtemp()
+
+ ec = ExperimentController(exp_id="test-condition-serial")
+
+ node = ec.register_resource("linux::Node")
+ ec.set(node, "hostname", host)
+ ec.set(node, "username", user)
+ ec.set(node, "cleanExperiment", True)
+ ec.set(node, "cleanProcesses", True)
+
+ server = ec.register_resource("linux::Application")
+ cmd = "echo 'HOLA' | nc -l 3333"
+ ec.set(server, "command", cmd)
+ ec.set(server, "depends", depends)
+ ec.register_connection(server, node)
+
+ client = ec.register_resource("linux::Application")
+ cmd = "nc 127.0.0.1 3333"
+ ec.set(client, "command", cmd)
+ ec.register_connection(client, node)
+
+ ec.register_condition(client, ResourceAction.START, server, ResourceState.STARTED)
+
+ apps = [client, server]
+
+ filepath = ec.save(dirpath)
+
+ ec.deploy()
+
+ ec.wait_finished(apps)
+
+ self.assertTrue(ec.state(node) == ResourceState.STARTED)
+ self.assertTrue(ec.state(server) == ResourceState.STOPPED)
+ self.assertTrue(ec.state(client) == ResourceState.STOPPED)
+
+ stdout = ec.trace(client, "stdout")
+ self.assertTrue(stdout.strip() == "HOLA")
+
+ ec.shutdown()
+
+ # Load serialized experiment
+ ec2 = ExperimentController.load(filepath)
+
+ ec2.deploy()
+ ec2.wait_finished(apps)
+
+ self.assertEquals(len(ec.resources), len(ec2.resources))
+
+ self.assertTrue(ec2.state(node) == ResourceState.STARTED)
+ self.assertTrue(ec2.state(server) == ResourceState.STOPPED)
+ self.assertTrue(ec2.state(client) == ResourceState.STOPPED)
+
+ stdout = ec2.trace(client, "stdout")
+
+ self.assertTrue(stdout.strip() == "HOLA")
+
+ ec2.shutdown()
+
+ shutil.rmtree(dirpath)
+
+ def test_condition_serialize_fedora(self):
+ self.t_condition_serialize(self.fedora_host, self.fedora_user, "nc")
+
+ def test_condition_serialize_ubuntu(self):
+ self.t_condition_serialize(self.ubuntu_host, self.ubuntu_user, "netcat")
+
+if __name__ == '__main__':
+ unittest.main()
+
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+
+from test_utils import skipIfNotAlive
+
+import os
+import time
+import unittest
+
+class LinuxTapTestCase(unittest.TestCase):
+ def setUp(self):
+ self.host = "roseval.pl.sophia.inria.fr"
+ self.user = "inria_nepi"
+ self.identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
+ self.netblock = "192.168.1"
+
+ @skipIfNotAlive
+ def t_tap_create(self, host, user=None, identity=None):
+
+ ec = ExperimentController(exp_id="test-tap-create")
+
+ node = ec.register_resource("linux::Node")
+ ec.set(node, "hostname", host)
+
+ if host != "localhost":
+ ec.set(node, "username", user)
+ ec.set(node, "identity", identity)
+
+ ec.set(node, "cleanExperiment", True)
+ ec.set(node, "cleanProcesses", True)
+
+ tap = ec.register_resource("linux::Tap")
+ ec.set(tap, "ip", "%s.1" % self.netblock)
+ ec.set(tap, "prefix", "24")
+ ec.register_connection(tap, node)
+
+ app = ec.register_resource("linux::Application")
+ cmd = "ping -c3 %s.1" % self.netblock
+ ec.set(app, "command", cmd)
+ ec.register_connection(app, node)
+
+ ec.deploy()
+
+ ec.wait_finished(app)
+
+ ping = ec.trace(app, "stdout")
+ expected = """3 packets transmitted, 3 received, 0% packet loss"""
+ self.assertTrue(ping.find(expected) > -1)
+
+ if_name = ec.get(tap, "deviceName")
+ self.assertTrue(if_name.startswith("tap"))
+
+ ec.shutdown()
+
+ def test_tap_create(self):
+ self.t_tap_create(self.host, self.user, self.identity)
+
+ def test_tap_create_local(self):
+ self.t_tap_create("localhost")
+
+
+if __name__ == '__main__':
+ unittest.main()
+
def t_tofile(self, host, user):
ec = ExperimentController(exp_id = "test-to-file")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
- pcap = ec.register_resource("LinuxTcpdump")
+ pcap = ec.register_resource("linux::Tcpdump")
ec.set(pcap, "i", "eth0")
ec.set(pcap, "w", "custom_output")
ec.register_connection(pcap, node)
- app = ec.register_resource("LinuxPing")
+ app = ec.register_resource("linux::Ping")
ec.set(app, "count", "20")
ec.set(app, "target", self.target)
ec.register_connection(app, node)
ec = ExperimentController(exp_id = "test-traceroute")
- node = ec.register_resource("LinuxNode")
+ node = ec.register_resource("linux::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
- app = ec.register_resource("LinuxTraceroute")
+ app = ec.register_resource("linux::Traceroute")
ec.set(app, "target", self.target)
ec.register_connection(app, node)
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+
+from test_utils import skipIfNotAlive
+
+import os
+import time
+import unittest
+
+class LinuxTunTestCase(unittest.TestCase):
+ def setUp(self):
+ self.host = "roseval.pl.sophia.inria.fr"
+ self.user = "inria_nepi"
+ self.identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
+ self.netblock = "192.168.1"
+
+ @skipIfNotAlive
+ def t_tun_create(self, host, user=None, identity=None):
+
+ ec = ExperimentController(exp_id="test-tun-create")
+
+ node = ec.register_resource("linux::Node")
+ ec.set(node, "hostname", host)
+
+ if host != "localhost":
+ ec.set(node, "username", user)
+ ec.set(node, "identity", identity)
+
+ ec.set(node, "cleanExperiment", True)
+ ec.set(node, "cleanProcesses", True)
+
+ tun = ec.register_resource("linux::Tun")
+ ec.set(tun, "ip", "%s.1" % self.netblock)
+ ec.set(tun, "prefix", "24")
+ ec.register_connection(tun, node)
+
+ app = ec.register_resource("linux::Application")
+ cmd = "ping -c3 %s.1" % self.netblock
+ ec.set(app, "command", cmd)
+ ec.register_connection(app, node)
+
+ ec.deploy()
+
+ ec.wait_finished(app)
+
+ ping = ec.trace(app, "stdout")
+ expected = """3 packets transmitted, 3 received, 0% packet loss"""
+ self.assertTrue(ping.find(expected) > -1)
+
+ if_name = ec.get(tun, "deviceName")
+ self.assertTrue(if_name.startswith("tun"))
+
+ ec.shutdown()
+
+ def test_tun_create(self):
+ self.t_tun_create(self.host, self.user, self.identity)
+
+ def test_tun_create_local(self):
+ self.t_tun_create("localhost")
+
+
+if __name__ == '__main__':
+ unittest.main()
+
ec = ExperimentController(exp_id = "test-udptest-rtt")
- node1 = ec.register_resource("LinuxNode")
+ node1 = ec.register_resource("linux::Node")
ec.set(node1, "hostname", host1)
ec.set(node1, "username", user1)
- ec.set(node1, "cleanHome", True)
+ ec.set(node1, "cleanExperiment", True)
ec.set(node1, "cleanProcesses", True)
- server = ec.register_resource("LinuxUdpTest")
+ server = ec.register_resource("linux::UdpTest")
ec.set(server, "s", True)
ec.register_connection(server, node1)
- node2 = ec.register_resource("LinuxNode")
+ node2 = ec.register_resource("linux::Node")
ec.set(node2, "hostname", host2)
ec.set(node2, "username", user2)
- ec.set(node2, "cleanHome", True)
+ ec.set(node2, "cleanExperiment", True)
ec.set(node2, "cleanProcesses", True)
- client = ec.register_resource("LinuxUdpTest")
+ client = ec.register_resource("linux::UdpTest")
ec.set(client, "a", True)
ec.set(client, "target", host1)
ec.register_connection(client, node2)
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+
+from test_utils import skipIfAnyNotAliveWithIdentity
+
+import os
+import time
+import unittest
+
+class LinuxUdpTunnelTestCase(unittest.TestCase):
+ def setUp(self):
+ self.host1 = "roseval.pl.sophia.inria.fr"
+ self.host2 = "138.96.118.11"
+ self.user1 = "inria_nepi"
+ self.user2 = "omflab"
+ self.identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
+ self.netblock = "192.168.1"
+
+ @skipIfAnyNotAliveWithIdentity
+ def t_tap_udp_tunnel(self, user1, host1, identity1, user2, host2,
+ identity2):
+
+ ec = ExperimentController(exp_id="test-tap-udp-tunnel")
+
+ node1 = ec.register_resource("linux::Node")
+ ec.set(node1, "hostname", host1)
+ ec.set(node1, "username", user1)
+ ec.set(node1, "identity", identity1)
+ ec.set(node1, "cleanExperiment", True)
+ ec.set(node1, "cleanProcesses", True)
+
+ tap1 = ec.register_resource("linux::Tap")
+ ec.set(tap1, "ip", "%s.1" % self.netblock)
+ ec.set(tap1, "prefix", "32")
+ ec.register_connection(tap1, node1)
+
+ node2 = ec.register_resource("linux::Node")
+ ec.set(node2, "hostname", host2)
+ ec.set(node2, "username", user2)
+ ec.set(node2, "identity", identity2)
+ ec.set(node2, "cleanExperiment", True)
+ ec.set(node2, "cleanProcesses", True)
+
+ tap2 = ec.register_resource("linux::Tap")
+ ec.set(tap2, "ip", "%s.2" % self.netblock)
+ ec.set(tap2, "prefix", "32")
+ ec.register_connection(tap2, node2)
+
+ udptun = ec.register_resource("linux::UdpTunnel")
+ ec.register_connection(tap1, udptun)
+ ec.register_connection(tap2, udptun)
+
+ app = ec.register_resource("linux::Application")
+ cmd = "ping -c3 %s.2" % self.netblock
+ ec.set(app, "command", cmd)
+ ec.register_connection(app, node1)
+
+ ec.deploy()
+
+ ec.wait_finished(app)
+
+ ping = ec.trace(app, "stdout")
+ expected = """3 packets transmitted, 3 received, 0% packet loss"""
+ self.assertTrue(ping.find(expected) > -1)
+
+ vif_name = ec.get(tap1, "deviceName")
+ self.assertTrue(vif_name.startswith("tap"))
+
+ vif_name = ec.get(tap2, "deviceName")
+ self.assertTrue(vif_name.startswith("tap"))
+
+ ec.shutdown()
+
+ @skipIfAnyNotAliveWithIdentity
+ def t_tun_udp_tunnel(self, user1, host1, identity1, user2, host2, identity2):
+
+ ec = ExperimentController(exp_id="test-tun-udp-tunnel")
+
+ node1 = ec.register_resource("linux::Node")
+ ec.set(node1, "hostname", host1)
+ ec.set(node1, "username", user1)
+ ec.set(node1, "identity", identity1)
+ ec.set(node1, "cleanExperiment", True)
+ ec.set(node1, "cleanProcesses", True)
+
+ tun1 = ec.register_resource("linux::Tun")
+ ec.set(tun1, "ip", "%s.1" % self.netblock)
+ ec.set(tun1, "prefix", "32")
+ ec.register_connection(tun1, node1)
+
+ node2 = ec.register_resource("linux::Node")
+ ec.set(node2, "hostname", host2)
+ ec.set(node2, "username", user2)
+ ec.set(node2, "identity", identity2)
+ ec.set(node2, "cleanExperiment", True)
+ ec.set(node2, "cleanProcesses", True)
+
+ tun2 = ec.register_resource("linux::Tun")
+ ec.set(tun2, "ip", "%s.2" % self.netblock)
+ ec.set(tun2, "prefix", "32")
+ ec.register_connection(tun2, node2)
+
+ udptun = ec.register_resource("linux::UdpTunnel")
+ ec.register_connection(tun1, udptun)
+ ec.register_connection(tun2, udptun)
+
+ app = ec.register_resource("linux::Application")
+ cmd = "ping -c3 %s.2" % self.netblock
+ ec.set(app, "command", cmd)
+ ec.register_connection(app, node1)
+
+ ec.deploy()
+
+ ec.wait_finished(app)
+
+ ping = ec.trace(app, "stdout")
+ expected = """3 packets transmitted, 3 received, 0% packet loss"""
+ self.assertTrue(ping.find(expected) > -1)
+
+ vif_name = ec.get(tun1, "deviceName")
+ self.assertTrue(vif_name.startswith("tun"))
+
+ vif_name = ec.get(tun2, "deviceName")
+ self.assertTrue(vif_name.startswith("tun"))
+
+ ec.shutdown()
+
+ def test_tap_udp_tunnel(self):
+ self.t_tap_udp_tunnel(self.user1, self.host1, self.identity,
+ self.user2, self.host2, self.identity)
+
+ def ztest_tun_udp_tunnel(self):
+ self.t_tun_udp_tunnel(self.user1, self.host1, self.identity,
+ self.user2, self.host2, self.identity)
+
+if __name__ == '__main__':
+ unittest.main()
+
class OMFResourceFactoryTestCase(unittest.TestCase):
def test_creation_phase(self):
- self.assertEquals(OMFNode.get_rtype(), "OMFNode")
+ self.assertEquals(OMFNode.get_rtype(), "omf::Node")
self.assertEquals(len(OMFNode._attributes), 8)
- self.assertEquals(OMFWifiInterface.get_rtype(), "OMFWifiInterface")
+ self.assertEquals(OMFWifiInterface.get_rtype(), "omf::WifiInterface")
self.assertEquals(len(OMFWifiInterface._attributes), 12)
- self.assertEquals(OMFChannel.get_rtype(), "OMFChannel")
+ self.assertEquals(OMFChannel.get_rtype(), "omf::Channel")
self.assertEquals(len(OMFChannel._attributes), 8)
- self.assertEquals(OMFApplication.get_rtype(), "OMFApplication")
+ self.assertEquals(OMFApplication.get_rtype(), "omf::Application")
self.assertEquals(len(OMFApplication._attributes), 14)
class OMFEachTestCase(unittest.TestCase):
def setUp(self):
self.ec = ExperimentController(exp_id = "99999")
- self.node1 = self.ec.register_resource("OMFNode")
+ self.node1 = self.ec.register_resource("omf::Node")
self.ec.set(self.node1, 'hostname', 'omf.plexus.wlab17')
self.ec.set(self.node1, 'xmppUser', "nepi")
self.ec.set(self.node1, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node1, 'xmppPassword', "1234")
self.ec.set(self.node1, 'version', "5")
- self.iface1 = self.ec.register_resource("OMFWifiInterface")
+ self.iface1 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface1, 'name', "wlan0")
self.ec.set(self.iface1, 'mode', "adhoc")
self.ec.set(self.iface1, 'hw_mode', "g")
self.ec.set(self.iface1, 'ip', "10.0.0.17/24")
self.ec.set(self.iface1, 'version', "5")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.set(self.channel, 'xmppUser', "nepi")
self.ec.set(self.channel, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.channel, 'xmppPassword', "1234")
self.ec.set(self.channel, 'version', "5")
- self.app1 = self.ec.register_resource("OMFApplication")
+ self.app1 = self.ec.register_resource("omf::Application")
self.ec.set(self.app1, 'appid', 'Vlc#1')
self.ec.set(self.app1, 'command', "/opt/vlc-1.1.13/cvlc /opt/10-by-p0d.avi --sout '#rtp{dst=10.0.0.37,port=1234,mux=ts}'")
self.ec.set(self.app1, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
self.ec.set(self.app1, 'version', "5")
- self.app2 = self.ec.register_resource("OMFApplication")
+ self.app2 = self.ec.register_resource("omf::Application")
self.ec.set(self.app2, 'version', "5")
- self.app3 = self.ec.register_resource("OMFApplication")
+ self.app3 = self.ec.register_resource("omf::Application")
self.ec.set(self.app3, 'version', "5")
- self.app4 = self.ec.register_resource("OMFApplication")
+ self.app4 = self.ec.register_resource("omf::Application")
self.ec.set(self.app4, 'version', "5")
- self.app5 = self.ec.register_resource("OMFApplication")
+ self.app5 = self.ec.register_resource("omf::Application")
self.ec.set(self.app5, 'version', "5")
self.ec.register_connection(self.app1, self.node1)
def test_deploy(self):
ec = ExperimentController(exp_id = "5421" )
- self.node1 = ec.register_resource("OMFNode")
+ self.node1 = ec.register_resource("omf::Node")
ec.set(self.node1, 'hostname', 'omf.plexus.wlab17')
ec.set(self.node1, 'xmppUser', "nepi")
ec.set(self.node1, 'xmppServer', "xmpp-plexus.onelab.eu")
ec.set(self.node1, 'xmppPassword', "1234")
ec.set(self.node1, 'version', "5")
- self.iface1 = ec.register_resource("OMFWifiInterface")
+ self.iface1 = ec.register_resource("omf::WifiInterface")
ec.set(self.iface1, 'name', "wlan0")
ec.set(self.iface1, 'mode', "adhoc")
ec.set(self.iface1, 'hw_mode', "g")
ec.set(self.iface1, 'ip', "10.0.0.17/24")
ec.set(self.iface1, 'version', "5")
- self.channel = ec.register_resource("OMFChannel")
+ self.channel = ec.register_resource("omf::Channel")
ec.set(self.channel, 'channel', "6")
ec.set(self.channel, 'xmppUser', "nepi")
ec.set(self.channel, 'xmppServer', "xmpp-plexus.onelab.eu")
ec.set(self.channel, 'xmppPassword', "1234")
ec.set(self.channel, 'version', "5")
- self.app1 = ec.register_resource("OMFApplication")
+ self.app1 = ec.register_resource("omf::Application")
ec.set(self.app1, 'appid', 'Vlc#1')
ec.set(self.app1, 'command', "/opt/vlc-1.1.13/cvlc /opt/10-by-p0d.avi --sout '#rtp{dst=10.0.0.37,port=1234,mux=ts}'")
ec.set(self.app1, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
ec.set(self.app1, 'version', "5")
- self.app2 = ec.register_resource("OMFApplication")
+ self.app2 = ec.register_resource("omf::Application")
ec.set(self.app2, 'appid', 'Test#1')
ec.set(self.app2, 'command', "/usr/bin/test -1")
ec.set(self.app2, 'env', " ")
ec.set(self.app2, 'version', "5")
- self.app3 = ec.register_resource("OMFApplication")
+ self.app3 = ec.register_resource("omf::Application")
ec.set(self.app3, 'appid', 'Test#2')
ec.set(self.app3, 'command', "/usr/bin/test -2")
ec.set(self.app3, 'env', " ")
ec.set(self.app3, 'version', "5")
- self.app4 = ec.register_resource("OMFApplication")
+ self.app4 = ec.register_resource("omf::Application")
ec.set(self.app4, 'appid', 'Test#3')
ec.set(self.app4, 'command', "/usr/bin/test -3")
ec.set(self.app4, 'env', " ")
ec.set(self.app4, 'version', "5")
- self.app5 = ec.register_resource("OMFApplication")
+ self.app5 = ec.register_resource("omf::Application")
ec.set(self.app5, 'appid', 'Kill#2')
ec.set(self.app5, 'command', "/usr/bin/killall vlc")
ec.set(self.app5, 'env', " ")
self.ec = ExperimentController(exp_id = str(OMFVLCWrongCaseAllCritical.id))
OMFVLCWrongCaseAllCritical.id += 1
- self.node1 = self.ec.register_resource("OMFNode")
+ self.node1 = self.ec.register_resource("omf::Node")
self.ec.set(self.node1, 'hostname', 'omf.plexus.wlab17')
self.ec.set(self.node1, 'xmppUser', "nepi")
self.ec.set(self.node1, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node1, 'xmppPassword', "1234")
self.ec.set(self.node1, 'version', "5")
- self.iface1 = self.ec.register_resource("OMFWifiInterface")
+ self.iface1 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface1, 'name', "wlan0")
self.ec.set(self.iface1, 'mode', "adhoc")
self.ec.set(self.iface1, 'hw_mode', "g")
self.ec.set(self.iface1, 'ip', "10.0.0.17/24")
self.ec.set(self.iface1, 'version', "5")
- self.app1 = self.ec.register_resource("OMFApplication")
+ self.app1 = self.ec.register_resource("omf::Application")
self.ec.set(self.app1, 'appid', 'Vlc#1')
self.ec.set(self.app1, 'command', "/opt/vlc-1.1.13/cvlc /opt/10-by-p0d.avi --sout '#rtp{dst=10.0.0.37,port=1234,mux=ts}'")
self.ec.set(self.app1, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
self.ec.register_connection(self.node1, self.iface1)
def test_deploy_wo_node(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'version', "5")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'ip', "10.0.0.37/24")
self.ec.set(self.iface2, 'version', "5")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.set(self.channel, 'xmppUser', "nepi")
self.ec.set(self.channel, 'xmppServer', "xmpp-plexus.onelab.eu")
def test_deploy_wo_hostname(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'xmppUser', "nepi")
self.ec.set(self.node2, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node2, 'xmppPort', "5222")
self.ec.set(self.node2, 'xmppPassword', "1234")
self.ec.set(self.node2, 'version', "5")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'ip', "10.0.0.37/24")
self.ec.set(self.iface2, 'version', "5")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.set(self.channel, 'xmppUser', "nepi")
self.ec.set(self.channel, 'xmppServer', "xmpp-plexus.onelab.eu")
self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_wo_iface(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'hostname', 'omf.plexus.wlab17')
self.ec.set(self.node2, 'xmppUser', "nepi")
self.ec.set(self.node2, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node2, 'xmppPassword', "1234")
self.ec.set(self.node2, 'version', "5")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'version', "5")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.set(self.channel, 'xmppUser', "nepi")
self.ec.set(self.channel, 'xmppServer', "xmpp-plexus.onelab.eu")
self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_wo_ip(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'hostname', 'omf.plexus.wlab17')
self.ec.set(self.node2, 'xmppUser', "nepi")
self.ec.set(self.node2, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node2, 'xmppPassword', "1234")
self.ec.set(self.node2, 'version', "5")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'essid', "vlcexp")
self.ec.set(self.iface2, 'version', "5")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.set(self.channel, 'xmppUser', "nepi")
self.ec.set(self.channel, 'xmppServer', "xmpp-plexus.onelab.eu")
self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_wo_channel(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'hostname', 'omf.plexus.wlab17')
self.ec.set(self.node2, 'xmppUser', "nepi")
self.ec.set(self.node2, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node2, 'xmppPassword', "1234")
self.ec.set(self.node2, 'version', "5")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'essid', "vlcexp")
self.ec.set(self.iface2, 'version', "5")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'version', "5")
self.ec.register_connection(self.iface1, self.channel)
self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_wo_app(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'hostname', 'omf.plexus.wlab17')
self.ec.set(self.node2, 'xmppUser', "nepi")
self.ec.set(self.node2, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node2, 'xmppPassword', "1234")
self.ec.set(self.node2, 'version', "5")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'ip', "10.0.0.37/24")
self.ec.set(self.iface2, 'version', "5")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.set(self.channel, 'xmppUser', "nepi")
self.ec.set(self.channel, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.channel, 'xmppPassword', "1234")
self.ec.set(self.channel, 'version', "5")
- self.app2 = self.ec.register_resource("OMFApplication")
+ self.app2 = self.ec.register_resource("omf::Application")
self.ec.set(self.app2, 'version', "5")
self.ec.register_connection(self.iface1, self.channel)
self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
def test_deploy_wo_app_path(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'hostname', 'omf.plexus.wlab17')
self.ec.set(self.node2, 'xmppUser', "nepi")
self.ec.set(self.node2, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node2, 'xmppPassword', "1234")
self.ec.set(self.node2, 'version', "5")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'ip', "10.0.0.37/24")
self.ec.set(self.iface2, 'version', "5")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.set(self.channel, 'xmppUser', "nepi")
self.ec.set(self.channel, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.channel, 'xmppPassword', "1234")
self.ec.set(self.channel, 'version', "5")
- self.app2 = self.ec.register_resource("OMFApplication")
+ self.app2 = self.ec.register_resource("omf::Application")
self.ec.set(self.app2, 'appid', 'Vlc#2')
self.ec.set(self.app2, 'version', "5")
self.ec = ExperimentController(exp_id = str(OMFVLCWrongCaseWithNonCritical.id))
OMFVLCWrongCaseWithNonCritical.id += 1
- self.node1 = self.ec.register_resource("OMFNode")
+ self.node1 = self.ec.register_resource("omf::Node")
self.ec.set(self.node1, 'hostname', 'omf.plexus.wlab17')
self.ec.set(self.node1, 'xmppUser', "nepi")
self.ec.set(self.node1, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node1, 'xmppPassword', "1234")
self.ec.set(self.node1, 'version', "5")
- self.iface1 = self.ec.register_resource("OMFWifiInterface")
+ self.iface1 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface1, 'name', "wlan0")
self.ec.set(self.iface1, 'mode', "adhoc")
self.ec.set(self.iface1, 'hw_mode', "g")
self.ec.set(self.iface1, 'ip', "10.0.0.17/24")
self.ec.set(self.iface1, 'version', "5")
- self.app1 = self.ec.register_resource("OMFApplication")
+ self.app1 = self.ec.register_resource("omf::Application")
self.ec.set(self.app1, 'appid', 'Kill#1')
self.ec.set(self.app1, 'command', "/usr/bin/test -1")
self.ec.set(self.app1, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
self.ec.register_connection(self.node1, self.iface1)
def test_deploy_with_node_nc(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, "critical", False)
self.ec.set(self.node2, 'version', "5")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'ip', "10.0.0.37/24")
self.ec.set(self.iface2, 'version', "5")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.set(self.channel, 'xmppUser', "nepi")
self.ec.set(self.channel, 'xmppServer', "xmpp-plexus.onelab.eu")
self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_with_node_and_iface_nc(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, "critical", False)
self.ec.set(self.node2, 'version', "5")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.node2, "critical", False)
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'ip', "10.0.0.37/24")
self.ec.set(self.iface2, 'version', "5")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.set(self.channel, 'xmppUser', "nepi")
self.ec.set(self.channel, 'xmppServer', "xmpp-plexus.onelab.eu")
self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_with_node_iface_channel_nc(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, "critical", False)
self.ec.set(self.node2, 'hostname', 'omf.plexus.wlab37')
self.ec.set(self.node2, 'version', "5")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.node2, "critical", False)
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'ip', "10.0.0.37/24")
self.ec.set(self.iface2, 'version', "5")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.set(self.channel, "critical", False)
self.ec.set(self.channel, 'version', "5")
self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_with_app_nc(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'hostname', 'omf.plexus.wlab37')
self.ec.set(self.node2, 'xmppUser', "nepi")
self.ec.set(self.node2, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node2, 'xmppPassword', "1234")
self.ec.set(self.node2, 'version', "5")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'ip', "10.0.0.37/24")
self.ec.set(self.iface2, 'version', "5")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.set(self.channel, 'xmppUser', "nepi")
self.ec.set(self.channel, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.channel, 'xmppPassword', "1234")
self.ec.set(self.channel, 'version', "5")
- self.app2 = self.ec.register_resource("OMFApplication")
+ self.app2 = self.ec.register_resource("omf::Application")
self.ec.set(self.app2, "critical", False)
self.ec.set(self.app2, 'appid', 'Kill#22')
self.ec.set(self.app2, 'version', "5")
self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
def test_deploy_with_all_nc_and_app_critical(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, "critical", False)
self.ec.set(self.node2, 'hostname', 'omf.plexus.wlab37')
self.ec.set(self.node2, 'version', "5")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, "critical", False)
self.ec.set(self.iface2, 'version', "5")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, "critical", False)
self.ec.set(self.channel, 'channel', "6")
self.ec.set(self.channel, 'version', "5")
- self.app2 = self.ec.register_resource("OMFApplication")
+ self.app2 = self.ec.register_resource("omf::Application")
self.ec.set(self.app2, 'appid', 'Kill#22')
self.ec.set(self.app2, 'version', "5")
self.ec = ExperimentController(exp_id = str(OMFVLCWrongCaseWithNonCriticalDep.id))
OMFVLCWrongCaseWithNonCriticalDep.id += 1
- self.node1 = self.ec.register_resource("OMFNode")
+ self.node1 = self.ec.register_resource("omf::Node")
self.ec.set(self.node1, 'hostname', 'omf.plexus.wlab17')
self.ec.set(self.node1, 'xmppUser', "nepi")
self.ec.set(self.node1, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node1, 'xmppPassword', "1234")
self.ec.set(self.node1, 'version', "5")
- self.iface1 = self.ec.register_resource("OMFWifiInterface")
+ self.iface1 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface1, 'name', "wlan0")
self.ec.set(self.iface1, 'mode', "adhoc")
self.ec.set(self.iface1, 'hw_mode', "g")
self.ec.set(self.iface1, 'ip', "10.0.0.17/24")
self.ec.set(self.iface1, 'version', "5")
- self.app1 = self.ec.register_resource("OMFApplication")
+ self.app1 = self.ec.register_resource("omf::Application")
self.ec.set(self.app1, 'appid', 'Kill#1')
self.ec.set(self.app1, 'command', "/usr/bin/test -1")
self.ec.set(self.app1, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
self.ec.register_connection(self.node1, self.iface1)
def test_deploy_with_app_nc_and_critical_wrong(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'hostname', 'omf.plexus.wlab37')
self.ec.set(self.node2, 'xmppUser', "nepi")
self.ec.set(self.node2, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node2, 'xmppPassword', "1234")
self.ec.set(self.node2, 'version', "5")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'ip', "10.0.0.37/24")
self.ec.set(self.iface2, 'version', "5")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.set(self.channel, 'xmppUser', "nepi")
self.ec.set(self.channel, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.channel, 'xmppPassword', "1234")
self.ec.set(self.channel, 'version', "5")
- self.app2 = self.ec.register_resource("OMFApplication")
+ self.app2 = self.ec.register_resource("omf::Application")
self.ec.set(self.app2, "critical", False)
self.ec.set(self.app2, 'appid', 'Kill#32')
self.ec.set(self.app2, 'version', "5")
- self.app3 = self.ec.register_resource("OMFApplication")
+ self.app3 = self.ec.register_resource("omf::Application")
self.ec.set(self.app3, 'appid', 'Kill#33')
self.ec.set(self.app3, 'version', "5")
self.assertEquals(self.ec.get_resource(self.app3).state, ResourceState.RELEASED)
def test_deploy_with_app_nc_and_critical_right(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'hostname', 'omf.plexus.wlab37')
self.ec.set(self.node2, 'xmppUser', "nepi")
self.ec.set(self.node2, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node2, 'xmppPassword', "1234")
self.ec.set(self.node2, 'version', "5")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'ip', "10.0.0.37/24")
self.ec.set(self.iface2, 'version', "5")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.set(self.channel, 'xmppUser', "nepi")
self.ec.set(self.channel, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.channel, 'xmppPassword', "1234")
self.ec.set(self.channel, 'version', "5")
- self.app2 = self.ec.register_resource("OMFApplication")
+ self.app2 = self.ec.register_resource("omf::Application")
self.ec.set(self.app2, "critical", False)
self.ec.set(self.app2, 'appid', 'Kill#32')
self.ec.set(self.app2, 'version', "5")
- self.app3 = self.ec.register_resource("OMFApplication")
+ self.app3 = self.ec.register_resource("omf::Application")
self.ec.set(self.app3, 'appid', 'Kill#3')
self.ec.set(self.app3, 'command', "/usr/bin/test -1")
self.ec.set(self.app3, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
def test_deploy_with_many_app_nc_and_critical(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'hostname', 'omf.plexus.wlab37')
self.ec.set(self.node2, 'xmppUser', "nepi")
self.ec.set(self.node2, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node2, 'xmppPassword', "1234")
self.ec.set(self.node2, 'version', "5")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'ip', "10.0.0.37/24")
self.ec.set(self.iface2, 'version', "5")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.set(self.channel, 'xmppUser', "nepi")
self.ec.set(self.channel, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.channel, 'xmppPassword', "1234")
self.ec.set(self.channel, 'version', "5")
- self.app2 = self.ec.register_resource("OMFApplication")
+ self.app2 = self.ec.register_resource("omf::Application")
self.ec.set(self.app2, "critical", False)
self.ec.set(self.app2, 'appid', 'Kill#32')
self.ec.set(self.app2, 'version', "5")
- self.app3 = self.ec.register_resource("OMFApplication")
+ self.app3 = self.ec.register_resource("omf::Application")
self.ec.set(self.app3, "critical", False)
self.ec.set(self.app3, 'appid', 'Kill#3')
self.ec.set(self.app3, 'command', "/usr/bin/test -1")
self.ec.set(self.app3, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
self.ec.set(self.app3, 'version', "5")
- self.app4 = self.ec.register_resource("OMFApplication")
+ self.app4 = self.ec.register_resource("omf::Application")
self.ec.set(self.app4, 'appid', 'Kill#4')
self.ec.set(self.app4, 'command', "/usr/bin/test -1")
self.ec.set(self.app4, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
self.ec.set(self.app4, 'version', "5")
- self.app5 = self.ec.register_resource("OMFApplication")
+ self.app5 = self.ec.register_resource("omf::Application")
self.ec.set(self.app5, "critical", False)
self.ec.set(self.app5, 'appid', 'Kill#32')
self.ec.set(self.app5, 'version', "5")
- self.app6 = self.ec.register_resource("OMFApplication")
+ self.app6 = self.ec.register_resource("omf::Application")
self.ec.set(self.app6, 'appid', 'Kill#6')
self.ec.set(self.app6, 'command', "/usr/bin/test -1")
self.ec.set(self.app6, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
class OMFResourceFactoryTestCase(unittest.TestCase):
def test_creation_phase(self):
- self.assertEquals(OMFNode.get_rtype(), "OMFNode")
+ self.assertEquals(OMFNode.get_rtype(), "omf::Node")
self.assertEquals(len(OMFNode._attributes), 8)
- self.assertEquals(OMFWifiInterface.get_rtype(), "OMFWifiInterface")
+ self.assertEquals(OMFWifiInterface.get_rtype(), "omf::WifiInterface")
self.assertEquals(len(OMFWifiInterface._attributes), 12)
- self.assertEquals(OMFChannel.get_rtype(), "OMFChannel")
+ self.assertEquals(OMFChannel.get_rtype(), "omf::Channel")
self.assertEquals(len(OMFChannel._attributes), 8)
- self.assertEquals(OMFApplication.get_rtype(), "OMFApplication")
+ self.assertEquals(OMFApplication.get_rtype(), "omf::Application")
self.assertEquals(len(OMFApplication._attributes), 14)
class OMFEachTestCase(unittest.TestCase):
def setUp(self):
self.ec = ExperimentController(exp_id = "99999")
- self.node1 = self.ec.register_resource("OMFNode")
+ self.node1 = self.ec.register_resource("omf::Node")
self.ec.set(self.node1, 'hostname', 'wlab12')
self.ec.set(self.node1, 'xmppUser', "nepi")
self.ec.set(self.node1, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node1, 'xmppPort', "5222")
self.ec.set(self.node1, 'xmppPassword', "1234")
- self.iface1 = self.ec.register_resource("OMFWifiInterface")
+ self.iface1 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface1, 'name', "wlan0")
self.ec.set(self.iface1, 'mode', "adhoc")
self.ec.set(self.iface1, 'hw_mode', "g")
self.ec.set(self.iface1, 'essid', "vlcexp")
self.ec.set(self.iface1, 'ip', "10.0.0.17/24")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.set(self.channel, 'xmppUser', "nepi")
self.ec.set(self.channel, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.channel, 'xmppPort', "5222")
self.ec.set(self.channel, 'xmppPassword', "1234")
- self.app1 = self.ec.register_resource("OMFApplication")
+ self.app1 = self.ec.register_resource("omf::Application")
self.ec.set(self.app1, 'appid', 'Vlc#1')
self.ec.set(self.app1, 'command', "/opt/vlc-1.1.13/cvlc /opt/10-by-p0d.avi --sout '#rtp{dst=10.0.0.37,port=1234,mux=ts}'")
self.ec.set(self.app1, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
- self.app2 = self.ec.register_resource("OMFApplication")
+ self.app2 = self.ec.register_resource("omf::Application")
- self.app3 = self.ec.register_resource("OMFApplication")
+ self.app3 = self.ec.register_resource("omf::Application")
- self.app4 = self.ec.register_resource("OMFApplication")
+ self.app4 = self.ec.register_resource("omf::Application")
- self.app5 = self.ec.register_resource("OMFApplication")
+ self.app5 = self.ec.register_resource("omf::Application")
self.ec.register_connection(self.app1, self.node1)
self.ec.register_connection(self.app2, self.node1)
def test_deploy(self):
ec = ExperimentController(exp_id = "5421" )
- self.node1 = ec.register_resource("OMFNode")
+ self.node1 = ec.register_resource("omf::Node")
ec.set(self.node1, 'hostname', 'wlab12')
ec.set(self.node1, 'xmppUser', "nepi")
ec.set(self.node1, 'xmppServer', "xmpp-plexus.onelab.eu")
ec.set(self.node1, 'xmppPort', "5222")
ec.set(self.node1, 'xmppPassword', "1234")
- self.iface1 = ec.register_resource("OMFWifiInterface")
+ self.iface1 = ec.register_resource("omf::WifiInterface")
ec.set(self.iface1, 'name', "wlan0")
ec.set(self.iface1, 'mode', "adhoc")
ec.set(self.iface1, 'hw_mode', "g")
ec.set(self.iface1, 'essid', "vlcexp")
ec.set(self.iface1, 'ip', "10.0.0.17/24")
- self.channel = ec.register_resource("OMFChannel")
+ self.channel = ec.register_resource("omf::Channel")
ec.set(self.channel, 'channel', "6")
ec.set(self.channel, 'xmppUser', "nepi")
ec.set(self.channel, 'xmppServer', "xmpp-plexus.onelab.eu")
ec.set(self.channel, 'xmppPort', "5222")
ec.set(self.channel, 'xmppPassword', "1234")
- self.app1 = ec.register_resource("OMFApplication")
+ self.app1 = ec.register_resource("omf::Application")
ec.set(self.app1, 'appid', 'Vlc#1')
ec.set(self.app1, 'command', "/opt/vlc-1.1.13/cvlc /opt/10-by-p0d.avi --sout '#rtp{dst=10.0.0.37,port=1234,mux=ts}'")
ec.set(self.app1, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
- self.app2 = ec.register_resource("OMFApplication")
+ self.app2 = ec.register_resource("omf::Application")
ec.set(self.app2, 'appid', 'Test#1')
ec.set(self.app2, 'command', "/usr/bin/test -1")
ec.set(self.app2, 'env', " ")
- self.app3 = ec.register_resource("OMFApplication")
+ self.app3 = ec.register_resource("omf::Application")
ec.set(self.app3, 'appid', 'Test#2')
ec.set(self.app3, 'command', "/usr/bin/test -2")
ec.set(self.app3, 'env', " ")
- self.app4 = ec.register_resource("OMFApplication")
+ self.app4 = ec.register_resource("omf::Application")
ec.set(self.app4, 'appid', 'Test#3')
ec.set(self.app4, 'command', "/usr/bin/test -3")
ec.set(self.app4, 'env', " ")
- self.app5 = ec.register_resource("OMFApplication")
+ self.app5 = ec.register_resource("omf::Application")
ec.set(self.app5, 'appid', 'Kill#2')
ec.set(self.app5, 'command', "/usr/bin/killall vlc")
ec.set(self.app5, 'env', " ")
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Julien Tribino <julien.tribino@inria.fr>
+
+
+from nepi.execution.resource import ResourceFactory, ResourceManager, ResourceAction, ResourceState
+from nepi.execution.ec import ExperimentController
+
+from nepi.resources.omf.node import OMFNode
+from nepi.resources.omf.application import OMFApplication
+from nepi.resources.omf.interface import OMFWifiInterface
+from nepi.resources.omf.channel import OMFChannel
+from nepi.resources.omf.omf_api_factory import OMFAPIFactory
+
+from nepi.util.timefuncs import *
+
+import os
+import time
+import unittest
+
+class OMFPingNormalCase(unittest.TestCase):
+ def test_deploy(self):
+ ec = ExperimentController(exp_id = "5421" )
+
+ self.node1 = ec.register_resource("omf::Node")
+ ec.set(self.node1, 'hostname', 'wlab12')
+ ec.set(self.node1, 'xmppUser', "nepi")
+ ec.set(self.node1, 'xmppServer', "xmpp-plexus.onelab.eu")
+ ec.set(self.node1, 'xmppPort', "5222")
+ ec.set(self.node1, 'xmppPassword', "1234")
+
+ self.iface1 = ec.register_resource("omf::WifiInterface")
+ ec.set(self.iface1, 'name', "wlan0")
+ ec.set(self.iface1, 'mode', "adhoc")
+ ec.set(self.iface1, 'hw_mode', "g")
+ ec.set(self.iface1, 'essid', "vlcexp")
+ ec.set(self.iface1, 'ip', "10.0.0.17/24")
+
+ self.channel = ec.register_resource("omf::Channel")
+ ec.set(self.channel, 'channel', "6")
+ ec.set(self.channel, 'xmppUser', "nepi")
+ ec.set(self.channel, 'xmppServer', "xmpp-plexus.onelab.eu")
+ ec.set(self.channel, 'xmppPort', "5222")
+ ec.set(self.channel, 'xmppPassword', "1234")
+
+ self.app1 = ec.register_resource("omf::Application")
+ ec.set(self.app1, 'appid', 'Vlc#1')
+ ec.set(self.app1, 'command', "ping -c5 10.0.0.17")
+
+ ec.register_connection(self.app1, self.node1)
+ ec.register_connection(self.node1, self.iface1)
+ ec.register_connection(self.iface1, self.channel)
+
+ ec.register_condition(self.app1, ResourceAction.STOP, self.app1, ResourceState.STARTED , "10s")
+
+ ec.deploy()
+
+ ec.wait_finished(self.app1)
+
+ stdout_1 = ec.trace(self.app1, "stdout")
+ stderr_1 = ec.trace(self.app1, "stderr")
+
+ if stdout_1:
+ f = open("app1_out.txt", "w")
+ f.write(stdout_1)
+ f.close()
+
+ if stderr_1:
+ f = open("app1_err.txt", "w")
+ f.write(stderr_1)
+ f.close()
+
+ self.assertEquals(ec.get_resource(self.node1).state, ResourceState.STARTED)
+ self.assertEquals(ec.get_resource(self.iface1).state, ResourceState.STARTED)
+ self.assertEquals(ec.get_resource(self.channel).state, ResourceState.STARTED)
+ self.assertEquals(ec.get_resource(self.app1).state, ResourceState.STOPPED)
+
+ ec.shutdown()
+
+ self.assertEquals(ec.get_resource(self.node1).state, ResourceState.RELEASED)
+ self.assertEquals(ec.get_resource(self.iface1).state, ResourceState.RELEASED)
+ self.assertEquals(ec.get_resource(self.channel).state, ResourceState.RELEASED)
+ self.assertEquals(ec.get_resource(self.app1).state, ResourceState.RELEASED)
+
+ t = open("app1_out.txt", "r")
+ l = t.readlines()
+ self.assertEquals(l[0], "PING 10.0.0.17 (10.0.0.17) 56(84) bytes of data.\n")
+ self.assertIn("5 packets transmitted, 5 received, 0% packet loss, time", l[-2])
+ self.assertIn("rtt min/avg/max/mdev = ", l[-1])
+
+ t.close()
+ os.remove("app1_out.txt")
+
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
+
self.ec = ExperimentController(exp_id = str(OMFVLCWrongCaseAllCritical.id))
OMFVLCWrongCaseAllCritical.id += 1
- self.node1 = self.ec.register_resource("OMFNode")
+ self.node1 = self.ec.register_resource("omf::Node")
self.ec.set(self.node1, 'hostname', 'wlab12')
self.ec.set(self.node1, 'xmppUser', "nepi")
self.ec.set(self.node1, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node1, 'xmppPort', "5222")
self.ec.set(self.node1, 'xmppPassword', "1234")
- self.iface1 = self.ec.register_resource("OMFWifiInterface")
+ self.iface1 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface1, 'name', "wlan0")
self.ec.set(self.iface1, 'mode', "adhoc")
self.ec.set(self.iface1, 'hw_mode', "g")
self.ec.set(self.iface1, 'essid', "vlcexp")
self.ec.set(self.iface1, 'ip', "10.0.0.12/24")
- self.app1 = self.ec.register_resource("OMFApplication")
+ self.app1 = self.ec.register_resource("omf::Application")
self.ec.set(self.app1, 'appid', 'Vlc#1')
self.ec.set(self.app1, 'command', "/opt/vlc-1.1.13/cvlc /opt/10-by-p0d.avi --sout '#rtp{dst=10.0.0.49,port=1234,mux=ts}'")
self.ec.set(self.app1, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
self.ec.register_connection(self.node1, self.iface1)
def test_deploy_wo_node(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'essid', "vlcexp")
self.ec.set(self.iface2, 'ip', "10.0.0.49/24")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.register_connection(self.iface1, self.channel)
def test_deploy_wo_hostname(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'xmppUser', "nepi")
self.ec.set(self.node2, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node2, 'xmppPort', "5222")
self.ec.set(self.node2, 'xmppPassword', "1234")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'essid', "vlcexp")
self.ec.set(self.iface2, 'ip', "10.0.0.49/24")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.register_connection(self.iface1, self.channel)
self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_wo_iface(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'hostname', 'wlab49')
self.ec.set(self.node2, 'xmppUser', "nepi")
self.ec.set(self.node2, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node2, 'xmppPort', "5222")
self.ec.set(self.node2, 'xmppPassword', "1234")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.register_connection(self.iface1, self.channel)
self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_wo_channel(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'hostname', 'wlab49')
self.ec.set(self.node2, 'xmppUser', "nepi")
self.ec.set(self.node2, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node2, 'xmppPort', "5222")
self.ec.set(self.node2, 'xmppPassword', "1234")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'essid', "vlcexp")
self.ec.set(self.iface2, 'ip', "10.0.0.49/24")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.register_connection(self.iface1, self.channel)
self.ec.register_connection(self.node2, self.iface2)
self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_wo_ip(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'hostname', 'wlab49')
self.ec.set(self.node2, 'xmppUser', "nepi")
self.ec.set(self.node2, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node2, 'xmppPort', "5222")
self.ec.set(self.node2, 'xmppPassword', "1234")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'essid', "vlcexp")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.register_connection(self.iface1, self.channel)
self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_wo_app(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'hostname', 'wlab49')
self.ec.set(self.node2, 'xmppUser', "nepi")
self.ec.set(self.node2, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node2, 'xmppPort', "5222")
self.ec.set(self.node2, 'xmppPassword', "1234")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'essid', "vlcexp")
self.ec.set(self.iface2, 'ip', "10.0.0.49/24")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
- self.app2 = self.ec.register_resource("OMFApplication")
+ self.app2 = self.ec.register_resource("omf::Application")
self.ec.register_connection(self.iface1, self.channel)
self.ec.register_connection(self.iface2, self.channel)
self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
def test_deploy_wo_app_path(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'hostname', 'wlab49')
self.ec.set(self.node2, 'xmppUser', "nepi")
self.ec.set(self.node2, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node2, 'xmppPort', "5222")
self.ec.set(self.node2, 'xmppPassword', "1234")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'essid', "vlcexp")
self.ec.set(self.iface2, 'ip', "10.0.0.49/24")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
- self.app2 = self.ec.register_resource("OMFApplication")
+ self.app2 = self.ec.register_resource("omf::Application")
self.ec.set(self.app2, 'appid', 'Vlc#2')
self.ec.register_connection(self.iface1, self.channel)
# Author: Julien Tribino <julien.tribino@inria.fr>
-from nepi.execution.resource import ResourceFactory, ResourceManager, ResourceAction, ResourceState
+from nepi.execution.resource import ResourceFactory, ResourceManager, \
+ ResourceAction, ResourceState
from nepi.execution.ec import ExperimentController
from nepi.resources.omf.node import OMFNode
self.ec = ExperimentController(exp_id = str(OMFVLCWrongCaseWithNonCritical.id))
OMFVLCWrongCaseWithNonCritical.id += 1
- self.node1 = self.ec.register_resource("OMFNode")
+ self.node1 = self.ec.register_resource("omf::Node")
self.ec.set(self.node1, 'hostname', 'wlab12')
self.ec.set(self.node1, 'xmppUser', "nepi")
self.ec.set(self.node1, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node1, 'xmppPort', "5222")
self.ec.set(self.node1, 'xmppPassword', "1234")
- self.iface1 = self.ec.register_resource("OMFWifiInterface")
+ self.iface1 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface1, 'name', "wlan0")
self.ec.set(self.iface1, 'mode', "adhoc")
self.ec.set(self.iface1, 'hw_mode', "g")
self.ec.set(self.iface1, 'essid', "vlcexp")
self.ec.set(self.iface1, 'ip', "10.0.0.12/24")
- self.app1 = self.ec.register_resource("OMFApplication")
+ self.app1 = self.ec.register_resource("omf::Application")
self.ec.set(self.app1, 'appid', 'Kill#1')
self.ec.set(self.app1, 'command', "/usr/bin/test -1")
self.ec.set(self.app1, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
self.ec.register_connection(self.node1, self.iface1)
def test_deploy_with_node_nc(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, "critical", False)
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'essid', "vlcexp")
self.ec.set(self.iface2, 'ip', "10.0.0.49/24")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.register_connection(self.iface1, self.channel)
self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_with_node_and_iface_nc(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, "critical", False)
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.node2, "critical", False)
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'essid', "vlcexp")
self.ec.set(self.iface2, 'ip', "10.0.0.49/24")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.register_connection(self.iface1, self.channel)
self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_with_node_iface_channel_nc(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, "critical", False)
self.ec.set(self.node2, 'hostname', 'wlab49')
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.node2, "critical", False)
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'essid', "vlcexp")
self.ec.set(self.iface2, 'ip', "10.0.0.49/24")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
self.ec.set(self.channel, "critical", False)
self.assertEquals(self.ec.get_resource(self.app1).state, ResourceState.RELEASED)
def test_deploy_with_app_nc(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'hostname', 'wlab49')
self.ec.set(self.node2, 'xmppUser', "nepi")
self.ec.set(self.node2, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node2, 'xmppPort', "5222")
self.ec.set(self.node2, 'xmppPassword', "1234")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'essid', "vlcexp")
self.ec.set(self.iface2, 'ip', "10.0.0.49/24")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
- self.app2 = self.ec.register_resource("OMFApplication")
+ self.app2 = self.ec.register_resource("omf::Application")
self.ec.set(self.app2, "critical", False)
self.ec.set(self.app2, 'appid', 'Kill#22')
self.assertEquals(self.ec.get_resource(self.app2).state, ResourceState.RELEASED)
def test_deploy_with_all_nc_and_app_critical(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, "critical", False)
self.ec.set(self.node2, 'hostname', 'wlab49')
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, "critical", False)
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, "critical", False)
self.ec.set(self.channel, 'channel', "6")
- self.app2 = self.ec.register_resource("OMFApplication")
+ self.app2 = self.ec.register_resource("omf::Application")
self.ec.set(self.app2, 'appid', 'Kill#22')
self.ec.register_connection(self.iface1, self.channel)
self.ec = ExperimentController(exp_id = str(OMFVLCWrongCaseWithNonCriticalDep.id))
OMFVLCWrongCaseWithNonCriticalDep.id += 1
- self.node1 = self.ec.register_resource("OMFNode")
+ self.node1 = self.ec.register_resource("omf::Node")
self.ec.set(self.node1, 'hostname', 'wlab12')
self.ec.set(self.node1, 'xmppUser', "nepi")
self.ec.set(self.node1, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node1, 'xmppPort', "5222")
self.ec.set(self.node1, 'xmppPassword', "1234")
- self.iface1 = self.ec.register_resource("OMFWifiInterface")
+ self.iface1 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface1, 'name', "wlan0")
self.ec.set(self.iface1, 'mode', "adhoc")
self.ec.set(self.iface1, 'hw_mode', "g")
self.ec.set(self.iface1, 'essid', "vlcexp")
self.ec.set(self.iface1, 'ip', "10.0.0.12/24")
- self.app1 = self.ec.register_resource("OMFApplication")
+ self.app1 = self.ec.register_resource("omf::Application")
self.ec.set(self.app1, 'appid', 'Kill#1')
self.ec.set(self.app1, 'command', "/usr/bin/test -1")
self.ec.set(self.app1, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
self.ec.register_connection(self.node1, self.iface1)
def test_deploy_with_app_nc_and_critical_wrong(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'hostname', 'wlab49')
self.ec.set(self.node2, 'xmppUser', "nepi")
self.ec.set(self.node2, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node2, 'xmppPort', "5222")
self.ec.set(self.node2, 'xmppPassword', "1234")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'essid', "vlcexp")
self.ec.set(self.iface2, 'ip', "10.0.0.49/24")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
- self.app2 = self.ec.register_resource("OMFApplication")
+ self.app2 = self.ec.register_resource("omf::Application")
self.ec.set(self.app2, "critical", False)
self.ec.set(self.app2, 'appid', 'Kill#32')
- self.app3 = self.ec.register_resource("OMFApplication")
+ self.app3 = self.ec.register_resource("omf::Application")
self.ec.set(self.app3, 'appid', 'Kill#33')
self.ec.register_connection(self.iface1, self.channel)
self.assertEquals(self.ec.get_resource(self.app3).state, ResourceState.RELEASED)
def test_deploy_with_app_nc_and_critical_right(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'hostname', 'wlab49')
self.ec.set(self.node2, 'xmppUser', "nepi")
self.ec.set(self.node2, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node2, 'xmppPort', "5222")
self.ec.set(self.node2, 'xmppPassword', "1234")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'essid', "vlcexp")
self.ec.set(self.iface2, 'ip', "10.0.0.49/24")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
- self.app2 = self.ec.register_resource("OMFApplication")
+ self.app2 = self.ec.register_resource("omf::Application")
self.ec.set(self.app2, "critical", False)
self.ec.set(self.app2, 'appid', 'Kill#32')
- self.app3 = self.ec.register_resource("OMFApplication")
+ self.app3 = self.ec.register_resource("omf::Application")
self.ec.set(self.app3, 'appid', 'Kill#3')
self.ec.set(self.app3, 'command', "/usr/bin/test -3")
self.ec.set(self.app3, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
def test_deploy_with_many_app_nc_and_critical(self):
- self.node2 = self.ec.register_resource("OMFNode")
+ self.node2 = self.ec.register_resource("omf::Node")
self.ec.set(self.node2, 'hostname', 'wlab49')
self.ec.set(self.node2, 'xmppUser', "nepi")
self.ec.set(self.node2, 'xmppServer', "xmpp-plexus.onelab.eu")
self.ec.set(self.node2, 'xmppPort', "5222")
self.ec.set(self.node2, 'xmppPassword', "1234")
- self.iface2 = self.ec.register_resource("OMFWifiInterface")
+ self.iface2 = self.ec.register_resource("omf::WifiInterface")
self.ec.set(self.iface2, 'name', "wlan0")
self.ec.set(self.iface2, 'mode', "adhoc")
self.ec.set(self.iface2, 'hw_mode', "g")
self.ec.set(self.iface2, 'essid', "vlcexp")
self.ec.set(self.iface2, 'ip', "10.0.0.49/24")
- self.channel = self.ec.register_resource("OMFChannel")
+ self.channel = self.ec.register_resource("omf::Channel")
self.ec.set(self.channel, 'channel', "6")
- self.app2 = self.ec.register_resource("OMFApplication")
+ self.app2 = self.ec.register_resource("omf::Application")
self.ec.set(self.app2, "critical", False)
self.ec.set(self.app2, 'appid', 'Kill#32')
- self.app3 = self.ec.register_resource("OMFApplication")
+ self.app3 = self.ec.register_resource("omf::Application")
self.ec.set(self.app3, "critical", False)
self.ec.set(self.app3, 'appid', 'Kill#3')
self.ec.set(self.app3, 'command', "/usr/bin/test -3")
self.ec.set(self.app3, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
- self.app4 = self.ec.register_resource("OMFApplication")
+ self.app4 = self.ec.register_resource("omf::Application")
self.ec.set(self.app4, 'appid', 'Kill#4')
self.ec.set(self.app4, 'command', "/usr/bin/test -4")
self.ec.set(self.app4, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
- self.app5 = self.ec.register_resource("OMFApplication")
+ self.app5 = self.ec.register_resource("omf::Application")
self.ec.set(self.app5, "critical", False)
self.ec.set(self.app5, 'appid', 'Kill#32')
- self.app6 = self.ec.register_resource("OMFApplication")
+ self.app6 = self.ec.register_resource("omf::Application")
self.ec.set(self.app6, 'appid', 'Kill#6')
self.ec.set(self.app6, 'command', "/usr/bin/test -6")
self.ec.set(self.app6, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
pass
@clsinit_copy
-class DummyOMFApplication(OMFApplication):
- _rtype = "DummyOMFApplication"
+class OMFDummyApplication(OMFApplication):
+ _rtype = "omf::DummyApplication"
@classmethod
def _register_attributes(cls):
def test_set_hook(self):
self.ec = DummyEC(exp_id = "30")
- ResourceFactory.register_type(DummyOMFApplication)
+ ResourceFactory.register_type(OMFDummyApplication)
- self.node1 = self.ec.register_resource("OMFNode")
+ self.node1 = self.ec.register_resource("omf::Node")
self.ec.set(self.node1, 'hostname', 'omf.plexus.wlab17')
self.ec.set(self.node1, 'xmppSlice', "nepi")
self.ec.set(self.node1, 'xmppHost', "xmpp-plexus.onelab.eu")
self.ec.set(self.node1, 'xmppPort', "5222")
self.ec.set(self.node1, 'xmppPassword', "1234")
- self.app1 = self.ec.register_resource("DummyOMFApplication")
+ self.app1 = self.ec.register_resource("omf::DummyApplication")
self.ec.set(self.app1, 'appid', 'Test#1')
self.ec.set(self.app1, 'path', "/usr/bin/ping")
self.ec.set(self.app1, 'args', "")
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+
+from test_utils import skipIfAnyNotAliveWithIdentity
+
+import os
+import time
+import unittest
+
+class PlanetLabGRETunnelTestCase(unittest.TestCase):
+ def setUp(self):
+ #self.host1 = "nepi2.pl.sophia.inria.fr"
+ #self.host2 = "nepi5.pl.sophia.inria.fr"
+ #self.host2 = "planetlab1.informatik.uni-goettingen.de"
+ self.host1 = "planetlab1.informatik.uni-erlangen.de"
+ self.host2 = "planck227ple.test.ibbt.be"
+ self.host3 = "roseval.pl.sophia.inria.fr"
+ self.user = "inria_nepi"
+ self.identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
+ #self.netblock = "192.168.1"
+ self.netblock = "192.168.3"
+
+ @skipIfAnyNotAliveWithIdentity
+ def t_tap_gre_tunnel(self, user1, host1, identity1, user2, host2,
+ identity2):
+
+ ec = ExperimentController(exp_id="test-tap-gre-tunnel")
+
+ node1 = ec.register_resource("planetlab::Node")
+ ec.set(node1, "hostname", host1)
+ ec.set(node1, "username", user1)
+ ec.set(node1, "identity", identity1)
+ ec.set(node1, "cleanExperiment", True)
+ ec.set(node1, "cleanProcesses", True)
+
+ tap1 = ec.register_resource("planetlab::Tap")
+ ec.set(tap1, "ip", "%s.1" % self.netblock)
+ ec.set(tap1, "prefix", "24")
+ ec.register_connection(tap1, node1)
+
+ node2 = ec.register_resource("planetlab::Node")
+ ec.set(node2, "hostname", host2)
+ ec.set(node2, "username", user2)
+ ec.set(node2, "identity", identity2)
+ ec.set(node2, "cleanExperiment", True)
+ ec.set(node2, "cleanProcesses", True)
+
+ tap2 = ec.register_resource("planetlab::Tap")
+ ec.set(tap2, "ip", "%s.2" % self.netblock)
+ ec.set(tap2, "prefix", "24")
+ ec.register_connection(tap2, node2)
+
+ gretun = ec.register_resource("linux::GRETunnel")
+ ec.register_connection(tap1, gretun)
+ ec.register_connection(tap2, gretun)
+
+ app = ec.register_resource("linux::Application")
+ cmd = "ping -c3 %s.2" % self.netblock
+ ec.set(app, "command", cmd)
+ ec.register_connection(app, node1)
+
+ ec.deploy()
+
+ ec.wait_finished(app)
+
+ ping = ec.trace(app, "stdout")
+ expected = """3 packets transmitted, 3 received, 0% packet loss"""
+ self.assertTrue(ping.find(expected) > -1)
+
+ if_name = ec.get(tap1, "deviceName")
+ self.assertTrue(if_name.startswith("tap"))
+
+ if_name = ec.get(tap2, "deviceName")
+ self.assertTrue(if_name.startswith("tap"))
+
+ ec.shutdown()
+
+ @skipIfAnyNotAliveWithIdentity
+ def t_tun_gre_tunnel(self, user1, host1, identity1, user2, host2,
+ identity2):
+
+ ec = ExperimentController(exp_id="test-tun-gre-tunnel")
+
+ node1 = ec.register_resource("planetlab::Node")
+ ec.set(node1, "hostname", host1)
+ ec.set(node1, "username", user1)
+ ec.set(node1, "identity", identity1)
+ ec.set(node1, "cleanExperiment", True)
+ ec.set(node1, "cleanProcesses", True)
+
+ tun1 = ec.register_resource("planetlab::Tun")
+ ec.set(tun1, "ip", "%s.1" % self.netblock)
+ ec.set(tun1, "prefix", "24")
+ ec.register_connection(tun1, node1)
+
+ node2 = ec.register_resource("planetlab::Node")
+ ec.set(node2, "hostname", host2)
+ ec.set(node2, "username", user2)
+ ec.set(node2, "identity", identity2)
+ ec.set(node2, "cleanExperiment", True)
+ ec.set(node2, "cleanProcesses", True)
+
+ tun2 = ec.register_resource("planetlab::Tun")
+ ec.set(tun2, "ip", "%s.2" % self.netblock)
+ ec.set(tun2, "prefix", "24")
+ ec.register_connection(tun2, node2)
+
+ udptun = ec.register_resource("linux::GRETunnel")
+ ec.register_connection(tun1, udptun)
+ ec.register_connection(tun2, udptun)
+
+ app = ec.register_resource("linux::Application")
+ cmd = "ping -c3 %s.2" % self.netblock
+ ec.set(app, "command", cmd)
+ ec.register_connection(app, node1)
+
+ ec.deploy()
+
+ ec.wait_finished(app)
+
+ ping = ec.trace(app, "stdout")
+ expected = """3 packets transmitted, 3 received, 0% packet loss"""
+ self.assertTrue(ping.find(expected) > -1)
+
+ if_name = ec.get(tun1, "deviceName")
+ self.assertTrue(if_name.startswith("tun"))
+
+ if_name = ec.get(tun2, "deviceName")
+ self.assertTrue(if_name.startswith("tun"))
+
+ ec.shutdown()
+
+ @skipIfAnyNotAliveWithIdentity
+ def t_tun_hybrid_gre_tunnel(self, user1, host1, identity1,
+ user2, host2, identity2):
+
+ ec = ExperimentController(exp_id="test-tap-hybrid-gre-tunnel")
+
+ node1 = ec.register_resource("planetlab::Node")
+ ec.set(node1, "hostname", host1)
+ ec.set(node1, "username", user1)
+ ec.set(node1, "identity", identity1)
+ ec.set(node1, "cleanExperiment", True)
+ ec.set(node1, "cleanProcesses", True)
+
+ tun1 = ec.register_resource("planetlab::Tun")
+ ec.set(tun1, "ip", "%s.1" % self.netblock)
+ ec.set(tun1, "prefix", "24")
+ ec.register_connection(tun1, node1)
+
+ node2 = ec.register_resource("linux::Node")
+ ec.set(node2, "hostname", host2)
+ ec.set(node2, "username", user2)
+ ec.set(node2, "identity", identity2)
+ ec.set(node2, "cleanExperiment", True)
+ ec.set(node2, "cleanProcesses", True)
+
+ tun2 = ec.register_resource("linux::Tun")
+ ec.set(tun2, "ip", "%s.2" % self.netblock)
+ ec.set(tun2, "prefix", "24")
+ ec.register_connection(tun2, node2)
+
+ gretun = ec.register_resource("linux::GRETunnel")
+ ec.register_connection(tun1, gretun)
+ ec.register_connection(tun2, gretun)
+
+ app = ec.register_resource("linux::Application")
+ # It seems the hybrid tunnel takes some time to setup... we add a sleep 5
+ # XXX: Debug this to see if it can be fixed on the RMs
+ cmd = "sleep 5; ping -c3 %s.2" % self.netblock
+ ec.set(app, "command", cmd)
+ ec.register_connection(app, node1)
+
+ ec.deploy()
+
+ ec.wait_finished(app)
+
+ ping = ec.trace(app, "stdout")
+ expected = """3 packets transmitted, 3 received, 0% packet loss"""
+ self.assertTrue(ping.find(expected) > -1)
+
+ if_name = ec.get(tun1, "deviceName")
+ self.assertTrue(if_name.startswith("tun"))
+
+ if_name = ec.get(tun2, "deviceName")
+ self.assertTrue(if_name.startswith("tun"))
+
+ ec.shutdown()
+
+ def test_tap_gre_tunnel(self):
+ self.t_tap_gre_tunnel(self.user, self.host1, self.identity,
+ self.user, self.host2, self.identity)
+
+ def test_tun_gre_tunnel(self):
+ self.t_tun_gre_tunnel(self.user, self.host1, self.identity,
+ self.user, self.host2, self.identity)
+
+ def test_tun_hybrid_gre_tunnel(self):
+ self.t_tun_hybrid_gre_tunnel(self.user, self.host1, self.identity,
+ self.user, self.host3, self.identity)
+
+
+if __name__ == '__main__':
+ unittest.main()
+
minBandwidth=None, minCpu=None,
architecture=None, city=None, ip=None):
- node = ec.register_resource("PlanetlabNode")
+ node = ec.register_resource("planetlab::Node")
if username:
ec.set(node, "username", username)
if ip:
ec.set(node, "ip", ip)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
return node
class PLNodeFactoryTestCase(unittest.TestCase):
def test_creation_phase(self):
- self.assertEquals(PlanetlabNode._rtype, "PlanetlabNode")
+ self.assertEquals(PlanetlabNode._rtype, "planetlab::Node")
self.assertEquals(len(PlanetlabNode._attributes), 32)
class PLNodeTestCase(unittest.TestCase):
ec = ExperimentController(exp_id = "test-ovs")
- node1 = ec.register_resource("PlanetlabNode")
+ node1 = ec.register_resource("planetlab::Node")
ec.set(node1, "hostname", switch1)
ec.set(node1, "username", user1)
- ec.set(node1, "cleanHome", True)
+ ec.set(node1, "cleanExperiment", True)
ec.set(node1, "cleanProcesses", True)
- ovs1 = ec.register_resource("OVSSwitch")
+ ovs1 = ec.register_resource("planetlab::OVSSwitch")
ec.set(ovs1, "bridge_name", "nepi_bridge")
ec.set(ovs1, "virtual_ip_pref", "192.168.3.1/24")
ec.set(ovs1, "controller_ip", "85.23.168.77")
ec.set(ovs1, "controller_port", "6633")
ec.register_connection(ovs1, node1)
- port1 = ec.register_resource("OVSPort")
+ port1 = ec.register_resource("planetlab::OVSPort")
ec.set(port1, "port_name", "port-1")
ec.register_connection(port1, ovs1)
- port2 = ec.register_resource("OVSPort")
+ port2 = ec.register_resource("planetlab::OVSPort")
ec.set(port2, "port_name", "port-2")
ec.register_connection(port2, ovs1)
- node2 = ec.register_resource("PlanetlabNode")
+ node2 = ec.register_resource("planetlab::Node")
ec.set(node2, "hostname", switch2)
ec.set(node2, "username", user2)
- ec.set(node2, "cleanHome", True)
+ ec.set(node2, "cleanExperiment", True)
ec.set(node2, "cleanProcesses", True)
- ovs2 = ec.register_resource("OVSSwitch")
+ ovs2 = ec.register_resource("planetlab::OVSSwitch")
ec.set(ovs2, "bridge_name", "nepi_bridge")
ec.set(ovs2, "virtual_ip_pref", "192.168.3.2/24")
ec.set(ovs2, "controller_ip", "85.23.168.77")
ec.set(ovs2, "controller_port", "6633")
ec.register_connection(ovs2, node2)
- port3 = ec.register_resource("OVSPort")
+ port3 = ec.register_resource("planetlab::OVSPort")
ec.set(port3, "port_name", "port-3")
ec.register_connection(port3, ovs2)
- port4 = ec.register_resource("OVSPort")
+ port4 = ec.register_resource("planetlab::OVSPort")
ec.set(port4, "port_name", "port-4")
ec.register_connection(port4, ovs2)
- node3 = ec.register_resource("PlanetlabNode")
+ node3 = ec.register_resource("planetlab::Node")
ec.set(node3, "hostname", host1)
ec.set(node3, "username", user3)
- ec.set(node3, "cleanHome", True)
+ ec.set(node3, "cleanExperiment", True)
ec.set(node3, "cleanProcesses", True)
- tap1 = ec.register_resource("PlanetlabTap")
+ tap1 = ec.register_resource("planetlab::Tap")
ec.set(tap1, "ip4", "192.168.3.3")
ec.set(tap1, "pointopoint", "192.168.3.1")
ec.set(tap1, "prefix4", 24)
ec.register_connection(tap1, node3)
- node4 = ec.register_resource("PlanetlabNode")
+ node4 = ec.register_resource("planetlab::Node")
ec.set(node4, "hostname", host2)
ec.set(node4, "username", user4)
- ec.set(node4, "cleanHome", True)
+ ec.set(node4, "cleanExperiment", True)
ec.set(node4, "cleanProcesses", True)
- tap2 = ec.register_resource("PlanetlabTap")
+ tap2 = ec.register_resource("planetlab::Tap")
ec.set(tap2, "ip4", "192.168.3.4")
ec.set(tap2, "pointopoint", "192.168.3.2")
ec.set(tap2, "prefix4", 24)
ec.register_connection(tap2, node4)
- ovstun1 = ec.register_resource("OVSTunnel")
+ ovstun1 = ec.register_resource("planetlab::OVSTunnel")
ec.register_connection(port1, ovstun1)
ec.register_connection(tap1, ovstun1)
- ovstun2 = ec.register_resource("OVSTunnel")
+ ovstun2 = ec.register_resource("planetlab::OVSTunnel")
ec.register_connection(port3, ovstun2)
ec.register_connection(tap2, ovstun2)
- ovstun3 = ec.register_resource("OVSTunnel")
+ ovstun3 = ec.register_resource("planetlab::OVSTunnel")
ec.register_connection(port2, ovstun3)
ec.register_connection(port4, ovstun3)
- app1 = ec.register_resource("LinuxApplication")
+ app1 = ec.register_resource("linux::Application")
cmd = "ping -c3 192.168.3.2"
ec.set(app1, "command", cmd)
ec.register_connection(app1, node1)
- app2 = ec.register_resource("LinuxApplication")
+ app2 = ec.register_resource("linux::Application")
cmd = "ping -c3 192.168.3.4"
ec.set(app2, "command", cmd)
ec.register_connection(app2, node2)
class PLSfaNodeFactoryTestCase(unittest.TestCase):
def test_creation_phase(self):
- self.assertEquals(PlanetlabSfaNode._rtype, "PlanetlabSfaNode")
+ self.assertEquals(PlanetlabSfaNode._rtype, "planetlab::sfa::Node")
self.assertEquals(len(PlanetlabSfaNode._attributes), 31)
class PLSfaNodeTestCase(unittest.TestCase):
instanciated, and is an instance of SFAAPI. Check that using
the same credentials, the same object of the api is used.
"""
- node1 = self.ec.register_resource("PlanetlabSfaNode")
+ node1 = self.ec.register_resource("planetlab::sfa::Node")
self.ec.set(node1, "hostname", "planetlab2.ionio.gr")
self.ec.set(node1, "username", self.username)
self.ec.set(node1, "sfauser", self.sfauser)
self.assertEquals(len(api1._reserved), 0)
self.assertEquals(len(api1._blacklist), 0)
- node2 = self.ec.register_resource("PlanetlabSfaNode")
+ node2 = self.ec.register_resource("planetlab::sfa::Node")
self.ec.set(node2, "hostname", "planetlab2.ionio.gr")
self.ec.set(node2, "username", self.username)
self.ec.set(node2, "sfauser", self.sfauser)
"""
Check that the method do_discover reserve the right node.
"""
- node = self.ec.register_resource("PlanetlabSfaNode")
+ node = self.ec.register_resource("planetlab::sfa::Node")
self.ec.set(node, "hostname", "roti.mimuw.edu.pl")
self.ec.set(node, "username", self.username)
self.ec.set(node, "sfauser", self.sfauser)
This test checks that the method do_provision add the node in the slice and check
its well functioning.
"""
- node = self.ec.register_resource("PlanetlabSfaNode")
+ node = self.ec.register_resource("planetlab::sfa::Node")
self.ec.set(node, "hostname", "planetlab2.ionio.gr")
self.ec.set(node, "username", self.username)
self.ec.set(node, "sfauser", self.sfauser)
"""
Test deploy 1 node.
"""
- node = self.ec.register_resource("PlanetlabSfaNode")
+ node = self.ec.register_resource("planetlab::sfa::Node")
self.ec.set(node, "hostname", "planetlab2.ionio.gr")
self.ec.set(node, "username", self.username)
self.ec.set(node, "sfauser", self.sfauser)
"""
Test deploy 2 nodes. Empty slice.
"""
- node1 = self.ec.register_resource("PlanetlabSfaNode")
+ node1 = self.ec.register_resource("planetlab::sfa::Node")
self.ec.set(node1, "hostname", "planetlab3.xeno.cl.cam.ac.uk")
self.ec.set(node1, "username", self.username)
self.ec.set(node1, "sfauser", self.sfauser)
self.ec.set(node1, "sfaPrivateKey", self.sfaPrivateKey)
- node2 = self.ec.register_resource("PlanetlabSfaNode")
+ node2 = self.ec.register_resource("planetlab::sfa::Node")
self.ec.set(node2, "hostname", "planetlab1.cs.vu.nl")
self.ec.set(node2, "username", self.username)
self.ec.set(node2, "sfauser", self.sfauser)
"""
Test deploy 2 nodes, already in the slice.
"""
- node1 = self.ec.register_resource("PlanetlabSfaNode")
+ node1 = self.ec.register_resource("planetlab::sfa::Node")
self.ec.set(node1, "hostname", "planetlab3.xeno.cl.cam.ac.uk")
self.ec.set(node1, "username", self.username)
self.ec.set(node1, "sfauser", self.sfauser)
self.ec.set(node1, "sfaPrivateKey", self.sfaPrivateKey)
- node2 = self.ec.register_resource("PlanetlabSfaNode")
+ node2 = self.ec.register_resource("planetlab::sfa::Node")
self.ec.set(node2, "hostname", "planetlab1.cs.vu.nl")
self.ec.set(node2, "username", self.username)
self.ec.set(node2, "sfauser", self.sfauser)
class PlanetlabTapTestCase(unittest.TestCase):
def setUp(self):
- self.host = "nepi5.pl.sophia.inria.fr"
+ self.host = "planetlab1.informatik.uni-erlangen.de"
self.user = "inria_nepi"
+ self.identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
+ self.netblock = "192.168.3"
+ #self.host = "nepi2.pl.sophia.inria.fr"
+ #self.user = "inria_nepi"
+ #self.identity = None
+ #self.netblock = "192.168.1"
@skipIfNotAlive
- def t_tap_create(self, host, user):
+ def t_tap_create(self, host, user, identity):
- ec = ExperimentController(exp_id = "test-tap-create")
+ ec = ExperimentController(exp_id="test-tap-create")
- node = ec.register_resource("PlanetlabNode")
+ node = ec.register_resource("planetlab::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "identity", identity)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
- tap = ec.register_resource("PlanetlabTap")
- ec.set(tap, "ip4", "192.168.1.1")
- ec.set(tap, "prefix4", 24)
+ tap = ec.register_resource("planetlab::Tap")
+ ec.set(tap, "ip", "%s.1" % self.netblock)
+ ec.set(tap, "prefix", "24")
ec.register_connection(tap, node)
- app = ec.register_resource("LinuxApplication")
- cmd = "ping -c3 192.168.1.1"
+ app = ec.register_resource("linux::Application")
+ cmd = "ping -c3 %s.1" % self.netblock
ec.set(app, "command", cmd)
ec.register_connection(app, node)
ec.wait_finished(app)
- ping = ec.trace(app, 'stdout')
+ ping = ec.trace(app, "stdout")
expected = """3 packets transmitted, 3 received, 0% packet loss"""
self.assertTrue(ping.find(expected) > -1)
ec.shutdown()
def test_tap_create(self):
- self.t_tap_create(self.host, self.user)
+ self.t_tap_create(self.host, self.user, self.identity)
if __name__ == '__main__':
unittest.main()
class PlanetlabTunTestCase(unittest.TestCase):
def setUp(self):
- self.host = "nepi2.pl.sophia.inria.fr"
+ self.host = "planetlab1.informatik.uni-erlangen.de"
self.user = "inria_nepi"
+ self.identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
+ self.netblock = "192.168.3"
+ #self.host = "nepi2.pl.sophia.inria.fr"
+ #self.user = "inria_nepi"
+ #self.identity = None
+ #self.netblock = "192.168.1"
@skipIfNotAlive
- def t_tun_create(self, host, user):
+ def t_tun_create(self, host, user, identity):
- ec = ExperimentController(exp_id = "test-un-create")
+ ec = ExperimentController(exp_id="test-un-create")
- node = ec.register_resource("PlanetlabNode")
+ node = ec.register_resource("planetlab::Node")
ec.set(node, "hostname", host)
ec.set(node, "username", user)
- ec.set(node, "cleanHome", True)
+ ec.set(node, "identity", identity)
+ ec.set(node, "cleanExperiment", True)
ec.set(node, "cleanProcesses", True)
- tun = ec.register_resource("PlanetlabTun")
- ec.set(tun, "ip4", "192.168.1.1")
- ec.set(tun, "prefix4", 24)
+ tun = ec.register_resource("planetlab::Tun")
+ ec.set(tun, "ip", "%s.1" % self.netblock)
+ ec.set(tun, "prefix", "24")
ec.register_connection(tun, node)
- app = ec.register_resource("LinuxApplication")
- cmd = "ping -c3 192.168.1.1"
+ app = ec.register_resource("linux::Application")
+ cmd = "ping -c3 %s.1" % self.netblock
ec.set(app, "command", cmd)
ec.register_connection(app, node)
ec.wait_finished(app)
- ping = ec.trace(app, 'stdout')
+ ping = ec.trace(app, "stdout")
expected = """3 packets transmitted, 3 received, 0% packet loss"""
self.assertTrue(ping.find(expected) > -1)
ec.shutdown()
def test_tun_create(self):
- self.t_tun_create(self.host, self.user)
+ self.t_tun_create(self.host, self.user, self.identity)
if __name__ == '__main__':
unittest.main()
from nepi.execution.ec import ExperimentController
-from test_utils import skipIfAnyNotAlive
+from test_utils import skipIfAnyNotAliveWithIdentity
import os
import time
import unittest
-class UdpTunnelTestCase(unittest.TestCase):
+class PlanetlabUdpTunnelTestCase(unittest.TestCase):
def setUp(self):
- self.host1 = "nepi2.pl.sophia.inria.fr"
- self.host2 = "nepi5.pl.sophia.inria.fr"
+ #self.host1 = "nepi2.pl.sophia.inria.fr"
+ #self.host2 = "nepi5.pl.sophia.inria.fr"
+ #self.host2 = "planetlab1.informatik.uni-goettingen.de"
+ self.host1 = "planetlab1.informatik.uni-erlangen.de"
+ self.host2 = "planck227ple.test.ibbt.be"
self.user = "inria_nepi"
+ #self.identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
+ self.identity = "%s/.ssh/id_rsa" % (os.environ['HOME'])
+ #self.netblock = "192.168.1"
+ self.netblock = "192.168.3"
- @skipIfAnyNotAlive
- def t_tap_udp_tunnel(self, user1, host1, user2, host2):
+ @skipIfAnyNotAliveWithIdentity
+ def t_tap_udp_tunnel(self, user1, host1, identity1, user2, host2,
+ identity2):
- ec = ExperimentController(exp_id = "test-tap-udp-tunnel")
+ ec = ExperimentController(exp_id="test-tap-udp-tunnel")
- node1 = ec.register_resource("PlanetlabNode")
+ node1 = ec.register_resource("planetlab::Node")
ec.set(node1, "hostname", host1)
ec.set(node1, "username", user1)
- ec.set(node1, "cleanHome", True)
+ ec.set(node1, "identity", identity1)
+ ec.set(node1, "cleanExperiment", True)
ec.set(node1, "cleanProcesses", True)
- tap1 = ec.register_resource("PlanetlabTap")
- ec.set(tap1, "ip4", "192.168.1.1")
- ec.set(tap1, "pointopoint", "192.168.1.2")
- ec.set(tap1, "prefix4", 24)
+ tap1 = ec.register_resource("planetlab::Tap")
+ ec.set(tap1, "ip", "%s.1" % self.netblock)
+ ec.set(tap1, "prefix", "24")
ec.register_connection(tap1, node1)
- node2 = ec.register_resource("PlanetlabNode")
+ node2 = ec.register_resource("planetlab::Node")
ec.set(node2, "hostname", host2)
ec.set(node2, "username", user2)
- ec.set(node2, "cleanHome", True)
+ ec.set(node2, "identity", identity2)
+ ec.set(node2, "cleanExperiment", True)
ec.set(node2, "cleanProcesses", True)
- tap2 = ec.register_resource("PlanetlabTap")
- ec.set(tap2, "ip4", "192.168.1.2")
- ec.set(tap2, "pointopoint", "192.168.1.1")
- ec.set(tap2, "prefix4", 24)
+ tap2 = ec.register_resource("planetlab::Tap")
+ ec.set(tap2, "ip", "%s.2" % self.netblock)
+ ec.set(tap2, "prefix", "24")
ec.register_connection(tap2, node2)
- udptun = ec.register_resource("UdpTunnel")
+ udptun = ec.register_resource("linux::UdpTunnel")
ec.register_connection(tap1, udptun)
ec.register_connection(tap2, udptun)
- app = ec.register_resource("LinuxApplication")
- cmd = "ping -c3 192.168.1.2"
+ app = ec.register_resource("linux::Application")
+ cmd = "ping -c3 %s.2" % self.netblock
ec.set(app, "command", cmd)
ec.register_connection(app, node1)
expected = """3 packets transmitted, 3 received, 0% packet loss"""
self.assertTrue(ping.find(expected) > -1)
- if_name = ec.get(tap1, "deviceName")
- self.assertTrue(if_name.startswith("tap"))
+ vif_name = ec.get(tap1, "deviceName")
+ self.assertTrue(vif_name.startswith("tap"))
- if_name = ec.get(tap2, "deviceName")
- self.assertTrue(if_name.startswith("tap"))
+ vif_name = ec.get(tap2, "deviceName")
+ self.assertTrue(vif_name.startswith("tap"))
ec.shutdown()
- @skipIfAnyNotAlive
- def t_tun_udp_tunnel(self, user1, host1, user2, host2):
+ @skipIfAnyNotAliveWithIdentity
+ def t_tun_udp_tunnel(self, user1, host1, identity1, user2, host2, identity2):
- ec = ExperimentController(exp_id = "test-tap-udp-tunnel")
+ ec = ExperimentController(exp_id="test-tun-udp-tunnel")
- node1 = ec.register_resource("PlanetlabNode")
+ node1 = ec.register_resource("planetlab::Node")
ec.set(node1, "hostname", host1)
ec.set(node1, "username", user1)
- ec.set(node1, "cleanHome", True)
+ ec.set(node1, "identity", identity1)
+ ec.set(node1, "cleanExperiment", True)
ec.set(node1, "cleanProcesses", True)
- tun1 = ec.register_resource("PlanetlabTun")
- ec.set(tun1, "ip4", "192.168.1.1")
- ec.set(tun1, "pointopoint", "192.168.1.2")
- ec.set(tun1, "prefix4", 24)
+ tun1 = ec.register_resource("planetlab::Tun")
+ ec.set(tun1, "ip", "%s.1" % self.netblock)
+ ec.set(tun1, "prefix", "24")
ec.register_connection(tun1, node1)
- node2 = ec.register_resource("PlanetlabNode")
+ node2 = ec.register_resource("planetlab::Node")
ec.set(node2, "hostname", host2)
ec.set(node2, "username", user2)
- ec.set(node2, "cleanHome", True)
+ ec.set(node2, "identity", identity2)
+ ec.set(node2, "cleanExperiment", True)
ec.set(node2, "cleanProcesses", True)
- tun2 = ec.register_resource("PlanetlabTun")
- ec.set(tun2, "ip4", "192.168.1.2")
- ec.set(tun2, "pointopoint", "192.168.1.1")
- ec.set(tun2, "prefix4", 24)
+ tun2 = ec.register_resource("planetlab::Tun")
+ ec.set(tun2, "ip", "%s.2" % self.netblock)
+ ec.set(tun2, "prefix", "24")
ec.register_connection(tun2, node2)
- udptun = ec.register_resource("UdpTunnel")
+ udptun = ec.register_resource("linux::UdpTunnel")
ec.register_connection(tun1, udptun)
ec.register_connection(tun2, udptun)
- app = ec.register_resource("LinuxApplication")
- cmd = "ping -c3 192.168.1.2"
+ app = ec.register_resource("linux::Application")
+ cmd = "ping -c3 %s.2" % self.netblock
ec.set(app, "command", cmd)
ec.register_connection(app, node1)
expected = """3 packets transmitted, 3 received, 0% packet loss"""
self.assertTrue(ping.find(expected) > -1)
- if_name = ec.get(tun1, "deviceName")
- self.assertTrue(if_name.startswith("tun"))
+ vif_name = ec.get(tun1, "deviceName")
+ self.assertTrue(vif_name.startswith("tun"))
- if_name = ec.get(tun2, "deviceName")
- self.assertTrue(if_name.startswith("tun"))
+ vif_name = ec.get(tun2, "deviceName")
+ self.assertTrue(vif_name.startswith("tun"))
ec.shutdown()
def test_tap_udp_tunnel(self):
- self.t_tap_udp_tunnel(self.user, self.host1, self.user, self.host2)
+ self.t_tap_udp_tunnel(self.user, self.host1, self.identity,
+ self.user, self.host2, self.identity)
def test_tun_udp_tunnel(self):
- self.t_tun_udp_tunnel(self.user, self.host1, self.user, self.host2)
+ self.t_tun_udp_tunnel(self.user, self.host1, self.identity,
+ self.user, self.host2, self.identity)
if __name__ == '__main__':
unittest.main()
+++ /dev/null
-#!/usr/bin/env python
-#
-# NEPI, a framework to manage network experiments
-# Copyright (C) 2013 INRIA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
-
-
-from nepi.design.box import Box
-from nepi.util.plot import Plotter
-
-import subprocess
-import unittest
-
-class BoxPlotTestCase(unittest.TestCase):
- def xtest_plot(self):
- """ XXX: This test is interactive, it will open an evince instance,
- so it should not run automatically """
- node1 = Box(label="node1")
- ping1 = Box(label="ping")
- mobility1 = Box(label="mob1")
- node2 = Box(label="node2")
- mobility2 = Box(label="mob2")
- iface1 = Box(label="iface1")
- iface2 = Box(label="iface2")
- channel = Box(label="chan")
-
- node1.connect(ping1)
- node1.connect(mobility1)
- node1.connect(iface1)
- channel.connect(iface1)
- channel.connect(iface2)
- node2.connect(iface2)
- node2.connect(mobility2)
-
- plotter = Plotter(node1)
- fname = plotter.plot()
- subprocess.call(["dot", "-Tps", fname, "-o", "%s.ps"%fname])
- subprocess.call(["evince","%s.ps"%fname])
-
-if __name__ == '__main__':
- unittest.main()
-
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.resource import ResourceManager, ResourceState, \
+ clsinit_copy, ResourceAction, ResourceFactory
+from nepi.util.plotter import PFormats
+
+import os
+import tempfile
+import time
+import unittest
+
+reschedule_delay = "0.5s"
+deploy_time = 0
+run_time = 0
+
+class Link(ResourceManager):
+ _rtype = "dummy::Link"
+ def do_deploy(self):
+ time.sleep(deploy_time)
+ super(Link, self).do_deploy()
+ self.logger.debug(" -------- DEPLOYED ------- ")
+
+class Interface(ResourceManager):
+ _rtype = "dummy::Interface"
+
+ def do_deploy(self):
+ node = self.get_connected(Node.get_rtype())[0]
+ link = self.get_connected(Link.get_rtype())[0]
+
+ if node.state < ResourceState.READY or \
+ link.state < ResourceState.READY:
+ self.ec.schedule(reschedule_delay, self.deploy)
+ self.logger.debug(" -------- RESCHEDULING ------- ")
+ else:
+ time.sleep(deploy_time)
+ super(Interface, self).do_deploy()
+ self.logger.debug(" -------- DEPLOYED ------- ")
+
+class Node(ResourceManager):
+ _rtype = "dummy::Node"
+
+ def do_deploy(self):
+ self.logger.debug(" -------- DO_DEPLOY ------- ")
+ time.sleep(deploy_time)
+ super(Node, self).do_deploy()
+ self.logger.debug(" -------- DEPLOYED ------- ")
+
+class Application(ResourceManager):
+ _rtype = "dummy::Application"
+
+ def do_deploy(self):
+ node = self.get_connected(Node.get_rtype())[0]
+
+ if node.state < ResourceState.READY:
+ self.ec.schedule(reschedule_delay, self.deploy)
+ self.logger.debug(" -------- RESCHEDULING ------- ")
+ else:
+ time.sleep(deploy_time)
+ super(Application, self).do_deploy()
+ self.logger.debug(" -------- DEPLOYED ------- ")
+
+ def do_start(self):
+ super(Application, self).do_start()
+ time.sleep(run_time)
+ self.ec.schedule("0s", self.stop)
+
+ResourceFactory.register_type(Application)
+ResourceFactory.register_type(Node)
+ResourceFactory.register_type(Interface)
+ResourceFactory.register_type(Link)
+
+class PlotterTestCase(unittest.TestCase):
+ def test_serialize(self):
+ node_count = 4
+ app_count = 2
+
+ ec = ExperimentController(exp_id = "plotter-test")
+
+ # Add simulated nodes and applications
+ nodes = list()
+ apps = list()
+ ifaces = list()
+
+ for i in xrange(node_count):
+ node = ec.register_resource("dummy::Node")
+ nodes.append(node)
+
+ iface = ec.register_resource("dummy::Interface")
+ ec.register_connection(node, iface)
+ ifaces.append(iface)
+
+ for i in xrange(app_count):
+ app = ec.register_resource("dummy::Application")
+ ec.register_connection(node, app)
+ apps.append(app)
+
+ link = ec.register_resource("dummy::Link")
+
+ for iface in ifaces:
+ ec.register_connection(link, iface)
+
+ fpath = ec.plot()
+ statinfo = os.stat(fpath)
+ size = statinfo.st_size
+ self.assertTrue(size > 0)
+ self.assertTrue(fpath.endswith(".png"))
+
+ os.remove(fpath)
+
+ fpath = ec.plot(format = PFormats.DOT)
+ statinfo = os.stat(fpath)
+ size = statinfo.st_size
+ self.assertTrue(size > 0)
+ self.assertTrue(fpath.endswith(".dot"))
+
+ os.remove(fpath)
+
+if __name__ == '__main__':
+ unittest.main()
+
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.resource import ResourceManager, ResourceState, \
+ clsinit_copy, ResourceAction, ResourceFactory
+
+import os
+import tempfile
+import time
+import shutil
+import unittest
+
+reschedule_delay = "0.5s"
+deploy_time = 0
+run_time = 0
+
+class Link(ResourceManager):
+ _rtype = "dummy::Link"
+ def do_deploy(self):
+ time.sleep(deploy_time)
+ super(Link, self).do_deploy()
+ self.logger.debug(" -------- DEPLOYED ------- ")
+
+class Interface(ResourceManager):
+ _rtype = "dummy::Interface"
+
+ def do_deploy(self):
+ node = self.get_connected(Node.get_rtype())[0]
+ link = self.get_connected(Link.get_rtype())[0]
+
+ if node.state < ResourceState.READY or \
+ link.state < ResourceState.READY:
+ self.ec.schedule(reschedule_delay, self.deploy)
+ self.logger.debug(" -------- RESCHEDULING ------- ")
+ else:
+ time.sleep(deploy_time)
+ super(Interface, self).do_deploy()
+ self.logger.debug(" -------- DEPLOYED ------- ")
+
+class Node(ResourceManager):
+ _rtype = "dummy::Node"
+
+ def do_deploy(self):
+ self.logger.debug(" -------- DO_DEPLOY ------- ")
+ time.sleep(deploy_time)
+ super(Node, self).do_deploy()
+ self.logger.debug(" -------- DEPLOYED ------- ")
+
+class Application(ResourceManager):
+ _rtype = "dummy::Application"
+
+ def do_deploy(self):
+ node = self.get_connected(Node.get_rtype())[0]
+
+ if node.state < ResourceState.READY:
+ self.ec.schedule(reschedule_delay, self.deploy)
+ self.logger.debug(" -------- RESCHEDULING ------- ")
+ else:
+ time.sleep(deploy_time)
+ super(Application, self).do_deploy()
+ self.logger.debug(" -------- DEPLOYED ------- ")
+
+ def do_start(self):
+ super(Application, self).do_start()
+ time.sleep(run_time)
+ self.ec.schedule("0s", self.stop)
+
+ResourceFactory.register_type(Application)
+ResourceFactory.register_type(Node)
+ResourceFactory.register_type(Interface)
+ResourceFactory.register_type(Link)
+
+class SerializerTestCase(unittest.TestCase):
+ def test_serialize(self):
+ node_count = 4
+ app_count = 2
+
+ dirpath = tempfile.mkdtemp()
+
+ ec = ExperimentController(exp_id = "serialize-test")
+
+ # Add simulated nodes and applications
+ nodes = list()
+ apps = list()
+ ifaces = list()
+
+ for i in xrange(node_count):
+ node = ec.register_resource("dummy::Node")
+ nodes.append(node)
+
+ iface = ec.register_resource("dummy::Interface")
+ ec.register_connection(node, iface)
+ ifaces.append(iface)
+
+ for i in xrange(app_count):
+ app = ec.register_resource("dummy::Application")
+ ec.register_connection(node, app)
+ apps.append(app)
+
+ link = ec.register_resource("dummy::Link")
+
+ for iface in ifaces:
+ ec.register_connection(link, iface)
+
+ filepath = ec.save(dirpath)
+
+ ec.deploy()
+
+ # Wait until nodes and apps are deployed
+ ec.wait_finished(apps)
+
+ # Do the experiment controller shutdown
+ ec.shutdown()
+
+ # Load serialized experiment
+ ec2 = ExperimentController.load(filepath)
+ apps = ec2.filter_resources("dummy::Application")
+ ec2.deploy()
+ ec2.wait_finished(apps)
+ ec2.shutdown()
+
+ self.assertEquals(len(ec.resources), len(ec2.resources))
+
+ shutil.rmtree(dirpath)
+
+if __name__ == '__main__':
+ unittest.main()
+