Merge the nepi-omf6-perf branch with the last update (traces, ...)
author Julien Tribino <julien.tribino@inria.fr>
Mon, 22 Sep 2014 12:01:15 +0000 (14:01 +0200)
committer Julien Tribino <julien.tribino@inria.fr>
Mon, 22 Sep 2014 12:01:15 +0000 (14:01 +0200)
132 files changed:
doc/user_manual/NEPI_overview.png [new file with mode: 0644]
doc/user_manual/NEPI_overview_short.odg [new file with mode: 0644]
doc/user_manual/NEPI_overview_short.png [new file with mode: 0644]
doc/user_manual/abstract_topology_mapping.odg [new file with mode: 0644]
doc/user_manual/abstract_topology_mapping.pdf [new file with mode: 0644]
doc/user_manual/abstract_topology_mapping.png [new file with mode: 0644]
doc/user_manual/abstract_topology_vs_executable_description.odg [new file with mode: 0644]
doc/user_manual/abstract_topology_vs_executable_description.png [new file with mode: 0644]
doc/user_manual/intro_life_cycle.png [new file with mode: 0644]
examples/ccn_emu_live/dce.py [new file with mode: 0644]
examples/ccn_emu_live/planetlab.py [new file with mode: 0644]
examples/ccn_emu_live/repoFile1.0.8.2 [new file with mode: 0644]
examples/dce/custom_local_csma_ping.py [new file with mode: 0644]
examples/dce/custom_local_p2p_ccn.py [moved from examples/linux/dce/custom_dce_ccn.py with 91% similarity]
examples/dce/custom_local_p2p_ping.py [moved from examples/linux/dce/custom_dce_ping.py with 86% similarity]
examples/dce/custom_local_wifi_ping.py [new file with mode: 0644]
examples/dce/wrapped_local_p2p_ccncat.py [moved from examples/linux/dce/dce_ccn_application.py with 89% similarity]
examples/dce/wrapped_local_p2p_ccnpeek.py [moved from examples/linux/dce/dce_ccnpeek_application.py with 96% similarity]
examples/dce/wrapped_local_p2p_ping.py [moved from examples/linux/dce/dce_ping_application.py with 88% similarity]
examples/linux/ccn_simple_transfer.py [moved from examples/linux/ccn/two_nodes_file_retrieval.py with 63% similarity]
examples/linux/ccn_transfer_using_linuxapp.py [new file with mode: 0644]
examples/linux/netcat_file_transfer.py [moved from examples/linux/file_transfer.py with 94% similarity]
examples/linux/ping.py
examples/linux/testing/ccncat_2_nodes.py [moved from examples/linux/ccn/ccncat_2_nodes.py with 99% similarity]
examples/linux/testing/ccncat_extended_ring_topo.py [moved from examples/linux/ccn/ccncat_extended_ring_topo.py with 97% similarity]
examples/linux/testing/multihop_ssh.py [moved from examples/linux/multihop_ssh.py with 100% similarity]
examples/linux/testing/scalability.py [moved from examples/linux/scalability.py with 100% similarity]
examples/linux/vlc_streaming.py
examples/ns3/local_csma_p2p_star_ping.py [moved from examples/linux/ns3/csma_p2p_star.py with 100% similarity]
examples/ns3/local_csma_ping.py [new file with mode: 0644]
examples/ns3/local_mobile_wifi_ping.py [moved from examples/linux/ns3/wifi_ping.py with 99% similarity]
examples/ns3/local_p2p_ping.py [moved from examples/linux/ns3/local_ping.py with 98% similarity]
examples/ns3/local_wifi_ping.py [new file with mode: 0644]
examples/ns3/remote_p2p_ping.py [moved from examples/linux/ns3/remote_ping.py with 97% similarity]
examples/omf/nepi_omf5_nitos_xeyes.py
examples/omf/vod_exp/big_buck_bunny_240p_mpeg4_lq.ts [new file with mode: 0755]
examples/omf/vod_exp/conf_Broadcast.vlm [new file with mode: 0644]
examples/omf/vod_exp/conf_VoD.vlm [new file with mode: 0644]
examples/omf/vod_exp/vod_experiment.py [new file with mode: 0755]
examples/openvswitch/ovs_ping_2switches.py
examples/openvswitch/ovs_ping_3switches_line.py
examples/openvswitch/ovs_ping_3switches_loop.py
examples/openvswitch/ping_over_udpTapTunnel_performance_test_triangleTopo.py
examples/planetlab/ccn/two_nodes_file_retrieval.py [deleted file]
examples/planetlab/ccn_simple_transfer.py [new file with mode: 0644]
examples/planetlab/openvswitch/ovs_ping_exp.py [deleted file]
examples/planetlab/ping.py [moved from examples/planetlab/ping_experiment.py with 52% similarity]
examples/planetlab/ping_with_filters.py [moved from examples/planetlab/ping_filters_experiment.py with 53% similarity]
examples/planetlab/select_nodes.py [new file with mode: 0644]
examples/planetlab/testing/blacklist.py [moved from examples/planetlab/blacklist.py with 100% similarity]
examples/planetlab/testing/ping_sfa.py [moved from examples/planetlab/ping_sfa.py with 100% similarity]
examples/planetlab/testing/scalability.py [moved from examples/planetlab/scalability.py with 100% similarity]
setup.py
src/nepi/data/__init__.py [new file with mode: 0644]
src/nepi/data/processing/__init__.py [new file with mode: 0644]
src/nepi/data/processing/ccn/__init__.py [new file with mode: 0644]
src/nepi/data/processing/ccn/parser.py [new file with mode: 0644]
src/nepi/data/processing/ping/__init__.py [new file with mode: 0644]
src/nepi/data/processing/ping/parser.py [new file with mode: 0644]
src/nepi/design/__init__.py [deleted file]
src/nepi/design/box.py [deleted file]
src/nepi/execution/attribute.py
src/nepi/execution/ec.py
src/nepi/execution/resource.py
src/nepi/execution/runner.py [new file with mode: 0644]
src/nepi/resources/all/collector.py
src/nepi/resources/linux/application.py
src/nepi/resources/linux/ccn/ccnd.py
src/nepi/resources/linux/ccn/fibentry.py
src/nepi/resources/linux/gretunnel.py
src/nepi/resources/linux/node.py
src/nepi/resources/linux/ns3/ccn/ns3ccnddceapplication.py
src/nepi/resources/linux/ns3/ccn/ns3fibentrydceapplication.py
src/nepi/resources/linux/ns3/ns3simulation.py
src/nepi/resources/linux/scripts/linux-udp-connect.py [new file with mode: 0644]
src/nepi/resources/linux/tap.py [new file with mode: 0644]
src/nepi/resources/linux/tun.py [moved from test/util/parser.py with 51% similarity, mode: 0644]
src/nepi/resources/linux/tunnel.py
src/nepi/resources/linux/udptunnel.py
src/nepi/resources/ns3/ns3base.py
src/nepi/resources/ns3/ns3node.py
src/nepi/resources/ns3/ns3wrapper.py
src/nepi/resources/omf/wilabt_node.py
src/nepi/resources/planetlab/openvswitch/ovs.py
src/nepi/resources/planetlab/openvswitch/ovsport.py
src/nepi/resources/planetlab/openvswitch/tunnel.py [deleted file]
src/nepi/resources/planetlab/scripts/pl-vif-down.py
src/nepi/resources/planetlab/tap.py
src/nepi/resources/planetlab/vroute.py
src/nepi/util/guid.py
src/nepi/util/netgraph.py [new file with mode: 0644]
src/nepi/util/parser.py [deleted file]
src/nepi/util/parsers/__init__.py [new file with mode: 0644]
src/nepi/util/parsers/xml_parser.py [new file with mode: 0644]
src/nepi/util/plot.py [deleted file]
src/nepi/util/plotter.py [new file with mode: 0644]
src/nepi/util/serializer.py [new file with mode: 0644]
src/nepi/util/sfaapi.py
src/nepi/util/sshfuncs.py
src/nepi/util/statfuncs.py [new file with mode: 0644]
src/nepi/util/timefuncs.py
test/design/box.py [deleted file]
test/execution/runner.py [new file with mode: 0755]
test/resources/linux/application.py
test/resources/linux/ccn/ccncat.py [changed mode: 0644->0755]
test/resources/linux/ccn/ccnpeek.py [changed mode: 0644->0755]
test/resources/linux/ccn/ccnping.py [changed mode: 0644->0755]
test/resources/linux/ccn/fibentry.py [changed mode: 0644->0755]
test/resources/linux/gretunnel.py [new file with mode: 0755]
test/resources/linux/multirun.py [new file with mode: 0755]
test/resources/linux/ns3/ccn/ns3dceccn.py [changed mode: 0644->0755]
test/resources/linux/ns3/ccn/ns3dceccnpeek.py [changed mode: 0644->0755]
test/resources/linux/ns3/ns3client.py [changed mode: 0644->0755]
test/resources/linux/ns3/ns3dceapplication.py [changed mode: 0644->0755]
test/resources/linux/ns3/ns3dceping.py [changed mode: 0644->0755]
test/resources/linux/ns3/ns3simulation.py [changed mode: 0644->0755]
test/resources/linux/ns3/serialization.py [new file with mode: 0755]
test/resources/linux/serialization.py [new file with mode: 0755]
test/resources/linux/tap.py [new file with mode: 0755]
test/resources/linux/tun.py [new file with mode: 0755]
test/resources/linux/udptunnel.py [new file with mode: 0755]
test/resources/omf/omf6_vlc_wrong_critical.py [changed mode: 0644->0755]
test/resources/omf/omf6_vlc_wrong_non_critical.py [changed mode: 0644->0755]
test/resources/omf/set_hook.py [changed mode: 0644->0755]
test/resources/planetlab/gretunnel.py
test/resources/planetlab/tap.py
test/resources/planetlab/tun.py
test/resources/planetlab/udptunnel.py
test/util/parallel.py [changed mode: 0644->0755]
test/util/plot.py [deleted file]
test/util/plotter.py [new file with mode: 0755]
test/util/serializer.py [new file with mode: 0755]

diff --git a/doc/user_manual/NEPI_overview.png b/doc/user_manual/NEPI_overview.png
new file mode 100644 (file)
index 0000000..74957a8
Binary files /dev/null and b/doc/user_manual/NEPI_overview.png differ
diff --git a/doc/user_manual/NEPI_overview_short.odg b/doc/user_manual/NEPI_overview_short.odg
new file mode 100644 (file)
index 0000000..adbafb0
Binary files /dev/null and b/doc/user_manual/NEPI_overview_short.odg differ
diff --git a/doc/user_manual/NEPI_overview_short.png b/doc/user_manual/NEPI_overview_short.png
new file mode 100644 (file)
index 0000000..7024271
Binary files /dev/null and b/doc/user_manual/NEPI_overview_short.png differ
diff --git a/doc/user_manual/abstract_topology_mapping.odg b/doc/user_manual/abstract_topology_mapping.odg
new file mode 100644 (file)
index 0000000..1bf720a
Binary files /dev/null and b/doc/user_manual/abstract_topology_mapping.odg differ
diff --git a/doc/user_manual/abstract_topology_mapping.pdf b/doc/user_manual/abstract_topology_mapping.pdf
new file mode 100644 (file)
index 0000000..6a13252
Binary files /dev/null and b/doc/user_manual/abstract_topology_mapping.pdf differ
diff --git a/doc/user_manual/abstract_topology_mapping.png b/doc/user_manual/abstract_topology_mapping.png
new file mode 100644 (file)
index 0000000..607216a
Binary files /dev/null and b/doc/user_manual/abstract_topology_mapping.png differ
diff --git a/doc/user_manual/abstract_topology_vs_executable_description.odg b/doc/user_manual/abstract_topology_vs_executable_description.odg
new file mode 100644 (file)
index 0000000..aeb644f
Binary files /dev/null and b/doc/user_manual/abstract_topology_vs_executable_description.odg differ
diff --git a/doc/user_manual/abstract_topology_vs_executable_description.png b/doc/user_manual/abstract_topology_vs_executable_description.png
new file mode 100644 (file)
index 0000000..7e1dfa5
Binary files /dev/null and b/doc/user_manual/abstract_topology_vs_executable_description.png differ
diff --git a/doc/user_manual/intro_life_cycle.png b/doc/user_manual/intro_life_cycle.png
new file mode 100644 (file)
index 0000000..7eb9025
Binary files /dev/null and b/doc/user_manual/intro_life_cycle.png differ
diff --git a/examples/ccn_emu_live/dce.py b/examples/ccn_emu_live/dce.py
new file mode 100644 (file)
index 0000000..693293a
--- /dev/null
@@ -0,0 +1,229 @@
+#!/usr/bin/env python
+
+###############################################################################
+#
+#    NEPI, a framework to manage network experiments
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+#
+###############################################################################
+
+from nepi.execution.ec import ExperimentController 
+from nepi.execution.runner import ExperimentRunner
+from nepi.util.netgraph import NetGraph, TopologyType
+import nepi.data.processing.ccn.parser as ccn_parser
+
+import networkx
+import socket
+import os
+
+content_name = "ccnx:/test/bunny.ts"
+
+STOP_TIME = "5000s"
+
+repofile = os.path.join(
+        os.path.dirname(os.path.realpath(__file__)), "repoFile1.0.8.2")
+
+def get_simulator(ec):
+    simulator = ec.filter_resources("LinuxNS3Simulation")
+
+    if not simulator:
+        node = ec.register_resource("LinuxNode")
+        ec.set(node, "hostname", "localhost")
+
+        simu = ec.register_resource("LinuxNS3Simulation")
+        ec.set(simu, "enableDump", True)
+        ec.set(simu, "StopTime", STOP_TIME)
+        ec.register_connection(simu, node)
+        return simu
+
+    return simulator[0]
+
+def add_collector(ec, trace_name, subdir, newname = None):
+    collector = ec.register_resource("Collector")
+    ec.set(collector, "traceName", trace_name)
+    ec.set(collector, "subDir", subdir)
+    if newname:
+        ec.set(collector, "rename", newname)
+
+    return collector
+
+def add_dce_host(ec, nid):
+    simu = get_simulator(ec)
+    
+    host = ec.register_resource("ns3::Node")
+    ec.set(host, "enableStack", True)
+    ec.register_connection(host, simu)
+
+    # Annotate the graph
+    ec.netgraph.annotate_node(nid, "host", host)
+    
+def add_dce_ccnd(ec, nid):
+    # Retrieve annotation from netgraph
+    host = ec.netgraph.node_annotation(nid, "host")
+    
+    # Add dce ccnd to the dce node
+    ccnd = ec.register_resource("ns3::LinuxDceCCND")
+    ec.set (ccnd, "stackSize", 1<<20)
+    ec.set (ccnd, "debug", 7)
+    ec.set (ccnd, "capacity", 50000)
+    ec.set (ccnd, "StartTime", "1s")
+    ec.register_connection(ccnd, host)
+
+    # Collector to retrieve ccnd log
+    collector = add_collector(ec, "stderr", nid, "log")
+    ec.register_connection(collector, ccnd)
+
+    # Annotate the graph
+    ec.netgraph.annotate_node(nid, "ccnd", ccnd)
+
+def add_dce_ccnr(ec, nid):
+    # Retrieve annotation from netgraph
+    host = ec.netgraph.node_annotation(nid, "host")
+    
+    # Add a CCN content repository to the dce node
+    ccnr = ec.register_resource("ns3::LinuxDceCCNR")
+    ec.set (ccnr, "repoFile1", repofile) 
+    ec.set (ccnr, "stackSize", 1<<20)
+    ec.set (ccnr, "StartTime", "2s")
+    ec.register_connection(ccnr, host)
+
+def add_dce_ccncat(ec, nid):
+    # Retrieve annotation from netgraph
+    host = ec.netgraph.node_annotation(nid, "host")
+   
+    # Add a ccncat application to the dce host
+    ccncat = ec.register_resource("ns3::LinuxDceCCNCat")
+    ec.set (ccncat, "contentName", content_name)
+    ec.set (ccncat, "stackSize", 1<<20)
+    ec.set (ccncat, "StartTime", "8s")
+    ec.register_connection(ccncat, host)
+
+def add_dce_fib_entry(ec, nid1, nid2):
+    # Retrieve annotations from netgraph
+    host1 = ec.netgraph.node_annotation(nid1, "host")
+    net = ec.netgraph.edge_net_annotation(nid1, nid2)
+    ip2 = net[nid2]
+
+    # Add FIB entry between peer hosts
+    ccndc = ec.register_resource("ns3::LinuxDceFIBEntry")
+    ec.set (ccndc, "protocol", "udp") 
+    ec.set (ccndc, "uri", "ccnx:/") 
+    ec.set (ccndc, "host", ip2)
+    ec.set (ccndc, "stackSize", 1<<20)
+    ec.set (ccndc, "StartTime", "4s")
+    ec.register_connection(ccndc, host1)
+
+def add_dce_net_iface(ec, nid1, nid2):
+    # Retrieve annotations from netgraph
+    host = ec.netgraph.node_annotation(nid1, "host")
+    net = ec.netgraph.edge_net_annotation(nid1, nid2)
+    ip1 = net[nid1]
+    prefix = net["prefix"]
+
+    dev = ec.register_resource("ns3::PointToPointNetDevice")
+    ec.set(dev,"DataRate", "5Mbps")
+    ec.set(dev, "ip", ip1)
+    ec.set(dev, "prefix", prefix)
+    ec.register_connection(host, dev)
+
+    queue = ec.register_resource("ns3::DropTailQueue")
+    ec.register_connection(dev, queue)
+
+    return dev
+
+def avg_interests(ec, run):
+    ## Process logs
+    logs_dir = ec.run_dir
+
+    (graph,
+        content_names,
+        interest_expiry_count,
+        interest_dupnonce_count,
+        interest_count,
+        content_count) = ccn_parser.process_content_history_logs(
+                logs_dir, 
+                ec.netgraph.topology)
+
+    shortest_path = networkx.shortest_path(graph, 
+            source = ec.netgraph.sources()[0], 
+            target = ec.netgraph.targets()[0])
+
+    ### Compute metric: Avg number of Interests seen per content name
+    ###                 normalized by the number of nodes in the shortest path
+    content_name_count = len(content_names.values())
+    nodes_in_shortest_path = len(shortest_path) - 1
+    metric = interest_count / (float(content_name_count) * float(nodes_in_shortest_path))
+
+    # TODO: DUMP RESULTS TO FILE
+    # TODO: DUMP GRAPH DELAYS!
+    f = open("/tmp/metric", "a+")
+    f.write("%.2f\n" % metric)
+    f.close()
+    print " METRIC", metric
+
+    return metric
+
+def add_dce_edge(ec, nid1, nid2):
+    ### Add network interfaces to hosts
+    p2p1 = add_dce_net_iface(ec, nid1, nid2)
+    p2p2 = add_dce_net_iface(ec, nid2, nid1)
+
+    # Create point to point link between interfaces
+    chan = ec.register_resource("ns3::PointToPointChannel")
+    ec.set(chan, "Delay", "0ms")
+
+    ec.register_connection(chan, p2p1)
+    ec.register_connection(chan, p2p2)
+
+    #### Add routing between CCN nodes
+    add_dce_fib_entry(ec, nid1, nid2)
+    add_dce_fib_entry(ec, nid2, nid1)
+
+def add_dce_node(ec, nid):
+    ### Add CCN nodes (ec.netgraph holds the topology graph)
+    add_dce_host(ec, nid)
+    add_dce_ccnd(ec, nid)
+        
+    if nid == ec.netgraph.targets()[0]:
+        add_dce_ccnr(ec, nid)
+
+    if nid == ec.netgraph.sources()[0]:
+        add_dce_ccncat(ec, nid)
+
+if __name__ == '__main__':
+
+    #### Create NEPI Experiment Description with LINEAR topology 
+    ec = ExperimentController("dce_ccn", 
+            topo_type = TopologyType.LINEAR, 
+            node_count = 2,
+            assign_st = True,
+            assign_ips = True,
+            add_node_callback = add_dce_node, 
+            add_edge_callback = add_dce_edge)
+    
+    print "Results stored at", ec.exp_dir
+
+    #### Retrieve the consumer to wait for it to finish
+    ccncat = ec.filter_resources("ns3::LinuxDceCCNCat")
+   
+    #### Run experiment until the metric converges
+    rnr = ExperimentRunner()
+    runs = rnr.run(ec, min_runs = 1, max_runs = 1, 
+            compute_metric_callback = avg_interests,
+            wait_guids = ccncat,
+            wait_time = 0)
+
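The ExperimentRunner above repeats the experiment and asks compute_metric_callback for one value per run; any callable that takes the controller and the run number and returns a number will do. A minimal sketch of that callback shape, assuming a hypothetical parse_metric() helper in place of the CCN log processing done by avg_interests():

# Hedged sketch: minimal shape of a compute_metric_callback for
# ExperimentRunner.run(), mirroring avg_interests() above.
# parse_metric() is a hypothetical stand-in for experiment-specific
# trace parsing; it is not part of NEPI.
def parse_metric(logs_dir):
    # Placeholder: reduce the traces collected under logs_dir to one number
    return 0.0

def my_metric(ec, run):
    # ec.run_dir holds the traces collected for this particular run
    logs_dir = ec.run_dir
    return parse_metric(logs_dir)

# Passed to the runner as:
#   rnr.run(ec, min_runs = 1, max_runs = 1,
#           compute_metric_callback = my_metric,
#           wait_guids = ccncat, wait_time = 0)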
diff --git a/examples/ccn_emu_live/planetlab.py b/examples/ccn_emu_live/planetlab.py
new file mode 100644 (file)
index 0000000..3fd8361
--- /dev/null
@@ -0,0 +1,222 @@
+#!/usr/bin/env python
+
+###############################################################################
+#
+#    NEPI, a framework to manage network experiments
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+#
+###############################################################################
+
+from nepi.execution.ec import ExperimentController 
+from nepi.execution.runner import ExperimentRunner
+from nepi.util.netgraph import NetGraph, TopologyType
+import nepi.data.processing.ccn.parser as ccn_parser
+
+import networkx
+import socket
+import os
+
+PL_NODES = dict({
+    0: "iraplab1.iralab.uni-karlsruhe.de",
+    1: "planetvs2.informatik.uni-stuttgart.de",
+    2: "dfn-ple1.x-win.dfn.de",
+    3: "planetlab2.extern.kuleuven.be",
+    4: "mars.planetlab.haw-hamburg.de",
+    5: "planetlab-node3.it-sudparis.eu",
+    6: "node2pl.planet-lab.telecom-lille1.eu",
+    7: "planetlab1.informatik.uni-wuerzburg.de",
+    8: "planet1.l3s.uni-hannover.de",
+    9: "planetlab1.wiwi.hu-berlin.de",
+    10: "pl2.uni-rostock.de", 
+    11: "planetlab1.u-strasbg.fr",
+    12: "peeramidion.irisa.fr",
+    13: "planetlab2.unineuchatel.ch", 
+    })
+
+pl_slice = os.environ.get("PL_SLICE")
+pl_user = os.environ.get("PL_USER")
+pl_password = os.environ.get("PL_PASS")
+pl_ssh_key = os.environ.get("PL_SSHKEY")
+
+content_name = "ccnx:/test/bunny.ts"
+
+pipeline = 4 # Default value for ccncat
+
+operating_system = "f14"
+
+country = "germany"
+
+repofile = os.path.join(
+        os.path.dirname(os.path.realpath(__file__)), "repoFile1.0.8.2")
+
+def add_collector(ec, trace_name, subdir, newname = None):
+    collector = ec.register_resource("Collector")
+    ec.set(collector, "traceName", trace_name)
+    ec.set(collector, "subDir", subdir)
+    if newname:
+        ec.set(collector, "rename", newname)
+
+    return collector
+
+def add_pl_host(ec, nid):
+    hostname = PL_NODES[nid]
+
+    # Add a planetlab host to the experiment description
+    host = ec.register_resource("PlanetlabNode")
+    ec.set(host, "hostname", hostname)
+    ec.set(host, "username", pl_slice)
+    ec.set(host, "identity", pl_ssh_key)
+    #ec.set(host, "pluser", pl_user)
+    #ec.set(host, "plpassword", pl_password)
+    #ec.set(host, "country", country)
+    #ec.set(host, "operatingSystem", operating_system)
+    ec.set(host, "cleanExperiment", True)
+    ec.set(host, "cleanProcesses", True)
+
+    # Annotate the graph
+    ec.netgraph.annotate_node(nid, "hostname", hostname)
+    ec.netgraph.annotate_node(nid, "host", host)
+    
+    # Annotate the graph node with an ip address
+    ip = socket.gethostbyname(hostname)
+    ec.netgraph.annotate_node_ip(nid, ip)
+
+def add_pl_ccnd(ec, nid):
+    # Retrieve annotation from netgraph
+    host = ec.netgraph.node_annotation(nid, "host")
+    
+    # Add a CCN daemon to the planetlab node
+    ccnd = ec.register_resource("LinuxCCND")
+    ec.set(ccnd, "debug", 7)
+    ec.register_connection(ccnd, host)
+    
+    # Collector to retrieve ccnd log
+    collector = add_collector(ec, "stderr", nid, "log")
+    ec.register_connection(collector, ccnd)
+
+    # Annotate the graph
+    ec.netgraph.annotate_node(nid, "ccnd", ccnd)
+
+def add_pl_ccnr(ec, nid):
+    # Retrieve annotation from netgraph
+    ccnd = ec.netgraph.node_annotation(nid, "ccnd")
+    
+    # Add a CCN content repository to the planetlab node
+    ccnr = ec.register_resource("LinuxCCNR")
+
+    ec.set(ccnr, "repoFile1", repofile)
+    ec.register_connection(ccnr, ccnd)
+
+def add_pl_ccncat(ec, nid):
+    # Retrieve annotation from netgraph
+    ccnd = ec.netgraph.node_annotation(nid, "ccnd")
+    
+    # Add a CCN cat application to the planetlab node
+    ccncat = ec.register_resource("LinuxCCNCat")
+    ec.set(ccncat, "pipeline", pipeline)
+    ec.set(ccncat, "contentName", content_name)
+    ec.register_connection(ccncat, ccnd)
+
+def add_pl_fib_entry(ec, nid1, nid2):
+    # Retrieve annotations from netgraph
+    ccnd1 = ec.netgraph.node_annotation(nid1, "ccnd")
+    hostname2 = ec.netgraph.node_annotation(nid2, "hostname")
+    
+    # Add a FIB entry between one planetlab node and its peer
+    entry = ec.register_resource("LinuxFIBEntry")
+    ec.set(entry, "host", hostname2)
+    ec.register_connection(entry, ccnd1)
+
+    # Collector to retrieve peering ping output (to measure delay to neighbors)
+    ec.enable_trace(entry, "ping")
+    collector = add_collector(ec, "ping", nid1)
+    ec.register_connection(collector, entry)
+
+    return entry
+
+def avg_interests(ec, run):
+    ## Process logs
+    logs_dir = ec.run_dir
+
+    (graph,
+        content_names,
+        interest_expiry_count,
+        interest_dupnonce_count,
+        interest_count,
+        content_count) = ccn_parser.process_content_history_logs(
+                logs_dir,
+                ec.netgraph.topology,
+                parse_ping_logs = True)
+
+    shortest_path = networkx.shortest_path(graph, 
+            source = ec.netgraph.sources()[0], 
+            target = ec.netgraph.targets()[0])
+
+    ### Compute metric: Avg number of Interests seen per content name
+    ###                 normalized by the number of nodes in the shortest path
+    content_name_count = len(content_names.values())
+    nodes_in_shortest_path = len(shortest_path) - 1
+    metric = interest_count / (float(content_name_count) * float(nodes_in_shortest_path))
+
+    # TODO: DUMP RESULTS TO FILE
+    # TODO: DUMP GRAPH DELAYS!
+    f = open("/tmp/metric", "a+")
+    f.write("%.2f\n" % metric)
+    f.close()
+    print " METRIC", metric
+
+    return metric
+
+def add_pl_edge(ec, nid1, nid2):
+    #### Add connections between CCN nodes
+    add_pl_fib_entry(ec, nid1, nid2)
+    add_pl_fib_entry(ec, nid2, nid1)
+
+def add_pl_node(ec, nid):
+    ### Add CCN nodes (ec.netgraph holds the topology graph)
+    add_pl_host(ec, nid)
+    add_pl_ccnd(ec, nid)
+        
+    if nid == ec.netgraph.targets()[0]:
+        add_pl_ccnr(ec, nid)
+
+    if nid == ec.netgraph.sources()[0]:
+        add_pl_ccncat(ec, nid)
+
+if __name__ == '__main__':
+
+    #### Create NEPI Experiment Description with LINEAR topology 
+    ec = ExperimentController("pl_ccn", 
+            topo_type = TopologyType.LINEAR, 
+            node_count = 4, 
+            #assign_ips = True,
+            assign_st = True,
+            add_node_callback = add_pl_node, 
+            add_edge_callback = add_pl_edge)
+    
+    print "Results stored at", ec.exp_dir
+
+    #### Retrieve the content-producing resource to wait for it to finish
+    ccncat = ec.filter_resources("LinuxCCNCat")
+   
+    #### Run experiment until the metric converges
+    rnr = ExperimentRunner()
+    runs = rnr.run(ec, min_runs = 10, max_runs = 300, 
+            compute_metric_callback = avg_interests,
+            wait_guids = ccncat,
+            wait_time = 0)
+
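Both ccn_emu_live examples hand resources from add_node_callback to add_edge_callback through NetGraph annotations: the node callback stores a guid on the topology node with annotate_node(), and the edge callback reads it back with node_annotation(). A stripped-down sketch of that hand-off, reusing the resource type and attributes of add_pl_host() above (the callbacks are illustrative, not a complete experiment):

# Hedged sketch of the netgraph annotation hand-off used by the callbacks above.
def add_host(ec, nid):
    host = ec.register_resource("PlanetlabNode")
    ec.set(host, "hostname", PL_NODES[nid])   # PL_NODES as defined in planetlab.py above
    # Store the guid on the graph node so edge callbacks can retrieve it later
    ec.netgraph.annotate_node(nid, "host", host)

def add_link(ec, nid1, nid2):
    # Read back the guids stored by add_host() for both endpoints
    host1 = ec.netgraph.node_annotation(nid1, "host")
    host2 = ec.netgraph.node_annotation(nid2, "host")
    # ... register whatever resources (FIB entries, tunnels, ...) connect the two hosts ...

Note that planetlab.py reads its PlanetLab credentials from the environment (PL_SLICE, PL_USER, PL_PASS, PL_SSHKEY), so those variables need to be exported before running it.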
diff --git a/examples/ccn_emu_live/repoFile1.0.8.2 b/examples/ccn_emu_live/repoFile1.0.8.2
new file mode 100644 (file)
index 0000000..a90b0d9
Binary files /dev/null and b/examples/ccn_emu_live/repoFile1.0.8.2 differ
diff --git a/examples/dce/custom_local_csma_ping.py b/examples/dce/custom_local_csma_ping.py
new file mode 100644 (file)
index 0000000..f9f40af
--- /dev/null
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController 
+
+def add_ns3_node(ec, simu):
+    node = ec.register_resource("ns3::Node")
+    ec.register_connection(node, simu)
+
+    ipv4 = ec.register_resource("ns3::Ipv4L3Protocol")
+    ec.register_connection(node, ipv4)
+
+    arp = ec.register_resource("ns3::ArpL3Protocol")
+    ec.register_connection(node, arp)
+    
+    icmp = ec.register_resource("ns3::Icmpv4L4Protocol")
+    ec.register_connection(node, icmp)
+
+    udp = ec.register_resource("ns3::UdpL4Protocol")
+    ec.register_connection(node, udp)
+
+    tcp = ec.register_resource("ns3::TcpL4Protocol")
+    ec.register_connection(node, tcp)
+
+    return node
+
+def add_device(ec, node, ip,  prefix):
+    dev = ec.register_resource("ns3::CsmaNetDevice")
+    ec.set(dev, "ip", ip)
+    ec.set(dev, "prefix", prefix)
+    ec.register_connection(node, dev)
+
+    queue = ec.register_resource("ns3::DropTailQueue")
+    ec.register_connection(dev, queue)
+
+    return dev
+
+ec = ExperimentController(exp_id = "dce-custom-csma-ping")
+
+node = ec.register_resource("LinuxNode")
+ec.set(node, "hostname", "localhost")
+ec.set(node, "cleanProcesses", True)
+
+simu = ec.register_resource("LinuxNS3Simulation")
+ec.set(simu, "verbose", True)
+ec.register_connection(simu, node)
+
+nsnode1 = add_ns3_node(ec, simu)
+dev1 = add_device(ec, nsnode1, "10.0.0.1", "30")
+
+nsnode2 = add_ns3_node(ec, simu)
+dev2 = add_device(ec, nsnode2, "10.0.0.2", "30")
+
+# Create channel
+chan = ec.register_resource("ns3::CsmaChannel")
+ec.set(chan, "Delay", "2ms")
+
+ec.register_connection(chan, dev1)
+ec.register_connection(chan, dev2)
+
+### create applications
+ping = ec.register_resource("ns3::LinuxDceApplication")
+ec.set (ping, "sources", "http://www.skbuff.net/iputils/iputils-s20101006.tar.bz2")
+ec.set (ping, "build", "tar xvjf ${SRC}/iputils-s20101006.tar.bz2 && "
+        "cd iputils-s20101006/ && "
+        "sed -i 's/CFLAGS=/CFLAGS+=/g' Makefile && "
+        "make CFLAGS=-fPIC LDFLAGS='-pie -rdynamic' ping && "
+        "cp ping ${BIN_DCE} && cd - ")
+ec.set (ping, "binary", "ping")
+ec.set (ping, "stackSize", 1<<20)
+ec.set (ping, "arguments", "-c 10;-s 1000;10.0.0.2")
+ec.set (ping, "StartTime", "1s")
+ec.set (ping, "StopTime", "20s")
+ec.register_connection(ping, nsnode1)
+
+ec.deploy()
+
+ec.wait_finished([ping])
+
+stdout = ec.trace(ping, "stdout") 
+
+ec.shutdown()
+
+print "PING OUTPUT", stdout
+
similarity index 91%
rename from examples/linux/dce/custom_dce_ccn.py
rename to examples/dce/custom_local_p2p_ccn.py
index b22d668..3ba901a 100644 (file)
@@ -43,7 +43,7 @@ def add_ns3_node(ec, simu):
 
     return node
 
-def add_point2point_device(ec, node, ip,  prefix):
+def add_device(ec, node, ip,  prefix):
     dev = ec.register_resource("ns3::PointToPointNetDevice")
     ec.set(dev, "ip", ip)
     ec.set(dev, "prefix", prefix)
@@ -59,28 +59,25 @@ ec = ExperimentController(exp_id = "dce-custom-ccn")
 node = ec.register_resource("LinuxNode")
 ec.set(node, "hostname", "localhost")
 ec.set(node, "cleanProcesses", True)
-#ec.set(node, "cleanHome", True)
 
 simu = ec.register_resource("LinuxNS3Simulation")
 ec.set(simu, "verbose", True)
-ec.set(simu, "nsLog", "DceApplication")
-ec.set(simu, "enableDump", True)
 ec.register_connection(simu, node)
 
 nsnode1 = add_ns3_node(ec, simu)
-p2p1 = add_point2point_device(ec, nsnode1, "10.0.0.1", "30")
-ec.set(p2p1, "DataRate", "5Mbps")
+dev1 = add_device(ec, nsnode1, "10.0.0.1", "30")
+ec.set(dev1, "DataRate", "5Mbps")
 
 nsnode2 = add_ns3_node(ec, simu)
-p2p2 = add_point2point_device(ec, nsnode2, "10.0.0.2", "30")
-ec.set(p2p2, "DataRate", "5Mbps")
+dev2 = add_device(ec, nsnode2, "10.0.0.2", "30")
+ec.set(dev2, "DataRate", "5Mbps")
 
 # Create channel
 chan = ec.register_resource("ns3::PointToPointChannel")
 ec.set(chan, "Delay", "2ms")
 
-ec.register_connection(chan, p2p1)
-ec.register_connection(chan, p2p2)
+ec.register_connection(chan, dev1)
+ec.register_connection(chan, dev2)
 
 ### create applications
 
@@ -110,8 +107,8 @@ ec.register_connection(ccnd1, nsnode1)
 # parameters
 repofile = os.path.join(
     os.path.dirname(os.path.realpath(__file__)), 
-    "..", "..", "..",
-    "test", "resources", "linux", "ns3", "ccn", "repoFile1")
+    "..", "..", "test", "resources", "linux", 
+    "ns3", "ccn", "repoFile1")
 
 ccnr = ec.register_resource("ns3::LinuxCCNDceApplication")
 ec.set (ccnr, "binary", "ccnr")
@@ -172,4 +169,3 @@ print "%0.2f MBytes received" % (len(stdout) / 1024.0 / 1024.0 )
 
 ec.shutdown()
 
-
similarity index 86%
rename from examples/linux/dce/custom_dce_ping.py
rename to examples/dce/custom_local_p2p_ping.py
index 5210d63..153b381 100644 (file)
@@ -41,7 +41,7 @@ def add_ns3_node(ec, simu):
 
     return node
 
-def add_point2point_device(ec, node, ip,  prefix):
+def add_device(ec, node, ip,  prefix):
     dev = ec.register_resource("ns3::PointToPointNetDevice")
     ec.set(dev, "ip", ip)
     ec.set(dev, "prefix", prefix)
@@ -52,33 +52,30 @@ def add_point2point_device(ec, node, ip,  prefix):
 
     return dev
 
-ec = ExperimentController(exp_id = "dce-custom-ping")
+ec = ExperimentController(exp_id = "dce-custom-p2p-ping")
 
 node = ec.register_resource("LinuxNode")
 ec.set(node, "hostname", "localhost")
 ec.set(node, "cleanProcesses", True)
-#ec.set(node, "cleanHome", True)
 
 simu = ec.register_resource("LinuxNS3Simulation")
 ec.set(simu, "verbose", True)
-ec.set(simu, "nsLog", "DceApplication")
-ec.set(simu, "enableDump", True)
 ec.register_connection(simu, node)
 
 nsnode1 = add_ns3_node(ec, simu)
-p2p1 = add_point2point_device(ec, nsnode1, "10.0.0.1", "30")
-ec.set(p2p1, "DataRate", "5Mbps")
+dev1 = add_device(ec, nsnode1, "10.0.0.1", "30")
+ec.set(dev1, "DataRate", "5Mbps")
 
 nsnode2 = add_ns3_node(ec, simu)
-p2p2 = add_point2point_device(ec, nsnode2, "10.0.0.2", "30")
-ec.set(p2p2, "DataRate", "5Mbps")
+dev2 = add_device(ec, nsnode2, "10.0.0.2", "30")
+ec.set(dev2, "DataRate", "5Mbps")
 
 # Create channel
 chan = ec.register_resource("ns3::PointToPointChannel")
 ec.set(chan, "Delay", "2ms")
 
-ec.register_connection(chan, p2p1)
-ec.register_connection(chan, p2p2)
+ec.register_connection(chan, dev1)
+ec.register_connection(chan, dev2)
 
 ### create applications
 ping = ec.register_resource("ns3::LinuxDceApplication")
@@ -104,3 +101,4 @@ stdout = ec.trace(ping, "stdout")
 ec.shutdown()
 
 print "PING OUTPUT", stdout
+
diff --git a/examples/dce/custom_local_wifi_ping.py b/examples/dce/custom_local_wifi_ping.py
new file mode 100644 (file)
index 0000000..7c61df3
--- /dev/null
@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController 
+
+def add_ns3_node(ec, simu):
+    node = ec.register_resource("ns3::Node")
+    ec.register_connection(node, simu)
+
+    ipv4 = ec.register_resource("ns3::Ipv4L3Protocol")
+    ec.register_connection(node, ipv4)
+
+    arp = ec.register_resource("ns3::ArpL3Protocol")
+    ec.register_connection(node, arp)
+    
+    icmp = ec.register_resource("ns3::Icmpv4L4Protocol")
+    ec.register_connection(node, icmp)
+
+    udp = ec.register_resource("ns3::UdpL4Protocol")
+    ec.register_connection(node, udp)
+
+    tcp = ec.register_resource("ns3::TcpL4Protocol")
+    ec.register_connection(node, tcp)
+
+    return node
+
+def add_device(ec, node, ip, prefix, access_point = False):
+    dev = ec.register_resource("ns3::WifiNetDevice")
+    ec.set(dev, "ip", ip)
+    ec.set(dev, "prefix", prefix)
+    ec.register_connection(node, dev)
+
+    phy = ec.register_resource("ns3::YansWifiPhy")
+    ec.set(phy, "Standard", "WIFI_PHY_STANDARD_80211a")
+    ec.register_connection(dev, phy)
+
+    error = ec.register_resource("ns3::NistErrorRateModel")
+    ec.register_connection(phy, error)
+
+    manager = ec.register_resource("ns3::ArfWifiManager")
+    ec.register_connection(dev, manager)
+
+    if access_point:
+        mac = ec.register_resource("ns3::ApWifiMac")
+    else:
+        mac = ec.register_resource("ns3::StaWifiMac")
+
+    ec.set(mac, "Standard", "WIFI_PHY_STANDARD_80211a")
+    ec.register_connection(dev, mac)
+
+    return dev, phy
+
+def add_constant_mobility(ec, node, x, y, z):
+    mobility = ec.register_resource("ns3::ConstantPositionMobilityModel") 
+    position = "%d:%d:%d" % (x, y, z)
+    ec.set(mobility, "Position", position)
+    ec.register_connection(node, mobility)
+    return mobility
+
+def add_wifi_channel(ec):
+    channel = ec.register_resource("ns3::YansWifiChannel")
+    delay = ec.register_resource("ns3::ConstantSpeedPropagationDelayModel")
+    ec.register_connection(channel, delay)
+
+    loss  = ec.register_resource("ns3::LogDistancePropagationLossModel")
+    ec.register_connection(channel, loss)
+
+    return channel
+
+ec = ExperimentController(exp_id = "dce-custom-wifi-ping")
+
+node = ec.register_resource("LinuxNode")
+ec.set(node, "hostname", "localhost")
+ec.set(node, "cleanProcesses", True)
+
+simu = ec.register_resource("LinuxNS3Simulation")
+ec.set(simu, "verbose", True)
+ec.register_connection(simu, node)
+
+nsnode1 = add_ns3_node(ec, simu)
+add_constant_mobility(ec, nsnode1, 0, 0, 0)
+dev1, phy1 = add_device(ec, nsnode1, "10.0.0.1", "30")
+
+nsnode2 = add_ns3_node(ec, simu)
+add_constant_mobility(ec, nsnode2, 50, 50, 0)
+dev2, phy2 = add_device(ec, nsnode2, "10.0.0.2", "30", access_point = True)
+
+# Create channel
+chan = add_wifi_channel(ec)
+ec.register_connection(chan, phy1)
+ec.register_connection(chan, phy2)
+
+### create applications
+ping = ec.register_resource("ns3::LinuxDceApplication")
+ec.set (ping, "sources", "http://www.skbuff.net/iputils/iputils-s20101006.tar.bz2")
+ec.set (ping, "build", "tar xvjf ${SRC}/iputils-s20101006.tar.bz2 && "
+        "cd iputils-s20101006/ && "
+        "sed -i 's/CFLAGS=/CFLAGS+=/g' Makefile && "
+        "make CFLAGS=-fPIC LDFLAGS='-pie -rdynamic' ping && "
+        "cp ping ${BIN_DCE} && cd - ")
+ec.set (ping, "binary", "ping")
+ec.set (ping, "stackSize", 1<<20)
+ec.set (ping, "arguments", "-c 10;-s 1000;10.0.0.2")
+ec.set (ping, "StartTime", "1s")
+ec.set (ping, "StopTime", "20s")
+ec.register_connection(ping, nsnode1)
+
+ec.deploy()
+
+ec.wait_finished([ping])
+
+stdout = ec.trace(ping, "stdout") 
+
+ec.shutdown()
+
+print "PING OUTPUT", stdout
+
similarity index 89%
rename from examples/linux/dce/dce_ccn_application.py
rename to examples/dce/wrapped_local_p2p_ccncat.py
index b4c1544..94f69c2 100644 (file)
@@ -43,7 +43,7 @@ def add_ns3_node(ec, simu):
 
     return node
 
-def add_point2point_device(ec, node, ip,  prefix):
+def add_device(ec, node, ip,  prefix):
     dev = ec.register_resource("ns3::PointToPointNetDevice")
     ec.set(dev, "ip", ip)
     ec.set(dev, "prefix", prefix)
@@ -54,30 +54,29 @@ def add_point2point_device(ec, node, ip,  prefix):
 
     return dev
 
-ec = ExperimentController(exp_id = "dce-ccn-app")
+ec = ExperimentController(exp_id = "dce-ccncat")
 
 node = ec.register_resource("LinuxNode")
 ec.set(node, "hostname", "localhost")
 ec.set(node, "cleanProcesses", True)
-#ec.set(node, "cleanHome", True)
 
 simu = ec.register_resource("LinuxNS3Simulation")
 ec.register_connection(simu, node)
 
 nsnode1 = add_ns3_node(ec, simu)
-p2p1 = add_point2point_device(ec, nsnode1, "10.0.0.1", "30")
-ec.set(p2p1, "DataRate", "5Mbps")
+dev1 = add_device(ec, nsnode1, "10.0.0.1", "30")
+ec.set(dev1, "DataRate", "5Mbps")
 
 nsnode2 = add_ns3_node(ec, simu)
-p2p2 = add_point2point_device(ec, nsnode2, "10.0.0.2", "30")
-ec.set(p2p2, "DataRate", "5Mbps")
+dev2 = add_device(ec, nsnode2, "10.0.0.2", "30")
+ec.set(dev2, "DataRate", "5Mbps")
 
 # Create channel
 chan = ec.register_resource("ns3::PointToPointChannel")
 ec.set(chan, "Delay", "2ms")
 
-ec.register_connection(chan, p2p1)
-ec.register_connection(chan, p2p2)
+ec.register_connection(chan, dev1)
+ec.register_connection(chan, dev2)
 
 ### create applications
 # Add ccnd to ns-3 node1
@@ -92,8 +91,8 @@ ec.register_connection(ccnd1, nsnode1)
 # Add CCN repository with content to ns-3 node1
 repofile = os.path.join(
     os.path.dirname(os.path.realpath(__file__)), 
-    "..", "..", "..",
-    "test", "resources", "linux", "ns3", "ccn", "repoFile1")
+    "..", "..", "test", "resources", "linux", 
+    "ns3", "ccn", "repoFile1")
 
 ccnr = ec.register_resource("ns3::LinuxDceCCNR")
 ec.set (ccnr, "repoFile1", repofile) 
similarity index 96%
rename from examples/linux/dce/dce_ccnpeek_application.py
rename to examples/dce/wrapped_local_p2p_ccnpeek.py
index 014eb6e..f1ca9e6 100644 (file)
@@ -41,12 +41,11 @@ def add_ns3_node(ec, simu):
 
     return node
 
-ec = ExperimentController(exp_id = "dce-ccnpeek-app")
+ec = ExperimentController(exp_id = "dce-local-ccnpeek")
 
 node = ec.register_resource("LinuxNode")
 ec.set(node, "hostname", "localhost")
 ec.set(node, "cleanProcesses", True)
-#ec.set(node, "cleanHome", True)
 
 simu = ec.register_resource("LinuxNS3Simulation")
 ec.register_connection(simu, node)
@@ -82,3 +81,4 @@ stdout = ec.trace(ccnpeek, "stdout")
 ec.shutdown()
 
 print "PEEK received", stdout
+
similarity index 88%
rename from examples/linux/dce/dce_ping_application.py
rename to examples/dce/wrapped_local_p2p_ping.py
index c4d137a..e94c349 100644 (file)
@@ -41,7 +41,7 @@ def add_ns3_node(ec, simu):
 
     return node
 
-def add_point2point_device(ec, node, ip,  prefix):
+def add_device(ec, node, ip,  prefix):
     dev = ec.register_resource("ns3::PointToPointNetDevice")
     ec.set(dev, "ip", ip)
     ec.set(dev, "prefix", prefix)
@@ -57,25 +57,24 @@ ec = ExperimentController(exp_id = "dce-ping-app")
 node = ec.register_resource("LinuxNode")
 ec.set(node, "hostname", "localhost")
 ec.set(node, "cleanProcesses", True)
-#ec.set(node, "cleanHome", True)
 
 simu = ec.register_resource("LinuxNS3Simulation")
 ec.register_connection(simu, node)
 
 nsnode1 = add_ns3_node(ec, simu)
-p2p1 = add_point2point_device(ec, nsnode1, "10.0.0.1", "30")
-ec.set(p2p1, "DataRate", "5Mbps")
+dev1 = add_device(ec, nsnode1, "10.0.0.1", "30")
+ec.set(dev1, "DataRate", "5Mbps")
 
 nsnode2 = add_ns3_node(ec, simu)
-p2p2 = add_point2point_device(ec, nsnode2, "10.0.0.2", "30")
-ec.set(p2p2, "DataRate", "5Mbps")
+dev2 = add_device(ec, nsnode2, "10.0.0.2", "30")
+ec.set(dev2, "DataRate", "5Mbps")
 
 # Create channel
 chan = ec.register_resource("ns3::PointToPointChannel")
 ec.set(chan, "Delay", "2ms")
 
-ec.register_connection(chan, p2p1)
-ec.register_connection(chan, p2p2)
+ec.register_connection(chan, dev1)
+ec.register_connection(chan, dev2)
 
 ### create applications
 ping = ec.register_resource("ns3::LinuxDcePing")
@@ -96,3 +95,4 @@ stdout = ec.trace(ping, "stdout")
 ec.shutdown()
 
 print "PING OUTPUT", stdout
+
similarity index 63%
rename from examples/linux/ccn/two_nodes_file_retrieval.py
rename to examples/linux/ccn_simple_transfer.py
index 8e40c3b..7e06207 100644 (file)
 #
 # Author: Alina Quereilhac <alina.quereilhac@inria.fr>
 #
-# Instructions to run this example:
-#
-# 1. First edit the script file where required (See ASSING messages)
-#
-# 2. Then, run the script:
+
+# Example of how to run this experiment (replace with your information):
 #
 # $ cd <path-to-nepi>
-# $ PYTHONPATH=$PYTHONPATHS:src python examples/linux/ccn/two_nodes_file_retrieval.py
+# python examples/linux/ccn_simple_transfer.py -a <hostname1> -b <hostname2> -u <username> -i <ssh-key>
+
+# CCN topology:
+#
+#                
+#                 
+#  content                  ccncat
+#  Linux host               Linux host
+#     0 ------- network -------- 1
 #
 
 from nepi.execution.ec import ExperimentController
 
+from optparse import OptionParser, SUPPRESS_HELP
 import os
 
-ssh_key = ####### <<< ASSING the absolute path to the private SSH key to login into the remote host >>>
-ssh_user = ####### <<< ASSING the SSH username >>>
+usage = ("usage: %prog -a <hostanme1> -b <hostname2> -u <username> -i <ssh-key>")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-a", "--hostname1", dest="hostname1", 
+        help="Remote host 1", type="str")
+parser.add_option("-b", "--hostname2", dest="hostname2", 
+        help="Remote host 2", type="str")
+parser.add_option("-u", "--username", dest="username", 
+        help="Username to SSH to remote host", type="str")
+parser.add_option("-i", "--ssh-key", dest="ssh_key", 
+        help="Path to private SSH key to be used for connection", 
+        type="str")
+(options, args) = parser.parse_args()
+
+hostname1 = options.hostname1
+hostname2 = options.hostname2
+username = options.username
+ssh_key = options.ssh_key
 
 ## Create the experiment controller
-ec = ExperimentController(exp_id = "demo_CCN")
+ec = ExperimentController(exp_id = "ccn_simple_transfer")
+
+##### CONFIGURING NODE 1
 
 ## Register node 1
 node1 = ec.register_resource("LinuxNode")
 # Set the hostname of the first node to use for the experiment
-hostname1 = "peeramidion.irisa.fr" ##### <<< ASSIGN the hostname of a host you have SSSH access to >>>
 ec.set(node1, "hostname", hostname1)
 # username should be your SSH user 
-ec.set(node1, "username", ssh_user)
+ec.set(node1, "username", username)
 # Absolute path to the SSH private key
 ec.set(node1, "identity", ssh_key)
 # Clean all files, results, etc, from previous experiments wit the same exp_id
@@ -51,32 +74,12 @@ ec.set(node1, "cleanExperiment", True)
 # Kill all running processes in the node before running the experiment
 ec.set(node1, "cleanProcesses", True)
 
-## Register node 2 
-node2 = ec.register_resource("LinuxNode")
-# Set the hostname of the first node to use for the experiment
-hostname2 = "planetlab2.upc.es" ##### <<< ASSIGN the hostname of a host you have SSSH access to >>>
-ec.set(node2, "hostname", hostname2)
-# username should be your SSH user 
-ec.set(node2, "username", ssh_user)
-# Absolute path to the SSH private key
-ec.set(node2, "identity", ssh_key)
-# Clean all files, results, etc, from previous experiments wit the same exp_id
-ec.set(node2, "cleanExperiment", True)
-# Kill all running processes in the node before running the experiment
-ec.set(node2, "cleanProcesses", True)
-
 ## Register a CCN daemon in node 1
 ccnd1 = ec.register_resource("LinuxCCND")
 # Set ccnd log level to 7
 ec.set(ccnd1, "debug", 7)
 ec.register_connection(ccnd1, node1)
 
-## Register a CCN daemon in node 2
-ccnd2 = ec.register_resource("LinuxCCND")
-# Set ccnd log level to 7
-ec.set(ccnd2, "debug", 7)
-ec.register_connection(ccnd2, node2)
-
 ## Register a repository in node 1
 ccnr1 = ec.register_resource("LinuxCCNR")
 ec.register_connection(ccnr1, ccnd1)
@@ -84,17 +87,46 @@ ec.register_connection(ccnr1, ccnd1)
 ## Push the file into the repository
 local_path_to_content = os.path.join(
         os.path.dirname(os.path.realpath(__file__)),
-            "..", "..",
-            "big_buck_bunny_240p_mpeg4_lq.ts")
+            "..", "big_buck_bunny_240p_mpeg4_lq.ts")
 
-# Register a FIB entry from node 1 to node 2
+content_name = "ccnx:/test/FILE"
+
+# Add a content to the repository
 co = ec.register_resource("LinuxCCNContent")
-ec.set(co, "contentName", "ccnx:/test/FILE1")
+ec.set(co, "contentName", content_name)
 # NEPI will upload the specified file to the remote node and write it
 # into the CCN repository
 ec.set(co, "content", local_path_to_content)
 ec.register_connection(co, ccnr1)
 
+##### CONFIGURING NODE 2
+
+## Register node 2 
+node2 = ec.register_resource("LinuxNode")
+# Set the hostname of the second node to use for the experiment
+ec.set(node2, "hostname", hostname2)
+# username should be your SSH user 
+ec.set(node2, "username", username)
+# Absolute path to the SSH private key
+ec.set(node2, "identity", ssh_key)
+# Clean all files, results, etc, from previous experiments with the same exp_id
+ec.set(node2, "cleanExperiment", True)
+# Kill all running processes in the node before running the experiment
+ec.set(node2, "cleanProcesses", True)
+
+## Register a CCN daemon in node 2
+ccnd2 = ec.register_resource("LinuxCCND")
+# Set ccnd log level to 7
+ec.set(ccnd2, "debug", 7)
+ec.register_connection(ccnd2, node2)
+
+## Retrieve the file stored in node 1 from node 2
+ccncat = ec.register_resource("LinuxCCNCat")
+ec.set(ccncat, "contentName", content_name)
+ec.register_connection(ccncat, ccnd2)
+
+##### INTERCONNECTING CCN NODES ...
+
 # Register a FIB entry from node 1 to node 2
 entry1 = ec.register_resource("LinuxFIBEntry")
 ec.set(entry1, "host", hostname2)
@@ -105,36 +137,19 @@ entry2 = ec.register_resource("LinuxFIBEntry")
 ec.set(entry2, "host", hostname1)
 ec.register_connection(entry2, ccnd2)
 
-## Retrieve the file stored in node 1 from node 2
-command = "ccncat ccnx:/test/FILE1"
-app = ec.register_resource("LinuxCCNApplication")
-ec.set(app, "command", command)
-ec.register_connection(app, ccnd2)
-
-# Register a collector to automatically collect the ccnd logs
-# to a local directory
-results_dir = "/tmp/demo_CCN_results"
-col1 = ec.register_resource("Collector")
-ec.set(col1, "traceName", "stderr")
-ec.set(col1, "storeDir", results_dir)
-ec.set(col1, "subDir", hostname1)
-ec.register_connection(col1, ccnd1)
-
-col2 = ec.register_resource("Collector")
-ec.set(col2, "traceName", "stderr")
-ec.set(col2, "storeDir", results_dir)
-ec.set(col2, "subDir", hostname2)
-ec.register_connection(col2, ccnd2)
+##### STARTING THE EXPERIMENT
 
 ## Deploy all resources
 ec.deploy()
 
 # Wait until the ccncat is finished
-ec.wait_finished([app])
+ec.wait_finished([ccncat])
 
-## CCND logs will be collected to the results_dir upon shutdown.
-## We can aldo get the content of the logs now:
-#print "LOG2", ec.trace(ccnd1, "stderr")
-#print "LOG 1", ec.trace(ccnd2, "stderr")
+stdout = ec.trace(ccncat, "stdout")
+f = open("video.ts", "w")
+f.write(stdout)
+f.close()
 
 ec.shutdown()
+
+print "Transfered FILE stored localy at video.ts"
diff --git a/examples/linux/ccn_transfer_using_linuxapp.py b/examples/linux/ccn_transfer_using_linuxapp.py
new file mode 100644 (file)
index 0000000..de85570
--- /dev/null
@@ -0,0 +1,217 @@
+#!/usr/bin/env python
+
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+# NOTE: This experiment example uses the generic LinuxApplication
+#       ResourceManager to do the CCN set up in the hosts.
+#       Alternatively, CCN specific ResourceManagers can be used
+#       (i.e. LinuxCCND, LinuxCCNR, etc...), and those require less 
+#       manual configuration.
+#
+#
+
+# CCN topology:
+#
+#                
+#                 
+#  content                ccncat
+#  Linux host               Linux host
+#  0 ------- Internet ------ 0
+#           
+
+# Example of how to run this experiment (replace with your information):
+#
+# $ cd <path-to-nepi>
+# python examples/linux/ccn_transfer_using_linuxapp.py -a <hostname1> -b <hostname2> -u <username> -i <ssh-key>
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.resource import ResourceAction, ResourceState
+
+from optparse import OptionParser
+import os
+
+usage = ("usage: %prog -a <hostname1> -b <hostname2> -u <username> -i <ssh-key>")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-a", "--hostname1", dest="hostname1", 
+        help="Remote host 1", type="str")
+parser.add_option("-b", "--hostname2", dest="hostname2", 
+        help="Remote host 2", type="str")
+parser.add_option("-u", "--username", dest="username", 
+        help="Username to SSH to remote host", type="str")
+parser.add_option("-i", "--ssh-key", dest="ssh_key", 
+        help="Path to private SSH key to be used for connection", 
+        type="str")
+(options, args) = parser.parse_args()
+
+hostname1 = options.hostname1
+hostname2 = options.hostname2
+username = options.username
+ssh_key = options.ssh_key
+
+def add_node(ec, host, user, ssh_key = None):
+    node = ec.register_resource("LinuxNode")
+    ec.set(node, "hostname", host)
+    ec.set(node, "username", user)
+    ec.set(node, "identity", ssh_key)
+    ec.set(node, "cleanHome", True)
+    ec.set(node, "cleanProcesses", True)
+    return node
+
+def add_ccnd(ec, peers):
+    # Dependencies for Fedora
+    depends = ( " autoconf openssl-devel  expat-devel libpcap-devel "
+                " ecryptfs-utils-devel libxml2-devel automake gawk " 
+                " gcc gcc-c++ git pcre-devel make ")
+
+    # UBUNTU / DEBIAN
+    # depends = ( " autoconf libssl-dev libexpat-dev libpcap-dev "
+    #            " libecryptfs0 libxml2-utils automake gawk gcc g++ "
+    #            " git-core pkg-config libpcre3-dev make ")
+
+    sources = "http://www.ccnx.org/releases/ccnx-0.8.2.tar.gz"
+
+    build = (
+        # Evaluate if ccnx binaries are already installed
+        " ( "
+            "  test -f ${BIN}/ccnx-0.8.2/bin/ccnd"
+        " ) || ( "
+        # If not, untar and build
+            " ( "
+            " mkdir -p ${SRC}/ccnx-0.8.2 && "
+                " tar xf ${SRC}/ccnx-0.8.2.tar.gz --strip-components=1 -C ${SRC}/ccnx-0.8.2 "
+             " ) && "
+                "cd ${SRC}/ccnx-0.8.2 && "
+                # Just execute and silence warnings...
+                "( ./configure && make ) "
+         " )") 
+
+    install = (
+        # Evaluate if ccnx binaries are already installed
+        " ( "
+            "  test -f ${BIN}/ccnx-0.8.2/bin/ccnd"
+        " ) || ( "
+            "  mkdir -p ${BIN}/ccnx-0.8.2/bin && "
+            "  cp -r ${SRC}/ccnx-0.8.2/bin ${BIN}/ccnx-0.8.2"
+        " )"
+    )
+
+    env = "PATH=$PATH:${BIN}/ccnx-0.8.2/bin"
+
+    # BASH command -> ' ccndstart ; ccndc add ccnx:/ udp  host ;  ccnr '
+    command = "ccndstart && "
+    peers = map(lambda peer: "ccndc add ccnx:/ udp  %s" % peer, peers)
+    command += " ; ".join(peers) + " && "
+    command += " ccnr & "
+
+    app = ec.register_resource("LinuxApplication")
+    ec.set(app, "depends", depends)
+    ec.set(app, "sources", sources)
+    ec.set(app, "install", install)
+    ec.set(app, "build", build)
+    ec.set(app, "env", env)
+    ec.set(app, "command", command)
+
+    return app
+
+def add_publish(ec, movie, content_name):
+    env = "PATH=$PATH:${BIN}/ccnx-0.8.2/bin"
+    command = "ccnseqwriter -r %s" % content_name
+
+    app = ec.register_resource("LinuxApplication")
+    ec.set(app, "stdin", movie)
+    ec.set(app, "env", env)
+    ec.set(app, "command", command)
+
+    return app
+
+def add_ccncat(ec, content_name):
+    env = "PATH=$PATH:${BIN}/ccnx-0.8.2/bin"
+    command = "ccncat %s" % content_name
+
+    app = ec.register_resource("LinuxApplication")
+    ec.set(app, "env", env)
+    ec.set(app, "command", command)
+
+    return app
+
+## Create the experiment controller
+ec = ExperimentController(exp_id = "ccn_advanced_transfer")
+
+# Register first Linux host
+node1 = add_node(ec, hostname1, username, ssh_key)
+
+# Register CCN setup for host
+peers = [hostname2]
+ccnd1 = add_ccnd(ec, peers)
+ec.register_connection(ccnd1, node1)
+
+# Register content producer application (ccnseqwriter)
+## Push the file into the repository
+local_path_to_content = os.path.join(
+        os.path.dirname(os.path.realpath(__file__)),
+            "..", "big_buck_bunny_240p_mpeg4_lq.ts")
+
+content_name = "ccnx:/test/FILE"
+
+pub = add_publish(ec, local_path_to_content, content_name)
+ec.register_connection(pub, node1)
+
+# The movie can only be published after ccnd is running
+ec.register_condition(pub, ResourceAction.START, 
+        ccnd1, ResourceState.STARTED)
+
+# Register second Linux host
+node2 = add_node(ec, hostname2, username, ssh_key)
+
+# Register CCN setup for Linux host
+peers = [hostname1]
+ccnd2 = add_ccnd(ec, peers)
+ec.register_connection(ccnd2, node2)
+# Register consumer application (ccncat)
+ccncat = add_ccncat(ec, content_name)
+ec.register_connection(ccncat, node2)
+
+# The file can only be retrieved after ccnd is running
+ec.register_condition(ccncat, ResourceAction.START, 
+        ccnd2, ResourceState.STARTED)
+
+# And also, the file can only be retrieved after it was published
+ec.register_condition(ccncat, ResourceAction.START, 
+        pub, ResourceState.STARTED)
+
+# Deploy all ResourceManagers
+ec.deploy()
+
+# Wait until the applications are finished
+apps = [ccncat]
+ec.wait_finished(apps)
+
+stdout = ec.trace(ccncat, "stdout")
+f = open("video.ts", "w")
+f.write(stdout)
+f.close()
+
+# Shutdown the experiment controller
+ec.shutdown()
+
+print "Transfered FILE stored localy at video.ts"
+
similarity index 94%
rename from examples/linux/file_transfer.py
rename to examples/linux/netcat_file_transfer.py
index 7d07ed6..e2047d1 100644 (file)
 #         Alina Quereilhac <alina.quereilhac@inria.fr>
 #
 #
-# Example of how to run this experiment (replace with your credentials):
+# Example of how to run this experiment (replace with your information):
 #
 # $ cd <path-to-nepi>
-# $ PYTHONPATH=$PYTHONPATH:~/repos/nepi/src python examples/linux/file_transfer.py -u inria_nepi -i ~/.ssh/id_rsa_planetlab -a planetlab1.u-strasbg.fr -b planetlab1.utt.fr
-
+# python examples/linux/netcat_file_transfer.py -a <hostname1> -b <hostname2> -u <username> -i <ssh-key>
 
 from nepi.execution.ec import ExperimentController
 from nepi.execution.resource import ResourceAction, ResourceState
@@ -52,7 +51,7 @@ username = options.username
 ssh_key = options.ssh_key
 
 ## Create the experiment controller
-ec = ExperimentController(exp_id = "file_transfer")
+ec = ExperimentController(exp_id = "nc_file_transfer")
 
 ## Register node 1
 node1 = ec.register_resource("LinuxNode")
@@ -140,3 +139,5 @@ f.close()
 
 ec.shutdown()
 
+print "Total bytes transfered saved to bw.txt..."
+
index 6b5a249..3e9f03a 100644 (file)
 #
 # Author: Alina Quereilhac <alina.quereilhac@inria.fr>
 
+# Example of how to run this experiment (replace with your information):
+#
+# $ cd <path-to-nepi>
+# python examples/linux/ping.py -a <hostname> -u <username> -i <ssh-key>
+
+
 from nepi.execution.ec import ExperimentController 
 
+from optparse import OptionParser, SUPPRESS_HELP
+import os
+
+usage = ("usage: %prog -a <hostanme> -u <username> -i <ssh-key>")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-a", "--hostname", dest="hostname", 
+        help="Remote host", type="str")
+parser.add_option("-u", "--username", dest="username", 
+        help="Username to SSH to remote host", type="str")
+parser.add_option("-i", "--ssh-key", dest="ssh_key", 
+        help="Path to private SSH key to be used for connection", 
+        type="str")
+(options, args) = parser.parse_args()
+
+hostname = options.hostname
+username = options.username
+ssh_key = options.ssh_key
+
 ec = ExperimentController(exp_id = "ping-exp")
         
-hostname = ## Add a string with the target hostname
-username = ## Add a string with the username to SSH hostname
-
 node = ec.register_resource("LinuxNode")
 ec.set(node, "hostname", hostname)
 ec.set(node, "username", username)
+ec.set(node, "identity", ssh_key)
 ec.set(node, "cleanHome", True)
 ec.set(node, "cleanProcesses", True)
 
 app = ec.register_resource("LinuxApplication")
-ec.set(app, "command", "ping -c3 www.google.com")
+ec.set(app, "command", "ping -c3 nepi.inria.fr")
 ec.register_connection(app, node)
 
 ec.deploy()
@@ -42,3 +65,4 @@ ec.wait_finished(app)
 print ec.trace(app, "stdout")
 
 ec.shutdown()
+
similarity index 99%
rename from examples/linux/ccn/ccncat_2_nodes.py
rename to examples/linux/testing/ccncat_2_nodes.py
index 34d3350..7efe118 100644 (file)
@@ -33,7 +33,7 @@
 #                
 #                 
 #  content                ccncat
-#  PL host               Linux host
+#  Linux host               Linux host
 #  0 ------- Internet ------ 0
 #           
 
@@ -92,10 +92,9 @@ def add_stream(ec, ccnd, content_name):
 
     return app
 
-def add_collector(ec, trace_name, store_dir):
+def add_collector(ec, trace_name):
     collector = ec.register_resource("Collector")
     ec.set(collector, "traceName", trace_name)
-    ec.set(collector, "storeDir", store_dir)
 
     return collector
 
@@ -136,7 +135,7 @@ if __name__ == '__main__':
     
     ( pl_user, movie, exp_id, pl_ssh_key, results_dir ) = get_options()
 
-    ec = ExperimentController(exp_id = exp_id)
+    ec = ExperimentController(exp_id = exp_id, local_dir = results_dir)
 
     # hosts in the US
     #host1 = "planetlab4.wail.wisc.edu"
@@ -214,7 +213,7 @@ if __name__ == '__main__':
             app, ResourceState.STARTED, time = "10s")
 
     # Register a collector to automatically collect traces
-    collector = add_collector(ec, "stderr", results_dir)
+    collector = add_collector(ec, "stderr")
     for ccnd in ccnds.values():
         ec.register_connection(collector, ccnd)
 
index 6d01a9b..49e2e7e 100644 (file)
 #
 # Author: Alina Quereilhac <alina.quereilhac@inria.fr>
 #
-# Example of how to run this experiment (replace with your credentials):
+# Example of how to run this experiment (replace with your information):
 #
 # $ cd <path-to-nepi>
-# $ PYTHONPATH=$PYTHONPATH:~/repos/nepi/src python examples/linux/vlc_streaming.py -u inria_nepi -i ~/.ssh/id_rsa_planetlab -a planetlab1.u-strasbg.fr -b planetlab1.utt.fr | vlc -
-
+# python examples/linux/vlc_streaming.py -a <hostname1> -b <hostname2> -u <username> -i <ssh-key>
 
 from nepi.execution.ec import ExperimentController
 from nepi.execution.resource import ResourceState, ResourceAction 
@@ -115,8 +114,12 @@ ec.deploy()
 # Wait until the ccncat is finished
 ec.wait_finished([server])
 
-print ec.trace(client, "VIDEO")
+video = ec.trace(client, "VIDEO")
+f = open("video.ts", "w")
+f.write(video)
+f.close()
 
 ec.shutdown()
 
+print "Streamed VIDEO stored localy at video.ts"
 
diff --git a/examples/ns3/local_csma_ping.py b/examples/ns3/local_csma_ping.py
new file mode 100644 (file)
index 0000000..b2cca6b
--- /dev/null
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController 
+
+ec = ExperimentController(exp_id = "ns3-local-csma-ping")
+
+# The simulation will be executed on the local machine
+node = ec.register_resource("LinuxNode")
+ec.set(node, "hostname", "localhost")
+
+# Add a simulation resource
+simu = ec.register_resource("LinuxNS3Simulation")
+ec.set(simu, "verbose", True)
+ec.register_connection(simu, node)
+
+## Add a ns-3 node with its protocol stack
+nsnode1 = ec.register_resource("ns3::Node")
+ec.register_connection(nsnode1, simu)
+
+ipv4 = ec.register_resource("ns3::Ipv4L3Protocol")
+ec.register_connection(nsnode1, ipv4)
+arp = ec.register_resource("ns3::ArpL3Protocol")
+ec.register_connection(nsnode1, arp)
+icmp = ec.register_resource("ns3::Icmpv4L4Protocol")
+ec.register_connection(nsnode1, icmp)
+
+# Add a csma net device to the node
+dev1 = ec.register_resource("ns3::CsmaNetDevice")
+ec.set(dev1, "ip", "10.0.0.1")
+ec.set(dev1, "prefix", "30")
+ec.register_connection(nsnode1, dev1)
+queue1 = ec.register_resource("ns3::DropTailQueue")
+ec.register_connection(dev1, queue1)
+
+## Add another ns-3 node with its protocol stack
+nsnode2 = ec.register_resource("ns3::Node")
+ec.register_connection(nsnode2, simu)
+
+ipv4 = ec.register_resource("ns3::Ipv4L3Protocol")
+ec.register_connection(nsnode2, ipv4)
+arp = ec.register_resource("ns3::ArpL3Protocol")
+ec.register_connection(nsnode2, arp)
+icmp = ec.register_resource("ns3::Icmpv4L4Protocol")
+ec.register_connection(nsnode2, icmp)
+
+# Add a csma net device to the node
+dev2 = ec.register_resource("ns3::CsmaNetDevice")
+ec.set(dev2, "ip", "10.0.0.2")
+ec.set(dev2, "prefix", "30")
+ec.register_connection(nsnode2, dev2)
+queue2 = ec.register_resource("ns3::DropTailQueue")
+ec.register_connection(dev2, queue2)
+
+# Add a csma channel
+chan = ec.register_resource("ns3::CsmaChannel")
+ec.set(chan, "Delay", "0s")
+ec.register_connection(chan, dev1)
+ec.register_connection(chan, dev2)
+
+### create pinger
+ping = ec.register_resource("ns3::V4Ping")
+ec.set (ping, "Remote", "10.0.0.2")
+ec.set (ping, "Interval", "1s")
+ec.set (ping, "Verbose", True)
+ec.set (ping, "StartTime", "0s")
+ec.set (ping, "StopTime", "20s")
+ec.register_connection(ping, nsnode1)
+
+ec.deploy()
+
+ec.wait_finished([ping])
+
+stdout = ec.trace(simu, "stdout") 
+
+ec.shutdown()
+
+print "PING OUTPUT", stdout
similarity index 99%
rename from examples/linux/ns3/wifi_ping.py
rename to examples/ns3/local_mobile_wifi_ping.py
index 47bc335..f0be548 100644 (file)
@@ -126,7 +126,7 @@ ec.set(node, "hostname", "localhost")
 simu = ec.register_resource("LinuxNS3Simulation")
 ec.set(simu, "verbose", True)
 ec.set(simu, "enableDump", True)
-ec.set (simu, "stopTime", "22s")
+ec.set (simu, "StopTime", "22s")
 ec.register_connection(simu, node)
 
 x = 30
similarity index 98%
rename from examples/linux/ns3/local_ping.py
rename to examples/ns3/local_p2p_ping.py
index 1a31958..8b9f379 100644 (file)
@@ -20,7 +20,7 @@
 
 from nepi.execution.ec import ExperimentController 
 
-ec = ExperimentController(exp_id = "ns3-local-ping")
+ec = ExperimentController(exp_id = "ns3-local-p2p-ping")
 
 # Simulation will executed in the local machine
 node = ec.register_resource("LinuxNode")
diff --git a/examples/ns3/local_wifi_ping.py b/examples/ns3/local_wifi_ping.py
new file mode 100644 (file)
index 0000000..9d854d7
--- /dev/null
@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController 
+
+ec = ExperimentController(exp_id = "ns3-local-wifi-ping")
+
+# The simulation will be executed on the local machine
+node = ec.register_resource("LinuxNode")
+ec.set(node, "hostname", "localhost")
+
+# Add a simulation resource
+simu = ec.register_resource("LinuxNS3Simulation")
+ec.set(simu, "verbose", True)
+ec.register_connection(simu, node)
+
+## Add a ns-3 node with its protocol stack
+nsnode1 = ec.register_resource("ns3::Node")
+ec.register_connection(nsnode1, simu)
+
+ipv4 = ec.register_resource("ns3::Ipv4L3Protocol")
+ec.register_connection(nsnode1, ipv4)
+arp = ec.register_resource("ns3::ArpL3Protocol")
+ec.register_connection(nsnode1, arp)
+icmp = ec.register_resource("ns3::Icmpv4L4Protocol")
+ec.register_connection(nsnode1, icmp)
+
+# Adding constant mobility to the ns-3 node
+mobility1 = ec.register_resource("ns3::ConstantPositionMobilityModel") 
+position1 = "%d:%d:%d" % (0, 0, 0)
+ec.set(mobility1, "Position", position1)
+ec.register_connection(nsnode1, mobility1)
+
+# Add a wifi access point net device to the node
+dev1 = ec.register_resource("ns3::WifiNetDevice")
+ec.set(dev1, "ip", "10.0.0.1")
+ec.set(dev1, "prefix", "30")
+ec.register_connection(nsnode1, dev1)
+
+phy1 = ec.register_resource("ns3::YansWifiPhy")
+ec.set(phy1, "Standard", "WIFI_PHY_STANDARD_80211a")
+ec.register_connection(dev1, phy1)
+
+error1 = ec.register_resource("ns3::NistErrorRateModel")
+ec.register_connection(phy1, error1)
+
+manager1 = ec.register_resource("ns3::ArfWifiManager")
+ec.register_connection(dev1, manager1)
+
+mac1 = ec.register_resource("ns3::ApWifiMac")
+ec.set(mac1, "Standard", "WIFI_PHY_STANDARD_80211a")
+ec.register_connection(dev1, mac1)
+
+## Add another ns-3 node with its protocol stack
+nsnode2 = ec.register_resource("ns3::Node")
+ec.register_connection(nsnode2, simu)
+
+ipv4 = ec.register_resource("ns3::Ipv4L3Protocol")
+ec.register_connection(nsnode2, ipv4)
+arp = ec.register_resource("ns3::ArpL3Protocol")
+ec.register_connection(nsnode2, arp)
+icmp = ec.register_resource("ns3::Icmpv4L4Protocol")
+ec.register_connection(nsnode2, icmp)
+
+# Adding constant mobility to the ns-3 node
+mobility2 = ec.register_resource("ns3::ConstantPositionMobilityModel") 
+position2 = "%d:%d:%d" % (50, 50, 0)
+ec.set(mobility2, "Position", position1)
+ec.register_connection(nsnode2, mobility2)
+
+# Add a wifi station net device to the node
+dev2 = ec.register_resource("ns3::WifiNetDevice")
+ec.set(dev2, "ip", "10.0.0.2")
+ec.set(dev2, "prefix", "30")
+ec.register_connection(nsnode2, dev2)
+
+phy2 = ec.register_resource("ns3::YansWifiPhy")
+ec.set(phy2, "Standard", "WIFI_PHY_STANDARD_80211a")
+ec.register_connection(dev2, phy2)
+
+error2 = ec.register_resource("ns3::NistErrorRateModel")
+ec.register_connection(phy2, error2)
+
+manager2 = ec.register_resource("ns3::ArfWifiManager")
+ec.register_connection(dev2, manager2)
+
+mac2 = ec.register_resource("ns3::StaWifiMac")
+ec.set(mac2, "Standard", "WIFI_PHY_STANDARD_80211a")
+ec.register_connection(dev2, mac2)
+
+# Add a wifi channel
+chan = ec.register_resource("ns3::YansWifiChannel")
+delay = ec.register_resource("ns3::ConstantSpeedPropagationDelayModel")
+ec.register_connection(chan, delay)
+loss = ec.register_resource("ns3::LogDistancePropagationLossModel")
+ec.register_connection(chan, loss)
+ec.register_connection(chan, phy1)
+ec.register_connection(chan, phy2)
+
+### create pinger
+ping = ec.register_resource("ns3::V4Ping")
+ec.set (ping, "Remote", "10.0.0.2")
+ec.set (ping, "Interval", "1s")
+ec.set (ping, "Verbose", True)
+ec.set (ping, "StartTime", "0s")
+ec.set (ping, "StopTime", "20s")
+ec.register_connection(ping, nsnode1)
+
+ec.deploy()
+
+ec.wait_finished([ping])
+
+stdout = ec.trace(simu, "stdout") 
+
+ec.shutdown()
+
+print "PING OUTPUT", stdout
similarity index 97%
rename from examples/linux/ns3/remote_ping.py
rename to examples/ns3/remote_p2p_ping.py
index e5a870f..6f04d78 100644 (file)
@@ -20,7 +20,7 @@
 
 from nepi.execution.ec import ExperimentController 
 
-from optparse import OptionParser, SUPPRESS_HELP
+from optparse import OptionParser
 
 usage = ("usage: %prog -H <hostanme> -u <username> -i <ssh-key>")
 
@@ -38,7 +38,7 @@ hostname = options.hostname
 username = options.username
 identity = options.ssh_key
 
-ec = ExperimentController(exp_id = "ns3-remote-ping")
+ec = ExperimentController(exp_id = "ns3-remote-p2p-ping")
 
 # Simulation will run in a remote machine
 node = ec.register_resource("LinuxNode")
index 8fd81fe..b39c335 100644 (file)
@@ -1,45 +1,45 @@
-"""
-    NEPI, a framework to manage network experiments
-    Copyright (C) 2013 INRIA
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-    Author: Alina Quereilhac <alina.quereilhac@inria.fr>
-            Julien Tribino <julien.tribino@inria.fr>
-
-    Example :
-      - Testbed : Nitos
-      - Explanation :
-
-       VLC Streaming on VLC
-                   
-     Node
-     omf.nitos.node0xx 
-     0
-     |
-     |
-     0
-     xEyes
-   
-      - Experiment:
-        - t0 : Deployment
-        - t1 : xEeyes Start
-        - t2 (t1 + 10s) : xEyes stop
-        - t3 (t2 + 2s) : Kill the application
-"""
-
 #!/usr/bin/env python
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors: Alina Quereilhac <alina.quereilhac@inria.fr>
+#         Julien Tribino <julien.tribino@inria.fr>
+      
+# Topology
+#
+#
+#  Testbed : Nitos
+#
+#     Node
+#     omf.nitos.node0xx 
+#     0
+#     |
+#     |
+#     0
+#     xEyes
+#   
+#      - Experiment:
+#        - t0 : Deployment
+#        - t1 : xEyes Start
+#        - t2 (t1 + 10s) : xEyes stop
+#        - t3 (t2 + 2s) : Kill the application
+#
+#
+
 from nepi.execution.resource import ResourceFactory, ResourceAction, ResourceState
 from nepi.execution.ec import ExperimentController
 
@@ -49,8 +49,8 @@ ec = ExperimentController()
 # Create and Configure the Nodes
 node1 = ec.register_resource("OMFNode")
 ec.set(node1, 'hostname', 'omf.nitos.node0XX')
-ec.set(node1, 'xmppServer', "ZZZ")
-ec.set(node1, 'xmppUser', "nitlab.inf.uth.gr")
+ec.set(node1, 'xmppServer', "nitlab.inf.uth.gr")
+ec.set(node1, 'xmppUser', "<YOUR-SLICE>")
 ec.set(node1, 'xmppPort', "5222")
 ec.set(node1, 'xmppPassword', "1234")
 ec.set(node1, 'version', "5")
diff --git a/examples/omf/vod_exp/big_buck_bunny_240p_mpeg4_lq.ts b/examples/omf/vod_exp/big_buck_bunny_240p_mpeg4_lq.ts
new file mode 100755 (executable)
index 0000000..d947b4b
Binary files /dev/null and b/examples/omf/vod_exp/big_buck_bunny_240p_mpeg4_lq.ts differ
diff --git a/examples/omf/vod_exp/conf_Broadcast.vlm b/examples/omf/vod_exp/conf_Broadcast.vlm
new file mode 100644 (file)
index 0000000..2a5ed8a
--- /dev/null
@@ -0,0 +1,5 @@
+new BUNNY broadcast enabled loop
+setup BUNNY input /home/inria_lguevgeo/.nepi/nepi-usr/src/big_buck_bunny_240p_mpeg4_lq.ts
+setup BUNNY output #rtp{access=udp,mux=ts,sdp=rtsp://0.0.0.0:8554/BUNNY}
+new test_sched schedule enabled
+setup test_sched append control BUNNY play
diff --git a/examples/omf/vod_exp/conf_VoD.vlm b/examples/omf/vod_exp/conf_VoD.vlm
new file mode 100644 (file)
index 0000000..417795c
--- /dev/null
@@ -0,0 +1,3 @@
+new BUNNY vod enabled
+setup BUNNY input /home/inria_lguevgeo/.nepi/nepi-usr/src/big_buck_bunny_240p_mpeg4_lq.ts 
+
diff --git a/examples/omf/vod_exp/vod_experiment.py b/examples/omf/vod_exp/vod_experiment.py
new file mode 100755 (executable)
index 0000000..b3459e8
--- /dev/null
@@ -0,0 +1,277 @@
+#!/usr/bin/env python
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.resource import ResourceAction, ResourceState
+
+import os
+import time
+import argparse
+
+# Set experiment for broadcast or vod mode
+
+parser = argparse.ArgumentParser(description='NEPI VoD/Broadcast experiment')
+parser.add_argument('-m', '--mode', help='Set vlc mode, possible values <vod> or <broadcast>', required=True)
+args = parser.parse_args()
+
+mode = args.mode
+
+# Create the entity Experiment Controller
+
+exp_id = "vod_exp"
+ec = ExperimentController(exp_id)
+
+# Define SFA credentials
+
+slicename = 'ple.inria.lguevgeo'
+sfauser = 'ple.inria.lucia_guevgeozian_odizzio'
+sfaPrivateKey = '/user/lguevgeo/home/.sfi/lucia_guevgeozian_odizzio.pkey'
+
+# Functions for nodes and ifaces registration
+
+def create_planetlab_node(ec, host):
+    node = ec.register_resource("PlanetlabSfaNode")
+    ec.set(node, "hostname", host)
+    ec.set(node, "username", "inria_lguevgeo")
+    ec.set(node, "sfauser", sfauser)
+    ec.set(node, "sfaPrivateKey", sfaPrivateKey)
+    ec.set(node, 'cleanExperiment', True)
+    return node
+
+def create_omf_node(ec, host):
+    node = ec.register_resource("WilabtSfaNode")
+    ec.set(node, "host", host)
+    ec.set(node, "slicename", slicename)
+    ec.set(node, "sfauser", sfauser)
+    ec.set(node, "sfaPrivateKey", sfaPrivateKey)
+    ec.set(node, "gatewayUser", "nepi")
+    ec.set(node, "gateway", "bastion.test.iminds.be")
+    ec.set(node, "disk_image", 'NepiVlcOMF6Baseline')
+    ec.set(node, 'xmppServer', "xmpp.ilabt.iminds.be")
+    ec.set(node, 'xmppUser', "nepi")
+    ec.set(node, 'xmppPort', "5222")
+    ec.set(node, 'xmppPassword', "1234")
+    return node
+
+def create_omf_iface(ec, ip, node):
+    iface = ec.register_resource("OMFWifiInterface")
+    ec.set(iface, 'name', 'wlan0')
+    ec.set(iface, 'mode', "adhoc")
+    ec.set(iface, 'hw_mode', "g")
+    ec.set(iface, 'essid', "vlc")
+    ec.set(iface, 'ip', ip)
+    ec.register_connection(iface, node)
+    return iface
+
+# Register Internet VLC server
+
+video_server = create_planetlab_node(ec, 'planetlab3.xeno.cl.cam.ac.uk')
+
+# Register wifi media center and client nodes
+
+wifi_center = create_omf_node(ec, 'zotacB1')
+client1 = create_omf_node(ec, 'zotacB3')
+client2 = create_omf_node(ec, 'zotacB5')
+client3 = create_omf_node(ec, 'zotacC1')
+client4 = create_omf_node(ec, 'zotacC3')
+client5 = create_omf_node(ec, 'zotacC4')
+
+omf_nodes = [wifi_center, client1, client2, client3, client4, client5]
+
+# Register ifaces in wireless nodes
+
+iface_center = create_omf_iface(ec, "192.168.0.1/24", wifi_center)
+iface_client1 = create_omf_iface(ec, "192.168.0.2/24", client1)
+iface_client2 = create_omf_iface(ec, "192.168.0.3/24", client2)
+iface_client3 = create_omf_iface(ec, "192.168.0.4/24", client3)
+iface_client4 = create_omf_iface(ec, "192.168.0.5/24", client4)
+iface_client5 = create_omf_iface(ec, "192.168.0.6/24", client5)
+
+omf_ifaces = [iface_center, iface_client1, iface_client2, iface_client3, iface_client4, iface_client5]
+
+# Register channel
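+# (the wireless interfaces below are all connected to this channel resource,
+# configured to use wifi channel 6)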
+
+chan = ec.register_resource("OMFChannel")
+ec.set(chan, 'channel', "6")
+
+# Register connection ifaces - channel
+
+ec.register_connection(iface_center, chan)
+ec.register_connection(iface_client1, chan)
+ec.register_connection(iface_client2, chan)
+ec.register_connection(iface_client3, chan)
+ec.register_connection(iface_client4, chan)
+ec.register_connection(iface_client5, chan)
+
+resources = [video_server] + omf_nodes + omf_ifaces + [chan]
+
+# Deploy physical resources and wait until they become provisioned
+
+ec.deploy(resources)
+
+ec.wait_deployed(resources)
+  
+time.sleep(3)
+
+# Functions for applications registration in the nodes
+
+def create_vlc_server(ec, video_server, mode):
+    vlc_server = ec.register_resource("LinuxApplication")
+    ec.set(vlc_server, "depends", "vlc")
+    ec.set(vlc_server, "sources", "examples/omf/demo_openlab/big_buck_bunny_240p_mpeg4_lq.ts")
+    # Depending on the mode selected for the experiment, a different
+    # configuration file and command to run are uploaded to the server
+    if mode == 'vod':
+        ec.set(vlc_server, "files", "examples/omf/demo_openlab/conf_VoD.vlm")
+        ec.set(vlc_server, "command", "sudo -S dbus-uuidgen --ensure ; cvlc --vlm-conf ${SHARE}/conf_VoD.vlm --rtsp-host 128.232.103.203:5554 2>/tmp/logpl.txt")
+    elif mode == 'broadcast':
+        ec.set(vlc_server, "files", "examples/omf/demo_openlab/conf_Broadcast.vlm")
+        ec.set(vlc_server, "command", "sudo -S dbus-uuidgen --ensure ; cvlc --vlm-conf ${SHARE}/conf_Broadcast.vlm --rtsp-host 128.232.103.203:5554 2>/tmp/logpl.txt")
+    ec.register_connection(video_server, vlc_server)
+    return vlc_server
+
+def create_omf_app(ec, command, node):
+    app = ec.register_resource("OMFApplication")
+    ec.set(app, 'command', command)
+    ec.register_connection(app, node)
+    return app
+
+
+# Run the VLC server in the Planetlab node
+
+vlc_server = create_vlc_server(ec, video_server, mode)
+
+# Upload configuration to the wifi media center and run VLC
+
+if mode == 'vod':
+    update_file_wificenter = "echo -e 'new BUNNY vod enabled\\n"\
+       "setup BUNNY input rtsp://128.232.103.203:5554/BUNNY' > /root/wificenter.vlm"
+    command_wificenter =  "/root/vlc/vlc-1.1.13/cvlc --vlm-conf /root/wificenter.vlm --rtsp-host 192.168.0.1:5554"
+elif mode == 'broadcast':
+    update_file_wificenter = "echo -e 'new BUNNY broadcast enabled loop\\n"\
+       "setup BUNNY input rtsp://128.232.103.203:8554/BUNNY\\n"\
+       "setup BUNNY output #rtp{access=udp,mux=ts,sdp=rtsp://0.0.0.0:8554/BUNNY}\\n\\n"\
+       "new test_sched schedule enabled\\n"\
+       "setup test_sched append control BUNNY play' > /root/wificenter.vlm"
+    command_wificenter =  "/root/vlc/vlc-1.1.13/cvlc --vlm-conf /root/wificenter.vlm --rtsp-host 192.168.0.1:8554"
+
+upload_conf = create_omf_app(ec, update_file_wificenter , wifi_center)
+vlc_wificenter = create_omf_app(ec, command_wificenter , wifi_center)
+
+ec.register_condition(upload_conf, ResourceAction.START, vlc_server, ResourceState.STARTED , "2s")
+ec.register_condition(vlc_wificenter, ResourceAction.START, upload_conf, ResourceState.STARTED , "2s")
+
+# measurements in video server (PL node)
+measure_videoserver = ec.register_resource("LinuxApplication")
+ec.set(measure_videoserver, "depends", "tcpdump")
+ec.set(measure_videoserver, "sudo", True)
+command = "tcpdump -i eth0 not arp -n -w /tmp/capplserver_%s.pcap" % ("$(date +'%Y%m%d%H%M%S')")
+ec.set(measure_videoserver, "command", command)
+ec.register_connection(measure_videoserver, video_server)
+
+# Deploy servers
+ec.deploy([vlc_server, upload_conf, vlc_wificenter, measure_videoserver])
+
+ec.wait_started([vlc_server, upload_conf, vlc_wificenter, measure_videoserver])
+
+time.sleep(3)
+
+def deploy_experiment(ec, clients, wifi_center):
+
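+    # For each run: capture traffic on the wifi center (eth0 and wlan0) and
+    # on every client with tcpdump, start a vlc client on each client node
+    # that saves the received stream to a file, and schedule killall commands
+    # to stop vlc and tcpdump once the run is over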
+    # measurements in transmitter eth0
+    command_measure_wificentereth0 = "/usr/sbin/tcpdump -i eth0 not arp -n -w /tmp/capwificen_eth0_%s_%s.pcap" % (len(clients), "$(date +'%Y%m%d%H%M%S')")
+    measure_wificentereth0 = create_omf_app(ec, command_measure_wificentereth0, wifi_center)
+    ec.register_condition(measure_wificentereth0, ResourceAction.STOP, measure_wificentereth0, ResourceState.STARTED , "65s")
+
+    # measurements in transmitter wlan0
+    command_measure_wificenterwlan0 = "/usr/sbin/tcpdump -i wlan0 not arp -n -w /tmp/capwificen_wlan0_%s_%s.pcap" % (len(clients), "$(date +'%Y%m%d%H%M%S')")
+    measure_wificenterwlan0 = create_omf_app(ec, command_measure_wificenterwlan0, wifi_center)
+    ec.register_condition(measure_wificenterwlan0, ResourceAction.STOP, measure_wificenterwlan0, ResourceState.STARTED , "65s")
+
+    # kill tcpdumps in wificenter
+    command_kill_measure_wificentereth0 = "killall /usr/sbin/tcpdump"
+    kill_measure_wificentereth0 = create_omf_app(ec, command_kill_measure_wificentereth0, wifi_center)
+    ec.register_condition(kill_measure_wificentereth0, ResourceAction.START, measure_wificentereth0, ResourceState.STARTED , "65s")
+    ec.register_condition(kill_measure_wificentereth0, ResourceAction.STOP, kill_measure_wificentereth0, ResourceState.STARTED , "2s")
+
+
+    apps = [measure_wificentereth0, measure_wificenterwlan0, kill_measure_wificentereth0]
+    delay = '2s'
+    for client in clients:
+        client_host = ec.get(client, 'host').split('.')[0]
+        # measurements in clients
+        command_measure_client = "/usr/sbin/tcpdump -i wlan0 not arp -n -w /tmp/capcli_%s_%s_%s.pcap" % (client_host, len(clients), "$(date +'%Y%m%d%H%M%S')")
+        # run vlc client
+        if mode == 'broadcast':
+            command_client =  "/root/vlc/vlc-1.1.13/cvlc rtsp://192.168.0.1:8554/BUNNY --sout=file/ts:%s_%s_%s.ts 2>/tmp/logcli.txt" % (client_host, len(clients), "$(date +'%Y%m%d%H%M%S')")
+        elif mode == 'vod':    
+            command_client =  "/root/vlc/vlc-1.1.13/cvlc rtsp://192.168.0.1:5554/BUNNY --sout=file/ts:%s_%s_%s.ts 2>/tmp/logcli.txt" % (client_host, len(clients), "$(date +'%Y%m%d%H%M%S')")
+
+        # kill vlc client and tcpdump
+        command_client_killvlc = "killall vlc vlc_app"
+        command_client_killtcp = "killall /usr/sbin/tcpdump"
+
+        run_client = create_omf_app(ec, command_client, client)
+        measure_client = create_omf_app(ec, command_measure_client, client)
+        kill_clientvlc = create_omf_app(ec, command_client_killvlc, client)
+        kill_clienttcp = create_omf_app(ec, command_client_killtcp, client)
+        ec.register_condition(run_client, ResourceAction.START, measure_client, ResourceState.STARTED , delay)
+        ec.register_condition([run_client, measure_client], ResourceAction.STOP, run_client, ResourceState.STARTED , "60s")
+        ec.register_condition(kill_clientvlc, ResourceAction.START, run_client, ResourceState.STARTED , "60s")
+        ec.register_condition(kill_clienttcp, ResourceAction.START, measure_client, ResourceState.STARTED , "60s")
+        ec.register_condition(kill_clientvlc, ResourceAction.STOP, kill_clientvlc, ResourceState.STARTED , "2s")
+        ec.register_condition(kill_clienttcp, ResourceAction.STOP, kill_clienttcp, ResourceState.STARTED , "2s")
+        apps.append(run_client)
+        apps.append(measure_client)
+        apps.append(kill_clientvlc)
+        apps.append(kill_clienttcp)
+    
+    return apps
+
+#################
+## 1 client run #
+#################
+
+apps1 = deploy_experiment(ec, [client1], wifi_center)
+
+ec.deploy(apps1)
+ec.wait_finished(apps1)
+
+################
+# 3 client run #
+################
+
+#apps3 = deploy_experiment(ec, [client1, client2, client3], wifi_center)
+#
+#ec.deploy(apps3)
+#ec.wait_finished(apps3)
+
+################
+# 5 client run #
+################
+#
+#apps5 = deploy_experiment(ec, [client1, client2, client3, client4, client5], wifi_center)
+
+#ec.deploy(apps5)
+#ec.wait_finished(apps5)
+
+ec.shutdown()
+
+# End
index d1e4135..4778911 100644 (file)
@@ -55,24 +55,24 @@ def add_ovs(ec, bridge_name, virtual_ip_pref, controller_ip, controller_port, no
     ec.register_connection(ovs, node)
     return ovs
 
-def add_port(ec, port_name, ovs):
+def add_port(ec, port_name, network, ovs):
     port = ec.register_resource("OVSPort")
     ec.set(port, "port_name", port_name)
+    ec.set(port, "network", network)
     ec.register_connection(port, ovs)
     return port
 
-def add_tap(ec, ip4, prefix4, pointopoint, node):
+def add_tap(ec, endpoint_ip, endpoint_prefix, pointopoint, node):
     tap = ec.register_resource("PlanetlabTap")
-    ec.set(tap, "ip4", ip4)
-    ec.set(tap, "prefix4", prefix4)
+    ec.set(tap, "endpoint_ip", endpoint_ip)
+    ec.set(tap, "endpoint_prefix", endpoint_prefix)
     ec.set(tap, "pointopoint", pointopoint)
     ec.set(tap, "up", True)
     ec.register_connection(tap, node)
     return tap
 
-def add_tunnel(ec, network, port0, tap):
-    tunnel = ec.register_resource("OVSTunnel")
-    ec.set(tunnel, "network", network)
+def add_tunnel(ec, port0, tap):
+    tunnel = ec.register_resource("LinuxUdpTunnel")
     ec.register_connection(port0, tunnel)
     ec.register_connection(tunnel, tap)
     return tunnel
@@ -92,7 +92,7 @@ switch2 = "planetlab2.upc.es"
 host1 = "planetlab2.ionio.gr"
 host2 = "iraplab2.iralab.uni-karlsruhe.de"
 
-ip_controller = "xxx.yyy.zzz.ttt"
+ip_controller = "1.1.1.1"
 
 #XXX : Depends on the Vsys_tag of your slice
 network = "192.168.3.0"
@@ -111,10 +111,10 @@ ovs1 = add_ovs(ec, "nepi_bridge_1", "192.168.3.1/24", ip_controller, "6633", s1_
 ovs2 = add_ovs(ec, "nepi_bridge_2", "192.168.3.2/24", ip_controller, "6633", s2_node)
 
 # Add ports on ovs
-port1 = add_port(ec, "nepi_port1", ovs1)
-port3 = add_port(ec, "nepi_port3", ovs1)
-port2 = add_port(ec, "nepi_port2", ovs2)
-port4 = add_port(ec, "nepi_port4", ovs2)
+port1 = add_port(ec, "nepi_port1", network, ovs1)
+port3 = add_port(ec, "nepi_port3", network, ovs1)
+port2 = add_port(ec, "nepi_port2", network, ovs2)
+port4 = add_port(ec, "nepi_port4", network, ovs2)
 
 h1_node = add_node(ec, host1, slicename, pl_user, pl_password)
 h2_node = add_node(ec, host2, slicename, pl_user, pl_password)
@@ -124,9 +124,9 @@ tap1 = add_tap(ec, "192.168.3.3", 24, "192.168.3.1", h1_node)
 tap2 = add_tap(ec, "192.168.3.4", 24, "192.168.3.2", h2_node)
 
 # Connect the nodes
-tunnel1 = add_tunnel(ec, network, port1, tap1)
-tunnel2 = add_tunnel(ec, network, port2, tap2)
-tunnel3 = add_tunnel(ec, network, port3, port4)
+tunnel1 = add_tunnel(ec, port1, tap1)
+tunnel2 = add_tunnel(ec, port2, tap2)
+tunnel3 = add_tunnel(ec, port3, port4)
 
 # Add ping commands
 app1 = add_app(ec, "ping -c5 192.168.3.2", s1_node)
index 6adc924..b48485e 100644 (file)
@@ -53,24 +53,24 @@ def add_ovs(ec, bridge_name, virtual_ip_pref, controller_ip, controller_port, no
     ec.register_connection(ovs, node)
     return ovs
 
-def add_port(ec, port_name, ovs):
+def add_port(ec, port_name, network, ovs):
     port = ec.register_resource("OVSPort")
     ec.set(port, "port_name", port_name)
+    ec.set(port, "network", network)
     ec.register_connection(port, ovs)
     return port
 
-def add_tap(ec, ip4, prefix4, pointopoint, node):
+def add_tap(ec, endpoint_ip, endpoint_prefix, pointopoint, node):
     tap = ec.register_resource("PlanetlabTap")
-    ec.set(tap, "ip4", ip4)
-    ec.set(tap, "prefix4", prefix4)
+    ec.set(tap, "endpoint_ip", endpoint_ip)
+    ec.set(tap, "endpoint_prefix", endpoint_prefix)
     ec.set(tap, "pointopoint", pointopoint)
     ec.set(tap, "up", True)
     ec.register_connection(tap, node)
     return tap
 
-def add_tunnel(ec, network, port0, tap):
-    tunnel = ec.register_resource("OVSTunnel")
-    ec.set(tunnel, "network", network)
+def add_tunnel(ec, port0, tap):
+    tunnel = ec.register_resource("LinuxUdpTunnel")
     ec.register_connection(port0, tunnel)
     ec.register_connection(tunnel, tap)
     return tunnel
@@ -87,7 +87,7 @@ ec = ExperimentController(exp_id = "test-tr")
 #XXX : Need to put 6 working nodes or to let Nepi find for you
 switch1 = "planetlab2.virtues.fi"
 switch2 = "planetlab2.upc.es"
-switch3 = "planetlab2.cs.aueb.gr"
+switch3 = "planetlab1.informatik.uni-erlangen.de"
 host1 = "planetlab2.ionio.gr"
 host2 = "iraplab2.iralab.uni-karlsruhe.de"
 host3 = "planetlab2.diku.dk"
@@ -113,13 +113,13 @@ ovs2 = add_ovs(ec, "nepi_bridge_2", "192.168.3.4/24", ip_controller, "6633", s2_
 ovs3 = add_ovs(ec, "nepi_bridge_3", "192.168.3.6/24", ip_controller, "6633", s3_node)
 
 # Add ports on ovs
-port1 = add_port(ec, "nepi_port1", ovs1)
-port4 = add_port(ec, "nepi_port4", ovs1)
-port7 = add_port(ec, "nepi_port7", ovs1)
-port2 = add_port(ec, "nepi_port2", ovs2)
-port5 = add_port(ec, "nepi_port5", ovs2)
-port3 = add_port(ec, "nepi_port3", ovs3)
-port6 = add_port(ec, "nepi_port6", ovs3)
+port1 = add_port(ec, "nepi_port1", network, ovs1)
+port4 = add_port(ec, "nepi_port4", network, ovs1)
+port7 = add_port(ec, "nepi_port7", network, ovs1)
+port2 = add_port(ec, "nepi_port2", network, ovs2)
+port5 = add_port(ec, "nepi_port5", network, ovs2)
+port3 = add_port(ec, "nepi_port3", network, ovs3)
+port6 = add_port(ec, "nepi_port6", network, ovs3)
 
 h1_node = add_node(ec, host1, slicename, pl_user, pl_password)
 h2_node = add_node(ec, host2, slicename, pl_user, pl_password)
@@ -131,11 +131,11 @@ tap2 = add_tap(ec, "192.168.3.3", 24, "192.168.3.4", h2_node)
 tap3 = add_tap(ec, "192.168.3.5", 24, "192.168.3.6", h3_node)
 
 # Connect the nodes
-tunnel1 = add_tunnel(ec, network, port1, tap1)
-tunnel2 = add_tunnel(ec, network, port2, tap2)
-tunnel3 = add_tunnel(ec, network, port3, tap3)
-tunnel4 = add_tunnel(ec, network, port4, port5)
-tunnel5 = add_tunnel(ec, network, port7, port6)
+tunnel1 = add_tunnel(ec, port1, tap1)
+tunnel2 = add_tunnel(ec, port2, tap2)
+tunnel3 = add_tunnel(ec, port3, tap3)
+tunnel4 = add_tunnel(ec, port4, port5)
+tunnel5 = add_tunnel(ec, port7, port6)
 #tunnel6 = add_tunnel(ec, network, port8, port9)
 
 # Add ping commands
index 2b04dd9..7dac095 100644 (file)
@@ -63,24 +63,24 @@ def add_ovs(ec, bridge_name, virtual_ip_pref, controller_ip, controller_port, no
     ec.register_connection(ovs, node)
     return ovs
 
-def add_port(ec, port_name, ovs):
+def add_port(ec, port_name, network, ovs):
     port = ec.register_resource("OVSPort")
     ec.set(port, "port_name", port_name)
+    ec.set(port, "network", network)
     ec.register_connection(port, ovs)
     return port
 
-def add_tap(ec, ip4, prefix4, pointopoint, node):
+def add_tap(ec, endpoint_ip, endpoint_prefix, pointopoint, node):
     tap = ec.register_resource("PlanetlabTap")
-    ec.set(tap, "ip4", ip4)
-    ec.set(tap, "prefix4", prefix4)
+    ec.set(tap, "endpoint_ip", endpoint_ip)
+    ec.set(tap, "endpoint_prefix", endpoint_prefix)
     ec.set(tap, "pointopoint", pointopoint)
     ec.set(tap, "up", True)
     ec.register_connection(tap, node)
     return tap
 
-def add_tunnel(ec, network, port0, tap):
-    tunnel = ec.register_resource("OVSTunnel")
-    ec.set(tunnel, "network", network)
+def add_tunnel(ec, port0, tap):
+    tunnel = ec.register_resource("LinuxUdpTunnel")
     ec.register_connection(port0, tunnel)
     ec.register_connection(tunnel, tap)
     return tunnel
@@ -97,7 +97,7 @@ ec = ExperimentController(exp_id = "test-tr")
 #XXX : Need to put 6 working nodes or to let Nepi find for you
 switch1 = "planetlab2.virtues.fi"
 switch2 = "planetlab2.upc.es"
-switch3 = "planetlab2.cs.aueb.gr"
+switch3 = "planetlab1.informatik.uni-erlangen.de"
 host1 = "planetlab2.ionio.gr"
 host2 = "iraplab2.iralab.uni-karlsruhe.de"
 host3 = "planetlab2.diku.dk"
@@ -124,15 +124,15 @@ ovs2 = add_ovs(ec, "nepi_bridge_2", "192.168.3.4/24", ip_controller, "6633", s2_
 ovs3 = add_ovs(ec, "nepi_bridge_3", "192.168.3.6/24", ip_controller, "6633", s3_node)
 
 # Add ports on ovs
-port1 = add_port(ec, "nepi_port1", ovs1)
-port4 = add_port(ec, "nepi_port4", ovs1)
-port7 = add_port(ec, "nepi_port7", ovs1)
-port2 = add_port(ec, "nepi_port2", ovs2)
-port5 = add_port(ec, "nepi_port5", ovs2)
-port8 = add_port(ec, "nepi_port8", ovs2)
-port3 = add_port(ec, "nepi_port3", ovs3)
-port6 = add_port(ec, "nepi_port6", ovs3)
-port9 = add_port(ec, "nepi_port9", ovs3)
+port1 = add_port(ec, "nepi_port1", network, ovs1)
+port4 = add_port(ec, "nepi_port4", network, ovs1)
+port7 = add_port(ec, "nepi_port7", network, ovs1)
+port2 = add_port(ec, "nepi_port2", network, ovs2)
+port5 = add_port(ec, "nepi_port5", network, ovs2)
+port8 = add_port(ec, "nepi_port8", network, ovs2)
+port3 = add_port(ec, "nepi_port3", network, ovs3)
+port6 = add_port(ec, "nepi_port6", network, ovs3)
+port9 = add_port(ec, "nepi_port9", network, ovs3)
 
 h1_node = add_node(ec, host1, slicename, pl_user, pl_password)
 h2_node = add_node(ec, host2, slicename, pl_user, pl_password)
@@ -144,12 +144,12 @@ tap2 = add_tap(ec, "192.168.3.3", 24, "192.168.3.4", h2_node)
 tap3 = add_tap(ec, "192.168.3.5", 24, "192.168.3.6", h3_node)
 
 # Connect the nodes
-tunnel1 = add_tunnel(ec, network, port1, tap1)
-tunnel2 = add_tunnel(ec, network, port2, tap2)
-tunnel3 = add_tunnel(ec, network, port3, tap3)
-tunnel4 = add_tunnel(ec, network, port4, port5)
-tunnel5 = add_tunnel(ec, network, port7, port6)
-tunnel6 = add_tunnel(ec, network, port8, port9)
+tunnel1 = add_tunnel(ec, port1, tap1)
+tunnel2 = add_tunnel(ec, port2, tap2)
+tunnel3 = add_tunnel(ec, port3, tap3)
+tunnel4 = add_tunnel(ec, port4, port5)
+tunnel5 = add_tunnel(ec, port7, port6)
+tunnel6 = add_tunnel(ec, port8, port9)
 
 # Add ping commands
 app1 = add_app(ec, "ping -c5 192.168.3.4", s1_node)
index e85da45..f5941f1 100644 (file)
@@ -50,10 +50,10 @@ def add_node(ec, host, user):
     ec.set(node, "cleanProcesses", True)
     return node
 
-def add_tap(ec, ip4, prefix4, pointopoint, node):
+def add_tap(ec, endpoint_ip, endpoint_prefix, pointopoint, node):
     tap = ec.register_resource("PlanetlabTap")
-    ec.set(tap, "ip4", ip4)
-    ec.set(tap, "prefix4", prefix4)
+    ec.set(tap, "endpoint_ip", endpoint_ip)
+    ec.set(tap, "endpoint_prefix", endpoint_prefix)
     ec.set(tap, "pointopoint", pointopoint)
     ec.set(tap, "up", True)
     ec.register_connection(tap, node)
diff --git a/examples/planetlab/ccn/two_nodes_file_retrieval.py b/examples/planetlab/ccn/two_nodes_file_retrieval.py
deleted file mode 100644 (file)
index 096268f..0000000
+++ /dev/null
@@ -1,165 +0,0 @@
-#
-#    NEPI, a framework to manage network experiments
-#    Copyright (C) 2014 INRIA
-#
-#    This program is free software: you can redistribute it and/or modify
-#    it under the terms of the GNU General Public License as published by
-#    the Free Software Foundation, either version 3 of the License, or
-#    (at your option) any later version.
-#
-#    This program is distributed in the hope that it will be useful,
-#    but WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#    GNU General Public License for more details.
-#
-#    You should have received a copy of the GNU General Public License
-#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
-#
-# Instructions to run this example:
-#
-# 1. First edit the script file where required (See ASSING messages)
-#
-# 2. Then, run the script:
-#
-# $ cd <path-to-nepi>
-# $ PYTHONPATH=$PYTHONPATHS:src python examples/linux/ccn/two_nodes_file_retrieval.py
-#
-
-from nepi.execution.ec import ExperimentController
-
-import os
-
-pl_user = ######### <<< ASSIGN the username used to login to the PlanetLab website >>>
-pl_pass = ######## <<< ASSIGN the password used to login to the PlanetLab website >>>
-pl_ssh_key = ####### <<< ASSING the absolute path to the private SSH key used for Planetlab >>>
-slicename = ####### <<< ASSING the PlanetLab slicename >>>
-
-## Create the experiment controller
-ec = ExperimentController(exp_id = "demo_CCN")
-
-## Register node 1
-node1 = ec.register_resource("PlanetlabNode")
-# Configure NEPI to automatically find and allocate a node in France
-# ec.set(node1, "country", "France")
-# Else, if you want a node in particular set the hostname
-ec.set(node1, "hostname", "peeramidion.irisa.fr")
-# PlanetLab (website) account username
-ec.set(node1, "pluser", pl_user)
-# PlanetLab (website) account password
-ec.set(node1, "plpassword", pl_pass)
-# username should be your PlanetLab slice name 
-ec.set(node1, "username", slicename)
-# Absolute path to the SSH private key for PlanetLab
-ec.set(node1, "identity", pl_ssh_key)
-# Clean all files, results, etc, from previous experiments wit the same exp_id
-ec.set(node1, "cleanExperiment", True)
-# Kill all running processes in the PlanetLab node before running the experiment
-ec.set(node1, "cleanProcesses", True)
-
-## Register node 2 
-node2 = ec.register_resource("PlanetlabNode")
-# Configure NEPI to automatically find and allocate a node in Spain
-#ec.set(node2, "country", "Spain")
-# Else, if you want a node in particular set the hostname
-ec.set(node2, "hostname", "planetlab2.upc.es")
-# PlanetLab (website) account username
-ec.set(node2, "pluser", pl_user)
-# PlanetLab (website) account password
-ec.set(node2, "plpassword", pl_pass)
-# username should be your PlanetLab slice name 
-ec.set(node2, "username", slicename)
-# Absolute path to the SSH private key for PlanetLab
-ec.set(node2, "identity", pl_ssh_key)
-# Clean all files, results, etc, from previous experiments wit the same exp_id
-ec.set(node2, "cleanExperiment", True)
-# Kill all running processes in the PlanetLab node before running the experiment
-ec.set(node2, "cleanProcesses", True)
-
-## Register a CCN daemon in node 1
-ccnd1 = ec.register_resource("LinuxCCND")
-# Set ccnd log level to 7
-ec.set(ccnd1, "debug", 7)
-ec.register_connection(ccnd1, node1)
-
-## Register a CCN daemon in node 2
-ccnd2 = ec.register_resource("LinuxCCND")
-# Set ccnd log level to 7
-ec.set(ccnd2, "debug", 7)
-ec.register_connection(ccnd2, node2)
-
-## Register a repository in node 1
-ccnr1 = ec.register_resource("LinuxCCNR")
-ec.register_connection(ccnr1, ccnd1)
-
-## Push the file into the repository
-local_path_to_content = os.path.join(
-        os.path.dirname(os.path.realpath(__file__)),
-            "..", "..",
-            "big_buck_bunny_240p_mpeg4_lq.ts")
-
-co = ec.register_resource("LinuxCCNContent")
-ec.set(co, "contentName", "ccnx:/test/FILE1")
-# NEPI will upload the specified file to the remote node and write it
-# into the CCN repository
-ec.set(co, "content", local_path_to_content)
-ec.register_connection(co, ccnr1)
-
-## Deploy all resources
-ec.deploy()
-
-## Wait until node 1 and 2 are deployed, so we can retrieve the hostnames
-## of the nodes automatically allocated in planetlab
-ec.wait_deployed([node1, node2])
-
-## Get the hostnames of the two PlanetLab nodes
-hostname1 = ec.get(node1, "hostname")
-print "hostname 1: ", hostname1
-hostname2 = ec.get(node2, "hostname")
-print "hostname 2: ", hostname2
-
-# Register a FIB entry from node 1 to node 2
-entry1 = ec.register_resource("LinuxFIBEntry")
-ec.set(entry1, "host", hostname2)
-ec.register_connection(entry1, ccnd1)
-
-# Register a FIB entry from node 1 to node 2
-entry2 = ec.register_resource("LinuxFIBEntry")
-ec.set(entry2, "host", hostname1)
-ec.register_connection(entry2, ccnd2)
-
-## Retrieve the file stored in node 1 from node 2
-command = "ccncat ccnx:/test/FILE1"
-app = ec.register_resource("LinuxCCNApplication")
-ec.set(app, "command", command)
-ec.register_connection(app, ccnd2)
-
-# Register a collector to automatically collect the ccnd logs
-# to a local directory
-results_dir = "/tmp/demo_CCN_results"
-col1 = ec.register_resource("Collector")
-ec.set(col1, "traceName", "stderr")
-ec.set(col1, "storeDir", results_dir)
-ec.set(col1, "subDir", hostname1)
-ec.register_connection(col1, ccnd1)
-
-col2 = ec.register_resource("Collector")
-ec.set(col2, "traceName", "stderr")
-ec.set(col2, "storeDir", results_dir)
-ec.set(col2, "subDir", hostname2)
-ec.register_connection(col2, ccnd2)
-
-## Deploy the rest of the resources
-ec.deploy(guids=[entry1, entry2, app, col1, col2])
-
-# Wait until the ccncat is finished
-ec.wait_finished([app])
-
-## CCND logs will be collected to the results_dir upon shutdown.
-## We can aldo get the content of the logs now:
-#print "LOG2", ec.trace(ccnd1, "stderr")
-#print "LOG 1", ec.trace(ccnd2, "stderr")
-
-ec.shutdown()
-
diff --git a/examples/planetlab/ccn_simple_transfer.py b/examples/planetlab/ccn_simple_transfer.py
new file mode 100644 (file)
index 0000000..9a3d5b0
--- /dev/null
@@ -0,0 +1,169 @@
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2014 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+#
+
+# Example of how to run this experiment (replace with your information):
+#
+# $ cd <path-to-nepi>
+# python examples/planetlab/ccn_simple_transfer.py -s <pl-slice> -u <pl-user> -p <pl-password> -k <pl-ssh-key> -a <hostname1> -b <hostname2>
+
+# CCN topology:
+#
+#                
+#                 
+#  content                  ccncat
+#  Linux host               Linux host
+#     0 ------- network -------- 1
+#
+
+from nepi.execution.ec import ExperimentController
+
+from optparse import OptionParser, SUPPRESS_HELP
+import os
+
+usage = ("usage: %prog -s <pl-slice> -u <pl-user> -p <pl-password> "
+    "-k <pl-ssh-key> -a <hostanme1> -b <hostname2> ")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-s", "--pl-slice", dest="pl_slice",
+        help="PlanetLab slicename", type="str")
+parser.add_option("-u", "--pl-user", dest="pl_user",
+        help="PlanetLab web username", type="str")
+parser.add_option("-p", "--pl-password", dest="pl_password",
+        help="PlanetLab web password", type="str")
+parser.add_option("-k", "--pl-ssh-key", dest="pl_ssh_key",
+        help="Path to private SSH key associated with the PL account",
+        type="str")
+parser.add_option("-a", "--hostname1", dest="hostname1", 
+        help="Remote host 1", type="str")
+parser.add_option("-b", "--hostname2", dest="hostname2", 
+        help="Remote host 2", type="str")
+(options, args) = parser.parse_args()
+
+hostname1 = options.hostname1
+hostname2 = options.hostname2
+pl_slice = options.pl_slice
+pl_ssh_key = options.pl_ssh_key
+pl_user = options.pl_user
+pl_password = options.pl_password
+
+## Create the experiment controller
+ec = ExperimentController(exp_id = "pl_ccn_simple_transfer")
+
+##### CONFIGURING NODE 1
+
+## Register node 1
+node1 = ec.register_resource("PlanetlabNode")
+# Set the hostname of the first node to use for the experiment
+ec.set(node1, "hostname", hostname1)
+# username should be your SSH user 
+ec.set(node1, "username", pl_slice)
+# Path to the SSH private key
+ec.set(node1, "identity", pl_ssh_key)
+# Planetlab web site user and password
+ec.set(node1, "pluser", pl_user)
+ec.set(node1, "plpassword", pl_password)
+# Clean all files, results, etc., from previous experiments with the same exp_id
+ec.set(node1, "cleanExperiment", True)
+# Kill all running processes in the node before running the experiment
+ec.set(node1, "cleanProcesses", True)
+
+## Register a CCN daemon in node 1
+ccnd1 = ec.register_resource("LinuxCCND")
+# Set ccnd log level to 7
+ec.set(ccnd1, "debug", 7)
+ec.register_connection(ccnd1, node1)
+
+## Register a repository in node 1
+ccnr1 = ec.register_resource("LinuxCCNR")
+ec.register_connection(ccnr1, ccnd1)
+
+## Push the file into the repository
+local_path_to_content = os.path.join(
+        os.path.dirname(os.path.realpath(__file__)),
+            "..", "big_buck_bunny_240p_mpeg4_lq.ts")
+
+content_name = "ccnx:/test/FILE"
+
+# Add a content to the repository
+co = ec.register_resource("LinuxCCNContent")
+ec.set(co, "contentName", content_name)
+# NEPI will upload the specified file to the remote node and write it
+# into the CCN repository
+ec.set(co, "content", local_path_to_content)
+ec.register_connection(co, ccnr1)
+
+##### CONFIGURING NODE 2
+
+## Register node 2 
+node2 = ec.register_resource("PlanetlabNode")
+# Set the hostname of the second node to use for the experiment
+ec.set(node2, "hostname", hostname2)
+# username should be your SSH user 
+ec.set(node2, "username", pl_slice)
+# Path to the SSH private key
+ec.set(node2, "identity", pl_ssh_key)
+# Planetlab web site user and password
+ec.set(node2, "pluser", pl_user)
+ec.set(node2, "plpassword", pl_password)
+# Clean all files, results, etc., from previous experiments with the same exp_id
+ec.set(node2, "cleanExperiment", True)
+# Kill all running processes in the node before running the experiment
+ec.set(node2, "cleanProcesses", True)
+
+## Register a CCN daemon in node 2
+ccnd2 = ec.register_resource("LinuxCCND")
+# Set ccnd log level to 7
+ec.set(ccnd2, "debug", 7)
+ec.register_connection(ccnd2, node2)
+
+## Retrieve the file stored in node 1 from node 2
+ccncat = ec.register_resource("LinuxCCNCat")
+ec.set(ccncat, "contentName", content_name)
+ec.register_connection(ccncat, ccnd2)
+
+##### INTERCONNECTING CCN NODES ...
+
+# Register a FIB entry from node 1 to node 2
+entry1 = ec.register_resource("LinuxFIBEntry")
+ec.set(entry1, "host", hostname2)
+ec.register_connection(entry1, ccnd1)
+
+# Register a FIB entry from node 2 to node 1
+entry2 = ec.register_resource("LinuxFIBEntry")
+ec.set(entry2, "host", hostname1)
+ec.register_connection(entry2, ccnd2)
+
+##### STARTING THE EXPERIMENT
+
+## Deploy all resources
+ec.deploy()
+
+# Wait until the ccncat is finished
+ec.wait_finished([ccncat])
+
+stdout = ec.trace(ccncat, "stdout")
+f = open("video.ts", "w")
+f.write(stdout)
+f.close()
+
+ec.shutdown()
+
+print "Transfered FILE stored localy at video.ts"
+
diff --git a/examples/planetlab/openvswitch/ovs_ping_exp.py b/examples/planetlab/openvswitch/ovs_ping_exp.py
deleted file mode 100644 (file)
index 18c2f15..0000000
+++ /dev/null
@@ -1,182 +0,0 @@
-#!/usr/bin/env python
-#
-#    NEPI, a framework to manage network experiments
-#    Copyright (C) 2013 INRIA
-#
-#    This program is free software: you can redistribute it and/or modify
-#    it under the terms of the GNU General Public License as published by
-#    the Free Software Foundation, either version 3 of the License, or
-#    (at your option) any later version.
-#
-#    This program is distributed in the hope that it will be useful,
-#    but WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#    GNU General Public License for more details.
-#
-#    You should have received a copy of the GNU General Public License
-#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
-#         Alexandros Kouvakas <alexandros.kouvakas@gmail.com>
-#
-#         Switch1 ------- Switch2         
-#            /                \           
-#           /                  \          
-#          /                    \         
-#       Host1                  Host2      
-
-
-
-from nepi.execution.ec import ExperimentController
-import os, time
-
-def add_node(ec, host, user, pl_user, pl_password):
-    node = ec.register_resource("PlanetlabNode")
-    ec.set(node, "hostname", host)
-    ec.set(node, "username", user)
-    if pl_user:
-        ec.set(node, "pluser", pl_user)
-    if pl_password:
-        ec.set(node, "plpassword", pl_password)
-    ec.set(node, "cleanHome", True)
-    ec.set(node, "cleanProcesses", True)
-
-    return node
-
-def add_ovs(ec, bridge_name, virtual_ip_pref, controller_ip, controller_port, node):
-    ovs = ec.register_resource("OVSWitch")
-    ec.set(ovs, "bridge_name", bridge_name)
-    ec.set(ovs, "virtual_ip_pref", virtual_ip_pref)
-    ec.set(ovs, "controller_ip", controller_ip)
-    ec.set(ovs, "controller_port", controller_port)
-    ec.register_connection(ovs, node)
-    return ovs
-
-def add_port(ec, port_name, ovs):
-    port = ec.register_resource("OVSPort")
-    ec.set(port, "port_name", port_name)
-    ec.register_connection(port, ovs)
-    return port
-
-def add_tap(ec, ip4, prefix4, pointopoint, node):
-    tap = ec.register_resource("PlanetlabTap")
-    ec.set(tap, "ip4", ip4)
-    ec.set(tap, "prefix4", prefix4)
-    ec.set(tap, "pointopoint", pointopoint)
-    ec.set(tap, "up", True)
-    ec.register_connection(tap, node)
-    return tap
-
-def add_tunnel(ec, network, port0, tap):
-    tunnel = ec.register_resource("OVSTunnel")
-    ec.set(tunnel, "network", network)
-    ec.register_connection(port0, tunnel)
-    ec.register_connection(tunnel, tap)
-    return tunnel
-
-def add_app(ec, command, node):
-    app = ec.register_resource("LinuxApplication")
-    ec.set(app, "command", command)
-    ec.register_connection(app, node)
-    return app
-
-# Create the EC
-ec = ExperimentController(exp_id = "test")
-
-switch1 = "planetlab2.virtues.fi"
-switch2 = "planetlab2.upc.es"
-host1 = "planetlab2.ionio.gr"
-host2 = "iraplab2.iralab.uni-karlsruhe.de"
-
-network = "192.168.3.0"
-
-slicename = "inria_nepi"
-
-pl_user = os.environ.get("PL_USER")
-pl_password = os.environ.get("PL_PASS")
-
-s1_node = add_node(ec, switch1, slicename, pl_user, pl_password)
-s2_node = add_node(ec, switch2, slicename, pl_user, pl_password)
-
-# Add switches 
-ovs1 = add_ovs(ec, "nepi_bridge", "192.168.3.1/24", "85.23.168.77", "6633", s1_node)
-ovs2 = add_ovs(ec, "nepi_bridge", "192.168.3.2/24", "85.23.168.77", "6633", s2_node)
-
-# Add ports on ovs
-port1 = add_port(ec, "nepi_port1", ovs1)
-port3 = add_port(ec, "nepi_port3", ovs1)
-port2 = add_port(ec, "nepi_port2", ovs2)
-port4 = add_port(ec, "nepi_port4", ovs2)
-
-h1_node = add_node(ec, host1, slicename, pl_user, pl_password)
-h2_node = add_node(ec, host2, slicename, pl_user, pl_password)
-
-# Add tap devices
-tap1 = add_tap(ec, "192.168.3.3", 24, "192.168.3.1", h1_node)
-tap2 = add_tap(ec, "192.168.3.4", 24, "192.168.3.2", h2_node)
-
-# Connect the nodes
-tunnel1 = add_tunnel(ec, network, port1, tap1)
-tunnel2 = add_tunnel(ec, network, port2, tap2)
-tunnel3 = add_tunnel(ec, network, port3, port4)
-
-# Add ping commands
-app1 = add_app(ec, "ping -c5 192.168.3.2", s1_node)
-app2 = add_app(ec, "ping -c5 192.168.3.3", s1_node)
-app3 = add_app(ec, "ping -c5 192.168.3.4", s1_node)
-app4 = add_app(ec, "ping -c5 192.168.3.1", s2_node)
-app5 = add_app(ec, "ping -c5 192.168.3.3", s2_node)
-app6 = add_app(ec, "ping -c5 192.168.3.4", s2_node)
-app7 = add_app(ec, "ping -c5 192.168.3.1", h1_node)
-app8 = add_app(ec, "ping -c5 192.168.3.2", h1_node)
-app9 = add_app(ec, "ping -c5 192.168.3.4", h1_node)
-app10 = add_app(ec, "ping -c5 192.168.3.1", h2_node)
-app11 = add_app(ec, "ping -c5 192.168.3.2", h2_node)
-app12 = add_app(ec, "ping -c5 192.168.3.3", h2_node)
-
-ec.deploy()
-
-ec.wait_finished([app1, app2, app3, app4, app5, app6, app7, app8, app9, app10, app11, app12])
-
-# Retreive ping results and save
-# them in a file
-ping1 = ec.trace(app1, 'stdout')
-ping2 = ec.trace(app2, 'stdout')
-ping3 = ec.trace(app3, 'stdout')
-ping4 = ec.trace(app4, 'stdout')
-ping5 = ec.trace(app5, 'stdout')
-ping6 = ec.trace(app6, 'stdout')
-ping7 = ec.trace(app7, 'stdout')
-ping8 = ec.trace(app8, 'stdout')
-ping9 = ec.trace(app9, 'stdout')
-ping10 = ec.trace(app10, 'stdout')
-ping11 = ec.trace(app11, 'stdout')
-ping12 = ec.trace(app12, 'stdout')
-
-
-f = open("examples/openvswitch/ping_res.txt", 'w')
-
-if not ping12:
-  ec.shutdown()
-
-f.write(ping1)
-f.write(ping2)
-f.write(ping3)
-f.write(ping4)
-f.write(ping5)
-f.write(ping6)
-f.write(ping7)
-f.write(ping8)
-f.write(ping9)
-f.write(ping10)
-f.write(ping11)
-f.write(ping12)
-f.close()
-
-# Delete the overlay network
-ec.shutdown()
-
-
-
-
-
similarity index 52%
rename from examples/planetlab/ping_experiment.py
rename to examples/planetlab/ping.py
index 4a693fd..e36ba61 100644 (file)
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #
 # Author: Lucia Guevgeozian <lucia.guevgeozian_odizzio@inria.fr>
+#         Alina Quereilhac <alina.quereilhac@inria.fr>
+#
+
+# Example of how to run this experiment (replace with your information):
+#
+# $ cd <path-to-nepi>
+# python examples/planetlab/ping.py -s <pl-slice> -u <pl-user> -p <pl-password> -k <pl-ssh-key>  
+
 
 from nepi.execution.ec import ExperimentController
-from nepi.execution.resource import ResourceAction, ResourceState
 
+from optparse import OptionParser
 import os
 
-exp_id = "ping_exp"
+usage = ("usage: %prog -s <pl-slice> -u <pl-user> -p <pl-password> "
+     "-k <pl-ssh-key>")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-s", "--pl-slice", dest="pl_slice",
+        help="PlanetLab slicename", type="str")
+parser.add_option("-u", "--pl-user", dest="pl_user",
+        help="PlanetLab web username", type="str")
+parser.add_option("-p", "--pl-password", dest="pl_password",
+        help="PlanetLab web password", type="str")
+parser.add_option("-k", "--pl-ssh-key", dest="pl_ssh_key",
+        help="Path to private SSH key associated with the PL account",
+        type="str")
 
-# Create the entity Experiment Controller:
-ec = ExperimentController(exp_id)
+(options, args) = parser.parse_args()
 
-# Register the nodes resources:
+pl_slice = options.pl_slice
+pl_ssh_key = options.pl_ssh_key
+pl_user = options.pl_user
+pl_password = options.pl_password
+
+## Create the experiment controller
+ec = ExperimentController(exp_id = "pl_ping")
+
+# Register a PlanetlabNode resource with no restrictions; it can be any node
+node = ec.register_resource("PlanetlabNode")
 
 # The username in this case is the slice name, the one used to log in
 # via ssh into PlanetLab nodes. Replace with your own slice name.
-username = "inria_sfatest"
+ec.set(node, "username", pl_slice)
+ec.set(node, "identity", pl_ssh_key)
 
 # The pluser and plpassword are the ones used to log in to the PlanetLab web
 # site. Replace with your own user and password account information.
-pl_user = "lucia.guevgeozian_odizzio@inria.fr"
-pl_password =  os.environ.get("PL_PASS")
-
-# Define a Planetlab Node with no restriction, it can be any node
-node = ec.register_resource('PlanetlabNode')
-ec.set(node, "username", username)
 ec.set(node, "pluser", pl_user)
 ec.set(node, "plpassword", pl_password)
+
+# Remove previous results
 ec.set(node, "cleanHome", True)
 ec.set(node, "cleanProcesses", True)
 
 # Define a ping application
-app = ec.register_resource('LinuxApplication')
-ec.set(app, 'command', 'ping -c5 google.com > ping_google.txt')
+app = ec.register_resource("LinuxApplication")
+ec.set(app, "command", "ping -c3 nepi.inria.fr")
 
 # Connect the application to the node
 ec.register_connection(node, app)
@@ -60,14 +85,9 @@ ec.deploy()
 # Wait until the application is finished, then retrieve the trace:
 ec.wait_finished(app)
 
-trace = ec.trace(app, 'ping_google.txt')
+trace = ec.trace(app, "stdout")
 
-# Choose a directory to store the traces locally, change to a convenient path for you:
-directory = "examples/planetlab/"
-trace_file = directory + "ping_google.txt"
-f = open(trace_file, "w")
-f.write(trace)
-f.close()
+print "PING outout ", trace
 
 # Do the experiment controller shutdown:
 ec.shutdown()
similarity index 53%
rename from examples/planetlab/ping_filters_experiment.py
rename to examples/planetlab/ping_with_filters.py
index 79d4738..71f23d9 100644 (file)
 #
 # Author: Lucia Guevgeozian <lucia.guevgeozian_odizzio@inria.fr>
 
+# Example of how to run this experiment (replace with your information):
+#
+# $ cd <path-to-nepi>
+# python examples/planetlab/ping_with_filters.py -s <pl-slice> -u <pl-user> -p <pl-password> -k <pl-ssh-key>  
+
 from nepi.execution.ec import ExperimentController
 from nepi.execution.resource import ResourceAction, ResourceState
 
+from optparse import OptionParser
 import os
 
-def create_node(ec, username, pl_user, pl_password, hostname=None, country=None,
-                operatingSystem=None, minBandwidth=None, minCpu=None):
+
+def create_node(ec, pl_slice, pl_ssh_key, pl_user, pl_password, 
+       hostname = None, country = None, operatingSystem = None, 
+       minBandwidth = None, minCpu = None):
 
     node = ec.register_resource("PlanetlabNode")
 
-    if username:
-        ec.set(node, "username", username)
-    if pl_user:
-        ec.set(node, "pluser", pl_user)
-    if pl_password:
-        ec.set(node, "plpassword", pl_password)
+    ec.set(node, "username", pl_slice)
+    ec.set(node, "identity", pl_ssh_key)
+    ec.set(node, "pluser", pl_user)
+    ec.set(node, "plpassword", pl_password)
 
     if hostname:
         ec.set(node, "hostname", hostname)
@@ -51,9 +57,10 @@ def create_node(ec, username, pl_user, pl_password, hostname=None, country=None,
     
     return node
 
-def add_app(ec, command, node, sudo=None, video=None, depends=None, forward_x11=None, \
-        env=None):
+def add_app(ec, command, node, newname = None, sudo = None, 
+        video = None, depends = None, forward_x11 = None, env = None):
     app = ec.register_resource("LinuxApplication")
+
     if sudo is not None:
         ec.set(app, "sudo", sudo)
     if video is not None:
@@ -64,59 +71,90 @@ def add_app(ec, command, node, sudo=None, video=None, depends=None, forward_x11=
         ec.set(app, "forwardX11", forward_x11)
     if env is not None:
         ec.set(app, "env", env)
+
     ec.set(app, "command", command)
 
     ec.register_connection(app, node)
 
+    # add a collector to download the application standard output
+    collector = ec.register_resource("Collector")
+    ec.set(collector, "traceName", "stdout")
+    if newname:
+        ec.set(collector, "rename", newname)
+    ec.register_connection(app, collector)
+
     return app
 
-exp_id = "ping_filters_exp"
+usage = ("usage: %prog -s <pl-slice> -u <pl-user> -p <pl-password> "
+    "-k <pl-ssh-key> -c <country> -o <operating-system> -H <hostname> ")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-s", "--pl-slice", dest="pl_slice",
+        help="PlanetLab slicename", type="str")
+parser.add_option("-u", "--pl-user", dest="pl_user",
+        help="PlanetLab web username", type="str")
+parser.add_option("-p", "--pl-password", dest="pl_password",
+        help="PlanetLab web password", type="str")
+parser.add_option("-k", "--pl-ssh-key", dest="pl_ssh_key",
+        help="Path to private SSH key associated with the PL account",
+        type="str")
+parser.add_option("-c", "--country", dest="country",
+        help="Country for the PL hosts",
+        type="str")
+parser.add_option("-o", "--os", dest="os",
+        help="Operating system for the PL hosts", default="f14",
+        type="str")
+parser.add_option("-H", "--hostname", dest="hostname",
+        help="PlanetLab hostname",
+        type="str")
+
+(options, args) = parser.parse_args()
+
+pl_slice = options.pl_slice
+pl_ssh_key = options.pl_ssh_key
+pl_user = options.pl_user
+pl_password = options.pl_password
+hostname = options.hostname
+country = options.country
+os = options.os
 
 # Create the entity Experiment Controller:
-ec = ExperimentController(exp_id)
+ec = ExperimentController("pl_ping_filters")
 
 # Register the nodes resources:
 
-# The username in this case is the slice name, the one to use for login in 
-# via ssh into PlanetLab nodes. Replace with your own slice name.
-username = "inria_sfatest"
-
-# The pluser and plpassword are the ones used to login in the PlanetLab web 
-# site. Replace with your own user and password account information.
-pl_user = "lucia.guevgeozian_odizzio@inria.fr"
-pl_password =  os.environ.get("PL_PASS")
-
 # Choose the PlanetLab nodes for the experiment; in this example 5 nodes are
 # used, and they are picked according to different criteria.
 
 # First node will be the one defined by its hostname.
-hostname = "planetlab2.utt.fr"
-node1 = create_node(ec, username, pl_user, pl_password, hostname=hostname)
+node1 = create_node(ec, pl_slice, pl_ssh_key, pl_user, pl_password, 
+        hostname = hostname)
 
-# Second node will be any node in France.
-country = "France"
-node2 = create_node(ec, username, pl_user, pl_password, country=country)
+# Second node will be any node in the selected country.
+node2 = create_node(ec, pl_slice, pl_ssh_key, pl_user, pl_password, 
+        country=country)
 
-# Third node will be a node in France that has Fedora 14 installed.
-operatingSystem = "f14"
-node3 = create_node(ec, username, pl_user, pl_password, country=country,
-                operatingSystem=operatingSystem)
+# Third node will be a node in the selected country running the selected
+# Fedora OS
+node3 = create_node(ec, pl_slice, pl_ssh_key, pl_user, pl_password, 
+        country = country,
+        operatingSystem = os)
 
 # Fourth node will have at least 50% of CPU available
 minCpu=50
-node4 = create_node(ec, username, pl_user, pl_password, minCpu=minCpu)
+node4 = create_node(ec, pl_slice, pl_ssh_key, pl_user, pl_password, 
+        minCpu = minCpu)
 
 # Fifth node can be any node, constrains are not important.
-node5 = create_node(ec, username, pl_user, pl_password)
+node5 = create_node(ec, pl_slice, pl_ssh_key, pl_user, pl_password)
 
 # Register the applications to run in the nodes, in this case just ping to the 
 # first node:
-apps_per_node = dict()
 apps = []
 for node in [node2, node3, node4, node5]:
-    command = "ping -c5 %s > ping%s.txt" % (hostname, node)
-    app = add_app(ec, command, node)
-    apps_per_node[node] = app
+    command = "ping -c5 %s" % hostname
+    trace_name = "%s.ping" % hostname
+    app = add_app(ec, command, node, newname = trace_name)
     apps.append(app)
 
 # Register conditions
@@ -125,7 +163,7 @@ for node in [node2, node3, node4, node5]:
 # before the rest of the nodes. This ensures that no other resource will use the
 # identified node even if the constraints match.
 # In this example node2, node3, node4 and node5, are deployed after node1 is 
-# provisioned. node1 must be the node planetlab2.utt.fr, meanwhile node2, node3,
+# provisioned. node1 must be the node identified by hostname, while node2, node3,
 # node4 and node5 just need to fulfill certain constraints.
 # Applications are always deployed after nodes, so no need to register conditions
 # for the apps in this example.
@@ -141,19 +179,7 @@ ec.deploy()
 # Wait until the applications are finished, then retrieve the traces:
 ec.wait_finished(apps)
 
-traces = dict() 
-for node, app in apps_per_node.iteritems():
-    ping_string = "ping%s.txt" % node
-    trace = ec.trace(app, ping_string)
-    traces[node]= trace
-
-# Choose a directory to store the traces locally, change to a convenient path for you:
-directory = "examples/planetlab/"
-for node, trace in traces.iteritems():
-    trace_file = directory + "ping%s.txt" % node
-    f = open(trace_file, "w")
-    f.write(trace)
-    f.close()
+print "Results stored at", ec.exp_dir
 
 # Do the experiment controller shutdown:
 ec.shutdown()
diff --git a/examples/planetlab/select_nodes.py b/examples/planetlab/select_nodes.py
new file mode 100644 (file)
index 0000000..c95ef06
--- /dev/null
@@ -0,0 +1,103 @@
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2014 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+#
+
+# Example of how to run this experiment (replace with your information):
+#
+# $ cd <path-to-nepi>
+# python examples/planetlab/select_nodes.py -s <pl-slice> -u <pl-user> -p <pl-password> -k <pl-ssh-key> -c <country> -o <operating-system> -n <node-count> 
+
+
+from nepi.execution.ec import ExperimentController
+
+from optparse import OptionParser
+import os
+
+usage = ("usage: %prog -s <pl-slice> -u <pl-user> -p <pl-password> "
+    "-k <pl-ssh-key> -c <country> -o <operating-system> -n <node-count> ")
+
+parser = OptionParser(usage = usage)
+parser.add_option("-s", "--pl-slice", dest="pl_slice",
+        help="PlanetLab slicename", type="str")
+parser.add_option("-u", "--pl-user", dest="pl_user",
+        help="PlanetLab web username", type="str")
+parser.add_option("-p", "--pl-password", dest="pl_password",
+        help="PlanetLab web password", type="str")
+parser.add_option("-k", "--pl-ssh-key", dest="pl_ssh_key",
+        help="Path to private SSH key associated with the PL account",
+        type="str")
+parser.add_option("-c", "--country", dest="country",
+        help="Country for the PL hosts",
+        type="str")
+parser.add_option("-o", "--os", dest="os",
+        help="Operating system for the PL hosts",
+        type="str")
+parser.add_option("-n", "--node-count", dest="node_count",
+        help="Number of PL hosts to provision",
+        default = 2,
+        type="int")
+
+(options, args) = parser.parse_args()
+
+pl_slice = options.pl_slice
+pl_ssh_key = options.pl_ssh_key
+pl_user = options.pl_user
+pl_password = options.pl_password
+country = options.country
+os = options.os
+node_count = options.node_count
+
+def add_node(ec, pl_slice, pl_ssh_key, pl_user, pl_password, country, os):
+    node = ec.register_resource("PlanetlabNode")
+    ec.set(node, "username", pl_slice)
+    ec.set(node, "identity", pl_ssh_key)
+    ec.set(node, "pluser", pl_user)
+    ec.set(node, "plpassword", pl_password)
+
+    if country:
+        ec.set(node, "country", country)
+    if os:
+        ec.set(node, "operatingSystem", os)
+
+    ec.set(node, "cleanHome", True)
+    ec.set(node, "cleanProcesses", True)
+
+    return node
+
+## Create the experiment controller
+ec = ExperimentController(exp_id="host_select")
+
+nodes = []
+
+for i in xrange(node_count):
+    node = add_node(ec, pl_slice, pl_ssh_key, pl_user, pl_password, country, os)
+    nodes.append(node)
+
+ec.deploy()
+
+ec.wait_deployed(nodes)
+
+print "SELECTED HOSTS"
+
+for node in nodes:
+    print ec.get(node, "hostname")
+
+ec.shutdown()
+
+
index 00001d8..5685886 100755 (executable)
--- a/setup.py
+++ b/setup.py
@@ -13,7 +13,6 @@ setup(
         platforms   = "Linux, OSX",
         packages    = [
             "nepi",
-            "nepi.design",
             "nepi.execution",
             "nepi.resources",
             "nepi.resources.all",
@@ -27,7 +26,12 @@ setup(
             "nepi.resources.omf",
             "nepi.resources.planetlab",
             "nepi.resources.planetlab.openvswitch",
-            "nepi.util"],
+            "nepi.util",
+            "nepi.util.parsers",
+            "nepi.data",
+            "nepi.data.processing",
+            "nepi.data.processing.ccn",
+            "nepi.data.processing.ping"],
         package_dir = {"": "src"},
         package_data = {
             "nepi.resources.planetlab" : [ "scripts/*.py" ],
diff --git a/src/nepi/data/__init__.py b/src/nepi/data/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/src/nepi/data/processing/__init__.py b/src/nepi/data/processing/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/src/nepi/data/processing/ccn/__init__.py b/src/nepi/data/processing/ccn/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/src/nepi/data/processing/ccn/parser.py b/src/nepi/data/processing/ccn/parser.py
new file mode 100644 (file)
index 0000000..f6452f4
--- /dev/null
@@ -0,0 +1,414 @@
+#!/usr/bin/env python
+
+###############################################################################
+#
+#    CCNX benchmark
+#    Copyright (C) 2014 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+#
+###############################################################################
+
+#
+# This library contains functions to parse (CCNx) ccnd logs.
+#
+# Results from experiments must be stored in a directory
+# named with the experiment run id.
+# ccnd logs are stored in .log files in a subdirectory per node.
+# The following diagram exemplifies the experiment result directory
+# structure (nid_i is the unique identifier assigned to node i):
+#
+#    run_id
+#               \   nid1
+#                        \ nid2.log
+#               \   nid2
+#                        \ nid1.log
+#               \   nid3
+#                        \ nid3.log
+#
+
+import collections
+import functools
+import networkx
+import os
+import pickle
+import tempfile
+
+from nepi.util.timefuncs import compute_delay_ms
+from nepi.util.statfuncs import compute_mean
+import nepi.data.processing.ping.parser as ping_parser
+
+def is_control(content_name):
+    return content_name.startswith("ccnx:/%C1") or \
+            content_name.startswith("ccnx:/ccnx") or \
+            content_name.startswith("ccnx:/...")
+
+
+def parse_file(filename):
+    """ Parses message information from ccnd log files
+
+        filename: path to a ccnd log file
+
+    """
+
+    faces = dict()
+    sep = " "
+
+    f = open(filename, "r")
+
+    data = []
+
+    for line in f:
+        cols =  line.strip().split(sep)
+
+        # CCN_PEEK
+        # MESSAGE interest_from
+        # 1374181938.808523 ccnd[9245]: debug.4352 interest_from 6 ccnx:/test/bunny.ts (23 bytes,sim=0CDCC1D7)
+        #
+        # MESSAGE interest_to
+        # 1374181938.812750 ccnd[9245]: debug.3502 interest_to 5 ccnx:/test/bunny.ts (39 bytes,i=2844,sim=0CDCC1D7)
+        #
+        # MESSAGE CONTENT FROM
+        # 1374181938.868682 ccnd[9245]: debug.4643 content_from 5 ccnx:/test/bunny.ts/%FD%05%1E%85%8FVw/%00/%9E%3D%01%D9%3Cn%95%2BvZ%8
+        #
+        # MESSAGE CONTENT_TO
+        # 1374181938.868772 ccnd[9245]: debug.1619 content_to 6 ccnx:/test/bunny.ts/%FD%05%1E%85%8FVw/%00/%9E%3D%01%D9%3Cn%95%2BvZ%8
+        #
+        # 1375596708.222304 ccnd[9758]: debug.3692 interest_expiry ccnx:/test/bunny.ts/%FD%05%1E%86%B1GS/%00%0A%F7 (44 bytes,c=0:1,i=2819,sim=49FA8048)
+
+        # External face creation
+        # 1374181452.965961 ccnd[9245]: accepted datagram client id=5 (flags=0x40012) 204.85.191.10 port 9695
+
+        if line.find("accepted datagram client") > -1:
+            face_id = (cols[5]).replace("id=",'')
+            ip = cols[7] 
+            port = cols[9]
+            faces[face_id] = (ip, port)
+            continue
+
+        # 1374181452.985296 ccnd[9245]: releasing face id 4 (slot 4)
+        if line.find("releasing face id") > -1:
+            face_id = cols[5]
+            if face_id in faces:
+                del faces[face_id]
+            continue
+
+        if len(cols) < 6:
+            continue
+
+        timestamp = cols[0]
+        message_type = cols[3]
+
+        if message_type not in ["interest_from", "interest_to", "content_from", 
+                "content_to", "interest_dupnonce", "interest_expiry"]:
+            continue
+
+        face_id = cols[4] 
+        content_name = cols[5]
+
+        # Interest Nonce ? -> 412A74-0844-0008-50AA-F6EAD4
+        nonce = ""
+        if message_type in ["interest_from", "interest_to", "interest_dupnonce"]:
+            last = cols[-1]
+            if len(last.split("-")) == 5:
+                nonce = last
+
+        try:
+            size = int((cols[6]).replace('(',''))
+        except:
+            print "interest_expiry without face id!", line
+            continue
+
+        # If no external IP address was identified for this face,
+        # assume it is a local face
+        peer = "localhost"
+
+        if face_id in faces:
+            peer, port = faces[face_id]
+
+        data.append((content_name, timestamp, message_type, peer, face_id, 
+            size, nonce, line))
+
+    f.close()
+
+    return data
+
+def dump_content_history(content_history):
+    f = tempfile.NamedTemporaryFile(delete=False)
+    pickle.dump(content_history, f)
+    f.close()
+    return f.name
+
+def load_content_history(fname):
+    f = open(fname, "r")
+    content_history = pickle.load(f)
+    f.close()
+
+    os.remove(fname)
+    return content_history
+
+def annotate_cn_node(graph, nid, ips2nid, data, content_history):
+    for (content_name, timestamp, message_type, peer, face_id, 
+            size, nonce, line) in data:
+
+        # Ignore control messages for the time being
+        if is_control(content_name):
+            continue
+
+        if message_type == "interest_from" and \
+                peer == "localhost":
+            graph.node[nid]["ccn_consumer"] = True
+        elif message_type == "content_from" and \
+                peer == "localhost":
+            graph.node[nid]["ccn_producer"] = True
+
+        # Ignore local messages for the time being. 
+        # They could later be used to calculate the processing times
+        # of messages.
+        if peer == "localhost":
+            continue
+
+        # remove digest
+        if message_type in ["content_from", "content_to"]:
+            content_name = "/".join(content_name.split("/")[:-1])
+           
+        if content_name not in content_history:
+            content_history[content_name] = list()
+      
+        peernid = ips2nid[peer]
+        graph.add_edge(nid, peernid)
+
+        content_history[content_name].append((timestamp, message_type, nid, 
+            peernid, nonce, size, line))
+
+def annotate_cn_graph(logs_dir, graph, parse_ping_logs = False):
+    """ Adds CCN content history for each node in the topology graph.
+
+    """
+    
+    # Make a copy of the graph to ensure integrity
+    graph = graph.copy()
+
+    ips2nid = dict()
+
+    for nid in graph.nodes():
+        ips = graph.node[nid]["ips"]
+        for ip in ips:
+            ips2nid[ip] = nid
+
+    found_files = False
+
+    # Now walk through the ccnd logs...
+    for dirpath, dnames, fnames in os.walk(logs_dir):
+        # continue if we are not at the leaf level (if there are subdirectories)
+        if dnames: 
+            continue
+        
+        # Each dirpath corresponds to a different node
+        nid = os.path.basename(dirpath)
+
+        # Cast to numeric nid if necessary
+        if int(nid) in graph.nodes():
+            nid = int(nid)
+    
+        content_history = dict()
+
+        for fname in fnames:
+            if fname.endswith(".log"):
+                found_files = True
+                filename = os.path.join(dirpath, fname)
+                data = parse_file(filename)
+                annotate_cn_node(graph, nid, ips2nid, data, content_history)
+
+        # Avoid storing everything in memory, instead dump to a file
+        # and reference the file
+        fname = dump_content_history(content_history)
+        graph.node[nid]["history"] = fname
+
+    if not found_files:
+        msg = "No CCND output files were found to parse at %s " % logs_dir
+        raise RuntimeError, msg
+
+    if parse_ping_logs:
+        ping_parser.annotate_cn_graph(logs_dir, graph)
+
+    return graph
+
+def ccn_producers(graph):
+    """ Returns the nodes that are content providers """
+    return [nid for nid in graph.nodes() \
+            if graph.node[nid].get("ccn_producer")]
+
+def ccn_consumers(graph):
+    """ Returns the nodes that are content consumers """
+    return [nid for nid in graph.nodes() \
+            if graph.node[nid].get("ccn_consumer")]
+
+def process_content_history(graph):
+    """ Compute CCN message counts and aggregates content historical 
+    information in the content_names dictionary 
+    
+    """
+
+    ## Assume single source
+    source = ccn_consumers(graph)[0]
+
+    interest_expiry_count = 0
+    interest_dupnonce_count = 0
+    interest_count = 0
+    content_count = 0
+    content_names = dict()
+
+    # Collect information about exchanged messages by content name and
+    # link delay info.
+    for nid in graph.nodes():
+        # Load the data collected from the node's ccnd log
+        fname = graph.node[nid]["history"]
+        history = load_content_history(fname)
+
+        for content_name in history.keys():
+            hist = history[content_name]
+
+            for (timestamp, message_type, nid1, nid2, nonce, size, line) in hist:
+                if message_type in ["content_from", "content_to"]:
+                    # The first Interest sent will not have a version or chunk number.
+                    # The first Content sent back in reply will end in /=00 or /%00.
+                    # Make sure to map the first Content to the first Interest.
+                    if content_name.endswith("/=00"):
+                        content_name = "/".join(content_name.split("/")[0:-2])
+
+                # Add content name to dictionary
+                if content_name not in content_names:
+                    content_names[content_name] = dict()
+                    content_names[content_name]["interest"] = dict()
+                    content_names[content_name]["content"] = list()
+
+                # Classify interests by replica
+                if message_type in ["interest_from"] and \
+                        nonce not in content_names[content_name]["interest"]:
+                    content_names[content_name]["interest"][nonce] = list()
+     
+                # Add consumer history
+                if nid == source:
+                    if message_type in ["interest_to", "content_from"]:
+                        # content name history as seen by the source
+                        if "consumer_history" not in content_names[content_name]:
+                            content_names[content_name]["consumer_history"] = list()
+
+                        content_names[content_name]["consumer_history"].append(
+                                (timestamp, message_type)) 
+
+                # Add messages per content name and accumulate totals by message type
+                if message_type == "interest_dupnonce":
+                    interest_dupnonce_count += 1
+                elif message_type == "interest_expiry":
+                    interest_expiry_count += 1
+                elif message_type == "interest_from":
+                    interest_count += 1
+                    # Append to interest history of the content name
+                    content_names[content_name]["interest"][nonce].append(
+                            (timestamp, nid2, nid1))
+                elif message_type == "content_from":
+                    content_count += 1
+                    # Append to content history of the content name
+                    content_names[content_name]["content"].append((timestamp, nid2, nid1))
+                else:
+                    continue
+            del hist
+        del history
+
+    # Compute the time elapsed between the time an interest is sent
+    # in the consumer node and when the content is received back
+    for content_name in content_names.keys():
+        # order content and interest messages by timestamp
+        content_names[content_name]["content"] = sorted(
+              content_names[content_name]["content"])
+        
+        for nonce, timestamps in content_names[content_name][
+                    "interest"].iteritems():
+              content_names[content_name]["interest"][nonce] = sorted(
+                        timestamps)
+      
+        history = sorted(content_names[content_name]["consumer_history"])
+        content_names[content_name]["consumer_history"] = history
+
+        # compute the rtt time of the message
+        rtt = None
+        waiting_content = False 
+        interest_timestamp = None
+        content_timestamp = None
+        
+        for (timestamp, message_type) in history:
+            if not waiting_content and message_type == "interest_to":
+                waiting_content = True
+                interest_timestamp = timestamp
+                continue
+
+            if waiting_content and message_type == "content_from":
+                content_timestamp = timestamp
+                break
+    
+        # If the interest could not be matched with its content, mark the rtt as -1
+        rtt = -1
+        if interest_timestamp and content_timestamp:
+            rtt = compute_delay_ms(content_timestamp, interest_timestamp)
+
+        content_names[content_name]["rtt"] = rtt
+        content_names[content_name]["lapse"] = (interest_timestamp, content_timestamp)
+
+    return (graph,
+        content_names,
+        interest_expiry_count,
+        interest_dupnonce_count,
+        interest_count,
+        content_count)
+
+def process_content_history_logs(logs_dir, graph, parse_ping_logs = False):
+    """ Parse CCN logs and aggregate content history information in graph.
+    Returns the annotated graph, message counts and content name history.
+
+    """
+    ## Process logs and analyse data
+    try:
+        graph = annotate_cn_graph(logs_dir, graph, 
+                parse_ping_logs = parse_ping_logs)
+    except:
+        print "Skipping: Error parsing ccnd logs", logs_dir
+        raise
+
+    source = ccn_consumers(graph)[0]
+    target = ccn_producers(graph)[0]
+
+    # Process the data from the ccnd logs, but do not recompute
+    # the link delay.
+    try:
+        (graph,
+        content_names,
+        interest_expiry_count,
+        interest_dupnonce_count,
+        interest_count,
+        content_count) = process_content_history(graph)
+    except:
+        print "Skipping: Error processing ccn data", logs_dir
+        raise
+
+    return (graph,
+            content_names,
+            interest_expiry_count,
+            interest_dupnonce_count,
+            interest_count,
+            content_count) 
diff --git a/src/nepi/data/processing/ping/__init__.py b/src/nepi/data/processing/ping/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/src/nepi/data/processing/ping/parser.py b/src/nepi/data/processing/ping/parser.py
new file mode 100644 (file)
index 0000000..c5745a1
--- /dev/null
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+
+###############################################################################
+#
+#    CCNX benchmark
+#    Copyright (C) 2014 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+#
+###############################################################################
+
+#
+# This library contains functions to parse log files generated using ping. 
+#
+
+import collections
+import re
+import os
+
+# RE to match ping reply lines ("<n> bytes from ... time=<t> ms")
+_rre = re.compile("\d+ bytes from ((?P<hostname>[^\s]+) )?\(?(?P<ip>[^\s]+)\)??: icmp_.eq=\d+ ttl=\d+ time=(?P<time>[^\s]+) ms")
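+#
+# Example of a line the expression matches (hypothetical host and values):
+#
+#   64 bytes from nepi.inria.fr (128.93.62.1): icmp_seq=1 ttl=53 time=27.4 ms
+#
+# parse_file() below extracts (ip, hostname, time) tuples from such lines,
+# e.g. ("128.93.62.1", "nepi.inria.fr", "27.4").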
+
+def parse_file(filename):
+    """
+        filename: path to a ping output file
+
+    """
+
+    f = open(filename, "r")
+
+    # Ping target info
+    target_ip = None
+    target_hostname = None
+   
+    data = []
+
+    for line in f:
+        # match ping reply lines
+        m = re.match(_rre, line)
+        if not m:
+            continue
+
+        target_ip = m.groupdict()["ip"]
+        # FIX THIS: Make sure the regular expression does not include
+        # the ')' in the ip group
+        target_ip = target_ip.replace(")","")
+        target_hostname = m.groupdict()["hostname"]
+        time = m.groupdict()["time"]
+        data.append((target_ip, target_hostname, time))
+
+    f.close()
+
+    return data
+
+def annotate_cn_node(graph, nid1, ips2nid, data):
+    for (target_ip, target_hostname, time) in data:
+        nid2 = ips2nid[target_ip]
+
+        if "delays" not in graph.edge[nid1][nid2]:
+            graph.edge[nid1][nid2]["delays"] = []
+
+        time = float(time.replace("ms", "").replace(" ",""))
+
+        graph.edge[nid1][nid2]["delays"].append(time)
+
+def annotate_cn_graph(logs_dir, graph): 
+    """ Add delay inormation to graph using data collected using
+    ping.
+
+    """
+    ips2nid = dict()
+
+    for nid in graph.nodes():
+        ips = graph.node[nid]["ips"]
+        for ip in ips:
+            ips2nid[ip] = nid
+
+    # Walk through the ping logs...
+    found_files = False
+
+    for dirpath, dnames, fnames in os.walk(logs_dir):
+        # continue if we are not at the leaf level (if there are subdirectories)
+        if dnames: 
+            continue
+        
+        # Each dirpath corresponds to a different host
+        nid = os.path.basename(dirpath)
+    
+        for fname in fnames:
+            if fname.endswith(".ping"):
+                found_files = True
+                filename = os.path.join(dirpath, fname)
+                data = parse_file(filename)
+                annotate_cn_node(graph, nid, ips2nid, data)
+
+    if not found_files:
+        msg = "No PING output files were found to parse at %s " % logs_dir 
+        raise RuntimeError, msg
+
+    # Take as weight the most frequent value
+    for nid1, nid2 in graph.edges():
+        delays = collections.Counter(graph.edge[nid1][nid2]["delays"])
+        weight = delays.most_common(1)[0][0]
+        del graph.edge[nid1][nid2]["delays"]
+        graph.edge[nid1][nid2]["weight"] = weight
+
+    return graph
+
+
diff --git a/src/nepi/design/__init__.py b/src/nepi/design/__init__.py
deleted file mode 100644 (file)
index 013e4b7..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#!/usr/bin/python
diff --git a/src/nepi/design/box.py b/src/nepi/design/box.py
deleted file mode 100644 (file)
index 2da0710..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-#
-#    NEPI, a framework to manage network experiments
-#    Copyright (C) 2013 INRIA
-#
-#    This program is free software: you can redistribute it and/or modify
-#    it under the terms of the GNU General Public License as published by
-#    the Free Software Foundation, either version 3 of the License, or
-#    (at your option) any later version.
-#
-#    This program is distributed in the hope that it will be useful,
-#    but WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#    GNU General Public License for more details.
-#
-#    You should have received a copy of the GNU General Public License
-#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-#
-# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
-
-from nepi.util import guid
-
-guid_gen = guid.GuidGenerator()
-
-class Attributes(object):
-    def __init__(self):
-        super(Attributes, self).__init__()
-        self._attributes = dict()
-
-    def __getattr__(self, name):
-        try:
-            return self._attributes[name]
-        except:
-            return super(Attributes, self).__getattribute__(name)
-
-    def __setattr__(self, name, value):
-        try:
-            if value == None:
-                old = self._attributes[name]
-                del self._attributes[name]
-                return old
-
-            self._attributes[name] = value
-            return value
-        except:
-            return super(Attributes, self).__setattr__(name, value)
-
-class Connections(object):
-    def __init__(self):
-        super(Connections, self).__init__()
-        self._connections = set()
-
-    def __getattr__(self, guid_or_label):
-        try:
-            for b in self._connections:
-                if guid_or_label in [b.guid, b.label]:
-                    return b
-        except:
-            return super(Connections, self).__getattribute__(guid_or_label)
-
-class Box(object):
-    def __init__(self, label = None, guid = None):
-        super(Box, self).__init__()
-        self._guid = guid_gen.next(guid)
-        self._a = Attributes()
-        self._c = Connections()
-        self._tags = set()
-        self.label = label or self._guid
-
-        # Graphical information to draw box
-        self.x = 0
-        self.y = 0
-        self.width = 4
-        self.height = 4
-
-    @property
-    def tags(self):
-        return self._tags
-
-    @property
-    def attributes(self):
-        return self._a._attributes.keys()
-
-    @property
-    def a(self):
-        return self._a
-
-    @property
-    def c(self):
-        return self._c
-
-    @property
-    def guid(self):
-        return self._guid
-
-    @property
-    def connections(self):
-        return set(self._c._connections)
-
-    def tadd(self, name):
-        self._tags.add(name)
-
-    def tdel(self, name):
-        self._tags.remove(name)
-
-    def thas(self, name):
-        return name in self._tags
-
-    def connect(self, box, cascade = True):
-        self._c._connections.add(box)
-        if cascade:
-            box.connect(self, cascade = False)
-
-    def disconnect(self, box, cascade = True):
-        self._c._connections.remove(box)
-        if cascade:
-            box.disconnect(self, cascade = False)
-
-    def is_connected(self, box):
-        return box in self.connections
-
index fa2b104..bf0a853 100644 (file)
@@ -55,6 +55,7 @@ class Flags:
     # Attribute global is set to all resources of rtype
     Global  = 1 << 7 # 128
 
+
 class Attribute(object):
     """
     .. class:: Class Args :
@@ -190,6 +191,7 @@ class Attribute(object):
         adequate validation"""
         return True
 
+    @property
     def has_changed(self):
         """ Returns true if the value has changed from the default """
         return self.value != self.default
index 257742e..2e573dd 100644 (file)
@@ -24,6 +24,9 @@ from nepi.execution.resource import ResourceFactory, ResourceAction, \
         ResourceState, ResourceState2str
 from nepi.execution.scheduler import HeapScheduler, Task, TaskStatus
 from nepi.execution.trace import TraceAttr
+from nepi.util.serializer import ECSerializer, SFormats
+from nepi.util.plotter import ECPlotter, PFormats
+from nepi.util.netgraph import NetGraph, TopologyType 
 
 # TODO: use multiprocessing instead of threading
 # TODO: Allow to reconnect to a running experiment instance! (reconnect mode vs deploy mode)
@@ -32,6 +35,7 @@ import functools
 import logging
 import os
 import sys
+import tempfile
 import time
 import threading
 import weakref
@@ -98,7 +102,7 @@ class ExperimentController(object):
     .. note::
 
     An experiment, or scenario, is defined by a concrete set of resources,
-    behavior, configuration and interconnection of those resources. 
+    and the behavior, configuration and interconnection of those resources. 
     The Experiment Description (ED) is a detailed representation of a
     single experiment. It contains all the necessary information to 
     allow repeating the experiment. NEPI allows to describe
@@ -113,7 +117,7 @@ class ExperimentController(object):
     recreated (and re-run) by instantiating an EC and recreating 
     the same experiment description. 
 
-    In NEPI, an experiment is represented as a graph of interconnected
+    An experiment is represented as a graph of interconnected
     resources. A resource is a generic concept in the sense that any
     component taking part of an experiment, whether physical or
     virtual, is considered a resource. A resource could be a host,
@@ -123,10 +127,9 @@ class ExperimentController(object):
     single resource. ResourceManagers are specific to a resource
     type (i.e. An RM to control a Linux application will not be
     the same as the RM used to control a ns-3 simulation).
-    To support a new type of resource in NEPI, a new RM must be 
-    implemented. NEPI already provides a variety of
-    RMs to control basic resources, and new can be extended from
-    the existing ones.
+    To support a new type of resource, a new RM must be implemented. 
+    NEPI already provides a variety of RMs to control basic resources,
+    and new ones can be extended from the existing ones.
 
     Through the EC interface the user can create ResourceManagers (RMs),
     configure them and interconnect them, to describe an experiment.
@@ -148,10 +151,40 @@ class ExperimentController(object):
     exp_id, which can be re-used in different ExperimentController,
     and the run_id, which is unique to one ExperimentController instance, and
     is automatically generated by NEPI.
-        
+   
     """
 
-    def __init__(self, exp_id = None): 
+    @classmethod
+    def load(cls, filepath, format = SFormats.XML):
+        serializer = ECSerializer()
+        ec = serializer.load(filepath)
+        return ec
+
+    def __init__(self, exp_id = None, local_dir = None, persist = False,
+            add_node_callback = None, add_edge_callback = None, **kwargs):
+        """ ExperimentController entity to model an execute a network 
+        experiment.
+        
+        :param exp_id: Human readable name to identify the experiment
+        :type exp_id: str
+
+        :param local_dir: Path to local directory where to store experiment
+            related files
+        :type local_dir: str
+
+        :param persist: Save an XML description of the experiment after 
+        completion at local_dir
+        :type persist: bool
+
+        :param add_node_callback: Callback to invoke for node instantiation
+        when automatic topology creation mode is used 
+        :type add_node_callback: function
+
+        :param add_edge_callback: Callback to invoke for edge instantiation 
+        when automatic topology creation mode is used 
+        :type add_edge_callback: function
+
+        """
         super(ExperimentController, self).__init__()
 
         # Logging
@@ -169,6 +202,17 @@ class ExperimentController(object):
         # resources used, etc)
         self._exp_id = exp_id or "exp-%s" % os.urandom(8).encode('hex')
 
+        # Local path where to store experiment related files (results, etc)
+        if not local_dir:
+            local_dir = tempfile.gettempdir() # /tmp
+
+        self._local_dir = local_dir
+        self._exp_dir = os.path.join(local_dir, self.exp_id)
+        self._run_dir = os.path.join(self.exp_dir, self.run_id)
+
+        # If True persist the experiment controller in XML format, after completion
+        self._persist = persist
+
         # generator of globally unique ids
         self._guid_generator = guid.GuidGenerator()
         
@@ -198,10 +242,16 @@ class ExperimentController(object):
         # EC state
         self._state = ECState.RUNNING
 
+        # Automatically construct experiment description 
+        self._netgraph = None
+        if add_node_callback or add_edge_callback or kwargs.get("topology"):
+            self._build_from_netgraph(add_node_callback, add_edge_callback, 
+                    **kwargs)
+
         # The runner is a pool of threads used to parallelize 
         # execution of tasks
-        nthreads = int(os.environ.get("NEPI_NTHREADS", "20"))
-        self._runner = ParallelRun(maxthreads = nthreads)
+        self._nthreads = 20
+        self._runner = None
 
         # Event processing thread
         self._cond = threading.Condition()
@@ -246,6 +296,52 @@ class ExperimentController(object):
         """
         return self._run_id
 
+    @property
+    def nthreads(self):
+        """ Returns the number of processing nthreads used
+
+        """
+        return self._nthreads
+
+    @property
+    def local_dir(self):
+        """ Root local directory for experiment files
+
+        """
+        return self._local_dir
+
+    @property
+    def exp_dir(self):
+        """ Local directory to store results and other files related to the 
+        experiment.
+
+        """
+        return self._exp_dir
+
+    @property
+    def run_dir(self):
+        """ Local directory to store results and other files related to the 
+        experiment run.
+
+        """
+        return self._run_dir
+
+    @property
+    def persist(self):
+        """ If True, persists the ExperimentController to XML format upon 
+        experiment completion
+
+        """
+        return self._persist
+
+    @property
+    def netgraph(self):
+        """ Return NetGraph instance if experiment description was automatically 
+        generated
+
+        """
+        return self._netgraph
+
     @property
     def abort(self):
         """ Returns True if the experiment has failed and should be interrupted,
@@ -365,7 +461,31 @@ class ExperimentController(object):
                 guids.append(guid)
 
                 time.sleep(0.5)
-  
+
+    def plot(self, dirpath = None, format= PFormats.FIGURE, show = False):
+        plotter = ECPlotter()
+        fpath = plotter.plot(self, dirpath = dirpath, format= format, 
+                show = show)
+        return fpath
+
+    def serialize(self, format = SFormats.XML):
+        serializer = ECSerializer()
+        sec = serializer.load(self, format = format)
+        return sec
+
+    def save(self, dirpath = None, format = SFormats.XML):
+        if dirpath == None:
+            dirpath = self.run_dir
+
+        try:
+            os.makedirs(dirpath)
+        except OSError:
+            pass
+
+        serializer = ECSerializer()
+        path = serializer.save(self, dirpath, format = format)
+        return path
+
     def get_task(self, tid):
         """ Returns a task by its id
 
@@ -380,7 +500,7 @@ class ExperimentController(object):
     def get_resource(self, guid):
         """ Returns a registered ResourceManager by its guid
 
-            :param guid: Id of the task
+            :param guid: Id of the resource
             :type guid: int
             
             :rtype: ResourceManager
@@ -389,21 +509,51 @@ class ExperimentController(object):
         rm = self._resources.get(guid)
         return rm
 
+    def get_resources_by_type(self, rtype):
+        """ Returns the ResourceManager objects of type rtype
+
+            :param rtype: Resource type
+            :type rtype: string
+            
+            :rtype: list of ResourceManagers
+            
+        """
+        rms = []
+        for guid, rm in self._resources.iteritems():
+            if rm.get_rtype() == rtype: 
+                rms.append(rm)
+        return rms
+
     def remove_resource(self, guid):
         del self._resources[guid]
 
     @property
     def resources(self):
-        """ Returns the set() of guids of all the ResourceManager
+        """ Returns the guids of all ResourceManagers 
 
             :return: Set of all RM guids
-            :rtype: set
+            :rtype: list
 
         """
         keys = self._resources.keys()
 
         return keys
 
+    def filter_resources(self, rtype):
+        """ Returns the guids of all ResourceManagers of type rtype
+
+            :param rtype: Resource type
+            :type rtype: string
+            
+            :rtype: list of guids
+            
+        """
+        rms = []
+        for guid, rm in self._resources.iteritems():
+            if rm.get_rtype() == rtype: 
+                rms.append(rm.guid)
+        return rms
+
     def register_resource(self, rtype, guid = None):
         """ Registers a new ResourceManager of type 'rtype' in the experiment
         
@@ -895,6 +1045,9 @@ class ExperimentController(object):
 
         self.wait_released(guids)
 
+        if self.persist:
+            self.save()
+
         for guid in guids:
             if self.get(guid, "hardRelease"):
                 self.remove_resource(guid)
@@ -1000,6 +1153,8 @@ class ExperimentController(object):
 
         """
 
+        self._nthreads = int(os.environ.get("NEPI_NTHREADS", str(self._nthreads)))
+        self._runner = ParallelRun(maxthreads = self.nthreads)
         self._runner.start()
 
         while not self._stop:
@@ -1076,3 +1231,19 @@ class ExperimentController(object):
         self._cond.notify()
         self._cond.release()
 
+    def _build_from_netgraph(self, add_node_callback, add_edge_callback, 
+            **kwargs):
+        """ Automates experiment description using a NetGraph instance.
+        """
+        self._netgraph = NetGraph(**kwargs)
+
+        if add_node_callback:
+            ### Add resources to the EC
+            for nid in self.netgraph.nodes():
+                add_node_callback(self, nid)
+
+        if add_edge_callback:
+            #### Add connections between resources
+            for nid1, nid2 in self.netgraph.edges():
+                add_edge_callback(self, nid1, nid2)
+
index e82ced0..0f75e9a 100644 (file)
@@ -31,7 +31,7 @@ import sys
 import threading
 import weakref
 
-reschedule_delay = "1s"
+reschedule_delay = "0.5s"
 
 class ResourceAction:
     """ Action that a user can order to a Resource Manager
@@ -113,11 +113,13 @@ def failtrap(func):
         try:
             return func(self, *args, **kwargs)
         except:
+            self.fail()
+            
             import traceback
             err = traceback.format_exc()
-            self.error(err)
-            self.debug("SETTING guid %d to state FAILED" % self.guid)
-            self.fail()
+            logger = Logger(self._rtype)
+            logger.error(err)
+            logger.error("SETTING guid %d to state FAILED" % self.guid)
             raise
     
     return wrapped
@@ -560,11 +562,14 @@ class ResourceManager(Logger):
             try:
                 self.do_release()
             except:
+                self.set_released()
+
                 import traceback
                 err = traceback.format_exc()
-                self.error(err)
-
-                self.set_released()
+                msg = " %s guid %d ----- FAILED TO RELEASE ----- \n %s " % (
+                        self._rtype, self.guid, err)
+                logger = Logger(self._rtype)
+                logger.debug(msg)
 
     def fail(self):
         """ Sets the RM to state FAILED.
@@ -615,7 +620,7 @@ class ResourceManager(Logger):
         :rtype: str
         """
         attr = self._attrs[name]
-        return attr.has_changed()
+        return attr.has_changed
 
     def has_flag(self, name, flag):
         """ Returns true if the attribute has the flag 'flag'
@@ -749,6 +754,18 @@ class ResourceManager(Logger):
                 connected.append(rm)
         return connected
 
+    def is_rm_instance(self, rtype):
+        """ Returns True if the RM is instance of 'rtype'
+
+        :param rtype: Type of the RM we look for
+        :type rtype: str
+        :return: True|False
+        """
+        rclass = ResourceFactory.get_resource_type(rtype)
+        if isinstance(self, rclass):
+            return True
+        return False
+
     @failtrap
     def _needs_reschedule(self, group, state, time):
         """ Internal method that verify if 'time' has elapsed since 
@@ -1038,12 +1055,18 @@ class ResourceManager(Logger):
     def set_released(self, time = None):
         """ Mark ResourceManager as REALEASED """
         self.set_state(ResourceState.RELEASED, "_release_time", time)
-        self.debug("----- RELEASED ---- ")
+
+        msg = " %s guid %d ----- RELEASED ----- " % (self._rtype, self.guid)
+        logger = Logger(self._rtype)
+        logger.debug(msg)
 
     def set_failed(self, time = None):
         """ Mark ResourceManager as FAILED """
         self.set_state(ResourceState.FAILED, "_failed_time", time)
-        self.debug("----- FAILED ---- ")
+
+        msg = " %s guid %d ----- FAILED ----- " % (self._rtype, self.guid)
+        logger = Logger(self._rtype)
+        logger.debug(msg)
 
     def set_discovered(self, time = None):
         """ Mark ResourceManager as DISCOVERED """
diff --git a/src/nepi/execution/runner.py b/src/nepi/execution/runner.py
new file mode 100644 (file)
index 0000000..3a09ff4
--- /dev/null
@@ -0,0 +1,155 @@
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController, ECState
+
+import math
+import numpy
+import os
+import time
+
+class ExperimentRunner(object):
+    """ The ExperimentRunner entity is reponsible of
+    re-running an experiment described by an ExperimentController 
+    multiple time.
+
+    """
+    def __init__(self):
+        super(ExperimentRunner, self).__init__()
+    
+    def run(self, ec, min_runs = 1, max_runs = -1, wait_time = 0, 
+            wait_guids = [], compute_metric_callback = None, 
+            evaluate_convergence_callback = None ):
+        """ Re-runs a same experiment multiple times
+
+        :param ec: ExperimentController with the description of the experiment to run
+        :type ec: ExperimentController
+
+        :param min_runs: Minimum number of repetitions for experiment
+        :type min_runs: int
+
+        :param max_runs: Maximum number of repetitions for experiment
+        :type max_runs: int
+
+        :param wait_time: Time to wait in seconds between invoking
+            ec.deploy() and ec.release()
+        :type wait_time: float
+
+        :param wait_guids: List of guids to pass to ec.wait_finished
+            after invoking ec.deploy()
+        :type wait_guids: list 
+
+        :param compute_metric_callback: function to invoke after each 
+            experiment run, to compute an experiment metric. 
+            It will be invoked with the ec and the run count as arguments,
+            and it must return a numeric value for the computed metric:
+
+                metric = compute_metric_callback(ec, run)
+            
+        :type compute_metric_callback: function 
+
+        :param evaluate_convergence_callback: function to evaluate whether the 
+            collected metric samples have converged and the experiment runner
+            can stop. It will be invoked with the ec, the run count and the
+            list of collected metric samples as arguments, and it must return
+            either True or False:
+
+                stop = evaluate_convergence_callback(ec, run, metrics)
+
+            If stop is True, then the runner will exit.
+            
+        :type evaluate_convergence_callback: function 
+
+        """
+
+        if (not max_runs or max_runs < 0) and not compute_metric_callback:
+            msg = "Undefined STOP condition, set stop_callback or max_runs"
+            raise RuntimeError, msg
+
+        if compute_metric_callback and not evaluate_convergence_callback:
+            evaluate_convergence_callback = self.evaluate_normal_convergence
+            ec.logger.info(" Treating data as normal to evaluate convergence. "
+                    "Experiment will stop when the standard error with 95% "
+                    "confidence interval is >= 5% of the mean of the collected samples ")
+        
+        # Force persistence of experiment controller
+        ec._persist = True
+
+        filepath = ec.save(dirpath = ec.exp_dir)
+
+        samples = []
+        run = 0
+        stop = False
+
+        while not stop: 
+            run += 1
+
+            ec = self.run_experiment(filepath, wait_time, wait_guids)
+            
+            ec.logger.info(" RUN %d \n" % run)
+
+            if compute_metric_callback:
+                metric = compute_metric_callback(ec, run)
+                if metric is not None:
+                    samples.append(metric)
+
+                    if run >= min_runs and evaluate_convergence_callback:
+                        if evaluate_convergence_callback(ec, run, samples):
+                            stop = True
+
+            if run >= min_runs and max_runs > -1 and run >= max_runs :
+                stop = True
+
+            del ec
+
+        return run
+
+    def evaluate_normal_convergence(self, ec, run, samples):
+        if len(samples) == 0:
+            msg = "0 samples collected"
+            raise RuntimeError, msg
+        
+        x = numpy.array(samples)
+        n = len(samples)
+        std = x.std()
+        se = std / math.sqrt(n)
+        m = x.mean()
+        se95 = se * 2
+        
+        ec.logger.info(" RUN %d - SAMPLES %d MEAN %.2f STD %.2f SE95%% %.2f \n" % (
+            run, n, m, std, se95 ) )
+
+        return m * 0.05 >= se95
+
+    def run_experiment(self, filepath, wait_time, wait_guids): 
+        ec = ExperimentController.load(filepath)
+
+        ec.deploy()
+    
+        ec.wait_finished(wait_guids)
+        time.sleep(wait_time)
+
+        ec.release()
+
+        if ec.state == ECState.FAILED:
+            raise RuntimeError, "Experiment failed"
+
+        return ec
+
+
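A minimal usage sketch of the new runner, assuming an experiment has already been described in an ExperimentController `ec` and that `app_guid` is the guid of the application RM whose trace provides the metric; the metric shown (a line count of the stdout trace) is purely illustrative, a real callback would parse a meaningful value such as an RTT:

    from nepi.execution.runner import ExperimentRunner

    def compute_metric(ec, run):
        # Illustrative metric: number of lines in the application's
        # stdout trace for this run
        out = ec.trace(app_guid, "stdout")
        return len(out.splitlines())

    runner = ExperimentRunner()
    runs = runner.run(ec, min_runs = 10, max_runs = 100,
            wait_guids = [app_guid],
            compute_metric_callback = compute_metric)

With the default convergence test the experiment is re-run until the standard error at 95% confidence (2 * std / sqrt(n)) drops to 5% of the sample mean or below, or until max_runs is reached.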
index bb8c1c8..6742fc8 100644 (file)
@@ -28,7 +28,7 @@ import tempfile
 
 @clsinit_copy
 class Collector(ResourceManager):
-    """ The collector is reponsible of collecting traces
+    """ The collector entity is reponsible of collecting traces
     of the same type associated with RMs into a local directory.
 
     .. class:: Class Args :
@@ -51,18 +51,6 @@ class Collector(ResourceManager):
                 "Name of the trace to be collected", 
                 flags = Flags.Design)
 
-        store_dir = Attribute("storeDir", 
-                "Path to local directory to store trace results", 
-                default = tempfile.gettempdir(),
-                flags = Flags.Design)
-
-        use_run_id = Attribute("useRunId", 
-                "If set to True stores traces into a sub directory named after "
-                "the RUN ID assigned by the EC", 
-                type = Types.Bool,
-                default = False,
-                flags = Flags.Design)
-
         sub_dir = Attribute("subDir", 
                 "Sub directory to collect traces into", 
                 flags = Flags.Design)
@@ -72,10 +60,8 @@ class Collector(ResourceManager):
                 flags = Flags.Design)
 
         cls._register_attribute(trace_name)
-        cls._register_attribute(store_dir)
         cls._register_attribute(sub_dir)
         cls._register_attribute(rename)
-        cls._register_attribute(use_run_id)
 
     def __init__(self, ec, guid):
         super(Collector, self).__init__(ec, guid)
@@ -94,17 +80,14 @@ class Collector(ResourceManager):
             self.error(msg)
             raise RuntimeError, msg
 
-        self._store_path = self.get("storeDir")
-
-        if self.get("useRunId"):
-            self._store_path = os.path.join(self._store_path, self.ec.run_id)
+        self._store_path = self.ec.run_dir
 
         subdir = self.get("subDir")
         if subdir:
-            self._store_path = os.path.join(self._store_path, subdir)
+            self._store_path = os.path.join(self.store_path, subdir)
         
         msg = "Creating local directory at %s to store %s traces " % (
-            self._store_path, trace_name)
+                self.store_path, trace_name)
         self.info(msg)
 
         try:
@@ -130,10 +113,11 @@ class Collector(ResourceManager):
 
         rms = self.get_connected()
         for rm in rms:
-            result = self.ec.trace(rm.guid, trace_name)
             fpath = os.path.join(self.store_path, "%d.%s" % (rm.guid, 
-                rename))
+                 rename))
+
             try:
+                result = self.ec.trace(rm.guid, trace_name)
                 f = open(fpath, "w")
                 f.write(result)
                 f.close()
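For context, a short sketch of how a Collector is typically wired to another RM so that its traces end up under the run directory; the "Collector" rtype string, the `ec` controller and the `node` guid are assumptions here, while "LinuxPing", "target", "traceName" and "subDir" all appear in this change set:

    ping = ec.register_resource("LinuxPing")
    ec.set(ping, "target", "nepi.inria.fr")
    ec.register_connection(ping, node)

    collector = ec.register_resource("Collector")
    ec.set(collector, "traceName", "stdout")
    ec.set(collector, "subDir", "ping")
    ec.register_connection(collector, ping)

When the experiment is released, the collected stdout traces are copied into the 'ping' subdirectory of the run directory, one file per connected RM named after its guid.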
index 86cf9c9..ca9772d 100644 (file)
@@ -173,7 +173,9 @@ class LinuxApplication(ResourceManager):
         super(LinuxApplication, self).__init__(ec, guid)
         self._pid = None
         self._ppid = None
+        self._node = None
         self._home = "app-%s" % self.guid
+
         # whether the command should run in foreground attached
         # to a terminal
         self._in_foreground = False
@@ -194,9 +196,16 @@ class LinuxApplication(ResourceManager):
 
     @property
     def node(self):
-        node = self.get_connected(LinuxNode.get_rtype())
-        if node: return node[0]
-        return None
+        if not self._node:
+            node = self.get_connected(LinuxNode.get_rtype())
+            if not node: 
+                msg = "Application %s guid %d NOT connected to Node" % (
+                        self._rtype, self.guid)
+                raise RuntimeError, msg
+
+            self._node = node[0]
+
+        return self._node
 
     @property
     def app_home(self):
@@ -639,7 +648,7 @@ class LinuxApplication(ResourceManager):
                     if (proc and proc.poll()) or err:
                         msg = " Failed to STOP command '%s' " % self.get("command")
                         self.error(msg, out, err)
-        
+            
             super(LinuxApplication, self).do_stop()
 
     def do_release(self):
@@ -711,6 +720,7 @@ class LinuxApplication(ResourceManager):
     def execute_command(self, command, 
             env = None,
             sudo = False,
+            tty = False,
             forward_x11 = False,
             blocking = False):
 
@@ -722,6 +732,7 @@ class LinuxApplication(ResourceManager):
 
         return self.node.execute(command,
                 sudo = sudo,
+                tty = tty,
                 forward_x11 = forward_x11,
                 blocking = blocking)
 
index ffaee50..1fc8979 100644 (file)
@@ -282,7 +282,7 @@ class LinuxCCND(LinuxApplication):
 
     @property
     def _sources(self):
-        return "http://www.ccnx.org/releases/ccnx-0.8.1.tar.gz"
+        return "http://www.ccnx.org/releases/ccnx-0.8.2.tar.gz"
 
     @property
     def _build(self):
index 8bf9bd1..4b7e9c4 100644 (file)
@@ -85,14 +85,37 @@ class LinuxFIBEntry(LinuxApplication):
         super(LinuxFIBEntry, self).__init__(ec, guid)
         self._home = "fib-%s" % self.guid
         self._ping = None
-        self._mtr = None
         self._traceroute = None
+        self._ccnd = None
 
     @property
     def ccnd(self):
-        ccnd = self.get_connected(LinuxCCND.get_rtype())
-        if ccnd: return ccnd[0]
-        return None
+        if not self._ccnd:
+            ccnd = self.get_connected(LinuxCCND.get_rtype())
+            if ccnd: 
+                self._ccnd = ccnd[0]
+            
+        return self._ccnd
+
+    @property
+    def ping(self):
+        if not self._ping:
+            from nepi.resources.linux.ping import LinuxPing
+            ping = self.get_connected(LinuxPing.get_rtype())
+            if ping: 
+                self._ping = ping[0]
+            
+        return self._ping
+
+    @property
+    def traceroute(self):
+        if not self._traceroute:
+            from nepi.resources.linux.traceroute import LinuxTraceroute
+            traceroute = self.get_connected(LinuxTraceroute.get_rtype())
+            if traceroute: 
+                self._traceroute = traceroute[0]
+            
+        return self._traceroute
 
     @property
     def node(self):
@@ -101,11 +124,14 @@ class LinuxFIBEntry(LinuxApplication):
 
     def trace(self, name, attr = TraceAttr.ALL, block = 512, offset = 0):
         if name == "ping":
-            return self.ec.trace(self._ping, "stdout", attr, block, offset)
-        if name == "mtr":
-            return self.ec.trace(self._mtr, "stdout", attr, block, offset)
+            if not self.ping:
+                return None
+            return self.ec.trace(self.ping.guid, "stdout", attr, block, offset)
+
         if name == "traceroute":
-            return self.ec.trace(self._traceroute, "stdout", attr, block, offset)
+            if not self.traceroute:
+                return None
+            return self.ec.trace(self.traceroute.guid, "stdout", attr, block, offset)
 
         return super(LinuxFIBEntry, self).trace(name, attr, block, offset)
     
@@ -159,38 +185,28 @@ class LinuxFIBEntry(LinuxApplication):
             raise RuntimeError, msg
         
     def configure(self):
-        if self.trace_enabled("ping"):
+        if self.trace_enabled("ping") and not self.ping:
             self.info("Configuring PING trace")
-            self._ping = self.ec.register_resource("LinuxPing")
-            self.ec.set(self._ping, "printTimestamp", True)
-            self.ec.set(self._ping, "target", self.get("host"))
-            self.ec.set(self._ping, "earlyStart", True)
-            self.ec.register_connection(self._ping, self.node.guid)
+            ping = self.ec.register_resource("LinuxPing")
+            self.ec.set(ping, "printTimestamp", True)
+            self.ec.set(ping, "target", self.get("host"))
+            self.ec.set(ping, "earlyStart", True)
+            self.ec.register_connection(ping, self.node.guid)
+            self.ec.register_connection(ping, self.guid)
             # schedule ping deploy
-            self.ec.deploy(guids=[self._ping], group = self.deployment_group)
-
-        if self.trace_enabled("mtr"):
-            self.info("Configuring MTR trace")
-            self._mtr = self.ec.register_resource("LinuxMtr")
-            self.ec.set(self._mtr, "noDns", True)
-            self.ec.set(self._mtr, "printTimestamp", True)
-            self.ec.set(self._mtr, "continuous", True)
-            self.ec.set(self._mtr, "target", self.get("host"))
-            self.ec.set(self._mtr, "earlyStart", True)
-            self.ec.register_connection(self._mtr, self.node.guid)
-            # schedule mtr deploy
-            self.ec.deploy(guids=[self._mtr], group = self.deployment_group)
+            self.ec.deploy(guids=[ping], group = self.deployment_group)
 
-        if self.trace_enabled("traceroute"):
+        if self.trace_enabled("traceroute") and not self.traceroute:
             self.info("Configuring TRACEROUTE trace")
-            self._traceroute = self.ec.register_resource("LinuxTraceroute")
-            self.ec.set(self._traceroute, "printTimestamp", True)
-            self.ec.set(self._traceroute, "continuous", True)
-            self.ec.set(self._traceroute, "target", self.get("host"))
-            self.ec.set(self._traceroute, "earlyStart", True)
-            self.ec.register_connection(self._traceroute, self.node.guid)
+            traceroute = self.ec.register_resource("LinuxTraceroute")
+            self.ec.set(traceroute, "printTimestamp", True)
+            self.ec.set(traceroute, "continuous", True)
+            self.ec.set(traceroute, "target", self.get("host"))
+            self.ec.set(traceroute, "earlyStart", True)
+            self.ec.register_connection(traceroute, self.node.guid)
+            self.ec.register_connection(traceroute, self.guid)
             # schedule traceroute deploy
-            self.ec.deploy(guids=[self._traceroute], group = self.deployment_group)
+            self.ec.deploy(guids=[traceroute], group = self.deployment_group)
 
     def do_start(self):
         if self.state == ResourceState.READY:
index 62f94cf..d69ac30 100644 (file)
@@ -47,7 +47,7 @@ class LinuxGRETunnel(LinuxTunnel):
         connected = []
         for guid in self.connections:
             rm = self.ec.get_resource(guid)
-            if hasattr(rm, "gre_connect_command"):
+            if hasattr(rm, "gre_connect"):
                 connected.append(rm)
         return connected
 
@@ -55,48 +55,11 @@ class LinuxGRETunnel(LinuxTunnel):
         # Return the command to execute to initiate the connection to the
         # other endpoint
         connection_run_home = self.run_home(endpoint)
-        gre_connect_command = endpoint.gre_connect_command(
-                remote_endpoint, connection_run_home)
-
-        # upload command to connect.sh script
-        shfile = os.path.join(self.app_home(endpoint), "gre-connect.sh")
-        endpoint.node.upload(gre_connect_command,
-                shfile,
-                text = True, 
-                overwrite = False)
-
-        # invoke connect script
-        cmd = "bash %s" % shfile
-        (out, err), proc = endpoint.node.run(cmd, self.run_home(endpoint)) 
-             
-        # check if execution errors occurred
-        msg = " Failed to connect endpoints "
-        
-        if proc.poll() or err:
-            self.error(msg, out, err)
-            raise RuntimeError, msg
-    
-        # Wait for pid file to be generated
-        pid, ppid = endpoint.node.wait_pid(self.run_home(endpoint))
-        
-        # If the process is not running, check for error information
-        # on the remote machine
-        if not pid or not ppid:
-            (out, err), proc = endpoint.node.check_errors(self.run_home(endpoint))
-            # Out is what was written in the stderr file
-            if err:
-                msg = " Failed to start command '%s' " % command
-                self.error(msg, out, err)
-                raise RuntimeError, msg
-        
-        # After creating the TAP, the pl-vif-create.py script
-        # will write the name of the TAP to a file. We wait until
-        # we can read the interface name from the file.
-        vif_name = endpoint.wait_vif_name()
-        endpoint.set("deviceName", vif_name) 
-
-        # Wait if name
-        return True
+        connection_app_home = self.app_home(endpoint)
+        data = endpoint.gre_connect(remote_endpoint, 
+                connection_app_home,
+                connection_run_home) 
+        return data
 
     def establish_connection(self, endpoint, remote_endpoint, data):
         pass
index 01a1d40..232a9df 100644 (file)
@@ -28,6 +28,7 @@ import collections
 import os
 import random
 import re
+import socket
 import tempfile
 import time
 import threading
@@ -194,6 +195,10 @@ class LinuxNode(ResourceManager):
         gateway = Attribute("gateway", "Hostname of the gateway machine",
                 flags = Flags.Design)
 
+        ip = Attribute("ip", "Linux host public IP address. "
+                   "Must not be modified by the user unless hostname is 'localhost'",
+                    flags = Flags.Design)
+
         cls._register_attribute(hostname)
         cls._register_attribute(username)
         cls._register_attribute(port)
@@ -206,6 +211,7 @@ class LinuxNode(ResourceManager):
         cls._register_attribute(tear_down)
         cls._register_attribute(gateway_user)
         cls._register_attribute(gateway)
+        cls._register_attribute(ip)
 
     def __init__(self, ec, guid):
         super(LinuxNode, self).__init__(ec, guid)
@@ -278,8 +284,7 @@ class LinuxNode(ResourceManager):
         if self._os:
             return self._os
 
-        if self.get("hostname") not in ["localhost", "127.0.0.1"] and \
-                not self.get("username"):
+        if not self.localhost and not self.get("username"):
             msg = "Can't resolve OS, insufficient data "
             self.error(msg)
             raise RuntimeError, msg
@@ -333,7 +338,7 @@ class LinuxNode(ResourceManager):
 
     @property
     def localhost(self):
-        return self.get("hostname") in ['localhost', '127.0.0.7', '::1']
+        return self.get("hostname") in ['localhost', '127.0.0.1', '::1']
 
     def do_provision(self):
         # check if host is alive
@@ -362,6 +367,15 @@ class LinuxNode(ResourceManager):
 
         self.mkdir(paths)
 
+        # Get Public IP address
+        if not self.get("ip"):
+            if self.localhost:
+                ip = socket.gethostbyname(socket.gethostname())
+            else:
+                ip = socket.gethostbyname(self.get("hostname"))
+
+            self.set("ip", ip)
+
         super(LinuxNode, self).do_provision()
 
     def do_deploy(self):
@@ -404,8 +418,8 @@ class LinuxNode(ResourceManager):
 
     def clean_processes(self):
         self.info("Cleaning up processes")
-        if self.get("hostname") in ["localhost", "127.0.0.2"]:
+
+        if self.localhost:
             return 
         
         if self.get("username") != 'root':
@@ -539,7 +553,8 @@ class LinuxNode(ResourceManager):
             stdout = 'stdout', 
             stderr = 'stderr', 
             sudo = False,
-            tty = False):
+            tty = False,
+            strict_host_checking = False):
         
         self.debug("Running command '%s'" % command)
         
@@ -570,7 +585,8 @@ class LinuxNode(ResourceManager):
                     agent = True,
                     identity = self.get("identity"),
                     server_key = self.get("serverKey"),
-                    tty = tty
+                    tty = tty,
+                    strict_host_checking = strict_host_checking
                     )
 
         return (out, err), proc
@@ -589,7 +605,8 @@ class LinuxNode(ResourceManager):
                     gw = self.get("gateway"),
                     agent = True,
                     identity = self.get("identity"),
-                    server_key = self.get("serverKey")
+                    server_key = self.get("serverKey"),
+                    strict_host_checking = False
                     )
         
         return pidtuple
@@ -608,7 +625,8 @@ class LinuxNode(ResourceManager):
                         gw = self.get("gateway"),
                         agent = True,
                         identity = self.get("identity"),
-                        server_key = self.get("serverKey")
+                        server_key = self.get("serverKey"),
+                        strict_host_checking = False
                         )
            
         return status
@@ -633,7 +651,8 @@ class LinuxNode(ResourceManager):
                         agent = True,
                         sudo = sudo,
                         identity = self.get("identity"),
-                        server_key = self.get("serverKey")
+                        server_key = self.get("serverKey"),
+                        strict_host_checking = False
                         )
 
         return (out, err), proc
index 8bf4ab1..226acf0 100644 (file)
@@ -145,15 +145,15 @@ class LinuxNS3DceCCND(LinuxNS3CCNDceApplication):
                 " ecryptfs-utils-devel libxml2-devel automake gawk " 
                 " gcc gcc-c++ git pcre-devel make ")
         elif self.simulation.node.use_deb:
-            return ( " autoconf libssl-dev libexpat-dev libpcap-dev "
+            return ( " autoconf libssl-dev libexpat1-dev libpcap-dev "
                 " libecryptfs0 libxml2-utils automake gawk gcc g++ "
                 " git-core pkg-config libpcre3-dev make ")
         return ""
 
-
     @property
     def _sources(self):
-        return "http://www.ccnx.org/releases/ccnx-0.8.1.tar.gz"
+        #return "http://www.ccnx.org/releases/ccnx-0.8.1.tar.gz"
+        return "http://www.ccnx.org/releases/ccnx-0.8.2.tar.gz"
 
     @property
     def _build(self):
index 63e5d19..6137825 100644 (file)
@@ -53,11 +53,16 @@ class LinuxNS3DceFIBEntry(LinuxNS3CCNDceApplication):
                 "Peer host public IP used in network connection for this FIB entry. ",
                 flags = Flags.Design)
 
+        home = Attribute("home", "Sets HOME environmental variable. ",
+                default = "/root",
+            flags = Flags.Design)
         cls._register_attribute(uri)
         cls._register_attribute(protocol)
         cls._register_attribute(host)
         cls._register_attribute(port)
         cls._register_attribute(ip)
+        cls._register_attribute(home)
 
     def _instantiate_object(self):
         if not self.get("binary"):
@@ -66,8 +71,22 @@ class LinuxNS3DceFIBEntry(LinuxNS3CCNDceApplication):
         if not self.get("arguments"):
             self.set("arguments", self._arguments)
 
+        if not self.get("environment"):
+            self.set("environment", self._environment)
+        
         super(LinuxNS3DceFIBEntry, self)._instantiate_object()
 
+    @property
+    def _environment(self):
+        envs = dict({
+            "home": "HOME",
+            })
+
+        env = ";".join(map(lambda k: "%s=%s" % (envs.get(k), str(self.get(k))), 
+            [k for k in envs.keys() if self.get(k)]))
+
+        return env
+
     @property
     def _arguments(self):
         args = ["-v", "add"]
index f9b9322..63d8d37 100644 (file)
@@ -92,14 +92,12 @@ class LinuxNS3Simulation(LinuxApplication, NS3Simulation):
 
         ns3_version = Attribute("ns3Version",
             "Version of ns-3 to install from nsam repo",
-            #default = "ns-3.19", 
             default = "ns-3.20", 
             #default = "ns-3-dev", 
             flags = Flags.Design)
 
         pybindgen_version = Attribute("pybindgenVersion",
             "Version of pybindgen to install from bazar repo",
-            #default = "864", 
             default = "868", 
             #default = "876", 
             flags = Flags.Design)
@@ -116,7 +114,7 @@ class LinuxNS3Simulation(LinuxApplication, NS3Simulation):
             type = Types.Bool,
             flags = Flags.Design)
 
-        stoptime = Attribute("stopTime",
+        stoptime = Attribute("StopTime",
             "Time at which the simulation will stop",
             flags = Flags.Design)
 
@@ -308,6 +306,10 @@ class LinuxNS3Simulation(LinuxApplication, NS3Simulation):
             if self.get("populateRoutingTables") == True:
                 self.invoke(IPV4_GLOBAL_ROUTING_HELPER_UUID, "PopulateRoutingTables")
 
+            time = self.get("StopTime")
+            if time:
+                self._client.stop(time=time) 
+
             self._client.start()
 
             self.set_started()
@@ -321,11 +323,8 @@ class LinuxNS3Simulation(LinuxApplication, NS3Simulation):
 
         """
         if self.state == ResourceState.STARTED:
-            time = None
-            if self.get("stopTime"):
-                time = self.get("stopTime")
-
-            self._client.stop(time=time) 
+            if not self.get("StopTime"):
+                self._client.stop() 
             self.set_stopped()
 
     def do_release(self):
diff --git a/src/nepi/resources/linux/scripts/linux-udp-connect.py b/src/nepi/resources/linux/scripts/linux-udp-connect.py
new file mode 100644 (file)
index 0000000..ffd8740
--- /dev/null
@@ -0,0 +1,192 @@
+import errno
+import os
+import time
+import signal
+import socket
+import tunchannel
+import struct
+import fcntl
+
+from optparse import OptionParser
+
+IFF_TUN     = 0x0001
+IFF_TAP     = 0x0002
+IFF_NO_PI   = 0x1000
+TUNSETIFF   = 0x400454ca
+
+# Track SIGTERM, and set global termination flag instead of dying
+TERMINATE = []
+def _finalize(sig,frame):
+    global TERMINATE
+    TERMINATE.append(None)
+signal.signal(signal.SIGTERM, _finalize)
+
+# SIGUSR1 suspends forwarding, SIGUSR2 resumes forwarding
+SUSPEND = []
+def _suspend(sig,frame):
+    global SUSPEND
+    if not SUSPEND:
+        SUSPEND.append(None)
+signal.signal(signal.SIGUSR1, _suspend)
+
+def _resume(sig,frame):
+    global SUSPEND
+    if SUSPEND:
+        SUSPEND.remove(None)
+signal.signal(signal.SIGUSR2, _resume)
+
+def open_tap(vif_name, vif_type, pi):
+    flags = 0
+    flags |= vif_type
+
+    if not pi:
+        flags |= IFF_NO_PI
+
+    fd = os.open("/dev/net/tun", os.O_RDWR)
+
+    err = fcntl.ioctl(fd, TUNSETIFF, struct.pack("16sH", vif_name, flags))
+    if err < 0:
+        os.close(fd)
+        raise RuntimeError("Could not configure device %s" % vif_name)
+
+    return fd
+
+def get_options():
+    usage = ("usage: %prog -N <vif_name> -t <vif-type> -p <pi> "
+            "-b <bwlimit> -c <cipher> -k <cipher-key> -q <txqueuelen> " 
+            "-l <local-port-file> -r <remote-port-file> -H <remote-host> "
+            "-R <ret-file> ")
+    
+    parser = OptionParser(usage = usage)
+
+    parser.add_option("-N", "--vif-name", dest="vif_name",
+        help = "The name of the virtual interface",
+        type="str")
+
+    parser.add_option("-t", "--vif-type", dest="vif_type",
+        help = "Virtual interface type. Either IFF_TAP or IFF_TUN. "
+            "Defaults to IFF_TAP. ", 
+        default = IFF_TAP,
+        type="str")
+
+    parser.add_option("-n", "--pi", dest="pi", 
+            action="store_true", 
+            default = False,
+            help="Enable PI header")
+
+    parser.add_option("-b", "--bwlimit", dest="bwlimit",
+        help = "Specifies the interface's emulated bandwidth in bytes ",
+        default = None, type="int")
+
+    parser.add_option("-q", "--txqueuelen", dest="txqueuelen",
+        help = "Specifies the interface's transmission queue length. ",
+        default = 1000, type="int")
+
+    parser.add_option("-c", "--cipher", dest="cipher",
+        help = "Cipher to encript communication. "
+            "One of PLAIN, AES, Blowfish, DES, DES3. ",
+        default = None, type="str")
+
+    parser.add_option("-k", "--cipher-key", dest="cipher_key",
+        help = "Specify a symmetric encryption key with which to protect "
+            "packets across the tunnel. python-crypto must be installed "
+            "on the system." ,
+        default = None, type="str")
+
+    parser.add_option("-l", "--local-port-file", dest="local_port_file",
+        help = "File where to store the local binded UDP port number ", 
+        default = "local_port_file", type="str")
+
+    parser.add_option("-r", "--remote-port-file", dest="remote_port_file",
+        help = "File where to read the remote UDP port number to connect to", 
+        default = "remote_port_file", type="str")
+
+    parser.add_option("-H", "--remote-host", dest="remote_host",
+        help = "Remote host IP", 
+        default = "remote_host", type="str")
+
+    parser.add_option("-R", "--ret-file", dest="ret_file",
+        help = "File where to store return code (success of connection) ", 
+        default = "ret_file", type="str")
+
+    (options, args) = parser.parse_args()
+       
+    vif_type = IFF_TAP
+    if options.vif_type and options.vif_type == "IFF_TUN":
+        vif_type = IFF_TUN
+
+    return ( options.vif_name, vif_type, options.pi, 
+            options.local_port_file, options.remote_port_file, 
+            options.remote_host, options.ret_file, options.bwlimit, 
+            options.cipher, options.cipher_key, options.txqueuelen )
+
+if __name__ == '__main__':
+
+    ( vif_name, vif_type, pi, local_port_file, remote_port_file,
+      remote_host, ret_file, bwlimit, cipher, cipher_key, txqueuelen 
+         ) = get_options()
+   
+    # Open the TAP/TUN device and get its file descriptor
+    fd = open_tap(vif_name, vif_type, pi)
+
+    # Create a local socket to establish the tunnel connection
+    hostaddr = socket.gethostbyname(socket.gethostname())
+    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
+    sock.bind((hostaddr, 0))
+    (local_host, local_port) = sock.getsockname()
+
+    # Save local port information to file
+    f = open(local_port_file, 'w')
+    f.write("%d\n" % local_port)
+    f.close()
+
+    # Wait until remote port information is available
+    while not os.path.exists(remote_port_file):
+        time.sleep(2)
+
+    remote_port = ''
+    # Read remote port from file
+    # Try until something is read...
+    # xxx: There seems to be a weird behavior where
+    #       even if the file exists and had the port number,
+    #       the read operation returns empty string!
+    #       Maybe a race condition?
+    for i in xrange(10):
+        f = open(remote_port_file, 'r')
+        remote_port = f.read()
+        f.close()
+
+        if remote_port:
+            break
+        
+        time.sleep(2)
+    
+    remote_port = remote_port.strip()
+    remote_port = int(remote_port)
+
+    # Connect local socket to remote port
+    sock.connect((remote_host, remote_port))
+    remote = os.fdopen(sock.fileno(), 'r+b', 0)
+
+    # TODO: Test connectivity!    
+
+    # Create a ret_file to indicate success
+    f = open(ret_file, 'w')
+    f.write("0")
+    f.close()
+
+    # Wrap the TAP file descriptor in a file object and establish the tunnel
+    tun = os.fdopen(fd, 'r+b', 0)
+    tunchannel.tun_fwd(tun, remote,
+        with_pi = True, # Planetlab TAP devices add PI headers 
+        ether_mode = (vif_type == IFF_TAP),
+        udp = True,
+        cipher_key = cipher_key,
+        cipher = cipher,
+        TERMINATE = TERMINATE,
+        SUSPEND = SUSPEND,
+        tunqueue = txqueuelen,
+        tunkqueue = 500,
+        bwlimit = bwlimit
+    ) 
diff --git a/src/nepi/resources/linux/tap.py b/src/nepi/resources/linux/tap.py
new file mode 100644 (file)
index 0000000..b93ee80
--- /dev/null
@@ -0,0 +1,538 @@
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.attribute import Attribute, Flags, Types
+from nepi.execution.resource import clsinit_copy, ResourceState, \
+        reschedule_delay
+from nepi.resources.linux.application import LinuxApplication
+from nepi.resources.linux.node import LinuxNode
+from nepi.util.timefuncs import tnow, tdiffsec
+
+import os
+import time
+
+PYTHON_VSYS_VERSION = "1.0"
+
+@clsinit_copy
+class LinuxTap(LinuxApplication):
+    _rtype = "LinuxTap"
+    _help = "Creates a TAP device on a Linux host"
+    _backend = "linux"
+
+    @classmethod
+    def _register_attributes(cls):
+        endpoint_ip = Attribute("endpoint_ip", "IPv4 Address",
+              flags = Flags.Design)
+
+        mac = Attribute("mac", "MAC Address",
+                flags = Flags.Design)
+
+        endpoint_prefix = Attribute("endpoint_prefix", "IPv4 network prefix",
+                type = Types.Integer,
+                flags = Flags.Design)
+
+        mtu = Attribute("mtu", "Maximum transmition unit for device",
+                type = Types.Integer)
+
+        devname = Attribute("deviceName", 
+                "Name of the network interface (e.g. eth0, wlan0, etc)",
+                flags = Flags.NoWrite)
+
+        up = Attribute("up", "Link up", 
+                type = Types.Bool)
+        
+        pointopoint = Attribute("pointopoint", "Peer IP address", 
+                flags = Flags.Design)
+
+        txqueuelen = Attribute("txqueuelen", "Length of transmission queue", 
+                flags = Flags.Design)
+
+        gre_key = Attribute("greKey", 
+                "GRE key to be used to configure GRE tunnel", 
+                default = "1",
+                flags = Flags.Design)
+
+        gre_remote = Attribute("greRemote", 
+                "Public IP of remote endpoint for GRE tunnel", 
+                flags = Flags.Design)
+
+        pi = Attribute("pi", "Add PI (protocol information) header", 
+                default = False,
+                type = Types.Bool)
+        tear_down = Attribute("tearDown", 
+                "Bash script to be executed before releasing the resource",
+                flags = Flags.Design)
+
+        cls._register_attribute(endpoint_ip)
+        cls._register_attribute(mac)
+        cls._register_attribute(endpoint_prefix)
+        cls._register_attribute(mtu)
+        cls._register_attribute(devname)
+        cls._register_attribute(up)
+        cls._register_attribute(pointopoint)
+        cls._register_attribute(txqueuelen)
+        cls._register_attribute(gre_key)
+        cls._register_attribute(gre_remote)
+        cls._register_attribute(pi)
+        cls._register_attribute(tear_down)
+
+    def __init__(self, ec, guid):
+        super(LinuxTap, self).__init__(ec, guid)
+        self._home = "tap-%s" % self.guid
+        self._gre_enabled = False
+        self._tunnel_mode = False
+
+    @property
+    def node(self):
+        node = self.get_connected(LinuxNode.get_rtype())
+        if node: return node[0]
+        raise RuntimeError, "TAP/TUN devices must be connected to Node"
+
+    @property
+    def gre_enabled(self):
+        if not self._gre_enabled:
+            from nepi.resources.linux.gretunnel import LinuxGRETunnel
+            gre = self.get_connected(LinuxGRETunnel.get_rtype())
+            if gre: self._gre_enabled = True
+
+        return self._gre_enabled
+
+    @property
+    def tunnel_mode(self):
+        if not self._tunnel_mode:
+            from nepi.resources.linux.tunnel import LinuxTunnel
+            tunnel = self.get_connected(LinuxTunnel.get_rtype())
+            if tunnel: self._tunnel_mode = True
+
+        return self._tunnel_mode
+
+    def upload_sources(self):
+        scripts = []
+
+        # udp-connect python script
+        udp_connect = os.path.join(os.path.dirname(__file__), "scripts",
+                "linux-udp-connect.py")
+        
+        scripts.append(udp_connect)
+
+        # tunnel creation python script
+        tunchannel = os.path.join(os.path.dirname(__file__), "scripts", 
+                "tunchannel.py")
+
+        scripts.append(tunchannel)
+
+        # Upload scripts
+        scripts = ";".join(scripts)
+
+        self.node.upload(scripts,
+                os.path.join(self.node.src_dir),
+                overwrite = False)
+
+        # upload stop.sh script
+        stop_command = self.replace_paths(self._stop_command)
+
+        self.node.upload(stop_command,
+                os.path.join(self.app_home, "stop.sh"),
+                text = True,
+                # Overwrite file every time. 
+                # The stop.sh has the path to the socket, which should change
+                # on every experiment run.
+                overwrite = True)
+
+    def upload_start_command(self):
+        # If GRE mode is enabled, TAP creation is delayed until the
+        # tunnel is established
+        if not self.tunnel_mode:
+            # We want to make sure the device is up and running
+            # before the deploy is over, so we execute the 
+            # start script now and wait until it finishes. 
+            command = self.get("command")
+            command = self.replace_paths(command)
+
+            shfile = os.path.join(self.app_home, "start.sh")
+            self.node.run_and_wait(command, self.run_home,
+                shfile = shfile,
+                overwrite = True)
+
+    def do_deploy(self):
+        if not self.node or self.node.state < ResourceState.PROVISIONED:
+            self.ec.schedule(reschedule_delay, self.deploy)
+        else:
+            if not self.get("deviceName"):
+                self.set("deviceName", "%s%d" % (self.vif_prefix, self.guid)) 
+
+            if not self.get("command"):
+                self.set("command", self._start_command)
+
+            self.do_discover()
+            self.do_provision()
+
+            self.set_ready()
+
+    def do_start(self):
+        if self.state == ResourceState.READY:
+            command = self.get("command")
+            self.info("Starting command '%s'" % command)
+
+            self.set_started()
+        else:
+            msg = " Failed to execute command '%s'" % command
+            self.error(msg, out, err)
+            raise RuntimeError, msg
+
+    def do_stop(self):
+        command = self.get('command') or ''
+        
+        if self.state == ResourceState.STARTED:
+            self.info("Stopping command '%s'" % command)
+
+            command = "bash %s" % os.path.join(self.app_home, "stop.sh")
+            (out, err), proc = self.execute_command(command,
+                    blocking = True)
+
+            if err:
+                msg = " Failed to stop command '%s' " % command
+                self.error(msg, out, err)
+
+            self.set_stopped()
+
+    @property
+    def state(self):
+        state_check_delay = 0.5
+        if self._state == ResourceState.STARTED and \
+                tdiffsec(tnow(), self._last_state_check) > state_check_delay:
+
+            if self.get("deviceName"):
+                (out, err), proc = self.node.execute("ifconfig")
+
+                if out.strip().find(self.get("deviceName")) == -1: 
+                    # tap device is not running (interface not listed by ifconfig)
+                    self.set_stopped()
+
+            self._last_state_check = tnow()
+
+        return self._state
+
+    def do_release(self):
+        # The TAP device needs to wait until all the tunnels it is
+        # connected to are released before being released itself
+        from nepi.resources.linux.tunnel import LinuxTunnel
+        rms = self.get_connected(LinuxTunnel.get_rtype())
+
+        for rm in rms:
+            if rm.state < ResourceState.STOPPED:
+                self.ec.schedule(reschedule_delay, self.release)
+                return 
+
+        super(LinuxTap, self).do_release()
+
+    def gre_connect(self, remote_endpoint, connection_app_home,
+            connection_run_home):
+        gre_connect_command = self._gre_connect_command(
+                remote_endpoint, connection_run_home)
+
+        # upload command to connect.sh script
+        shfile = os.path.join(connection_app_home, "gre-connect.sh")
+        self.node.upload_command(gre_connect_command,
+                shfile = shfile,
+                overwrite = False)
+
+        # invoke connect script
+        cmd = "bash %s" % shfile
+        (out, err), proc = self.node.run(cmd, connection_run_home)
+             
+        # check if execution errors occurred
+        msg = " Failed to connect endpoints "
+        
+        if proc.poll() or err:
+            self.error(msg, out, err)
+            raise RuntimeError, msg
+    
+        # Wait for pid file to be generated
+        pid, ppid = self.node.wait_pid(connection_run_home)
+        
+        # If the process is not running, check for error information
+        # on the remote machine
+        if not pid or not ppid:
+            (out, err), proc = self.node.check_errors(connection_run_home)
+            # Out is what was written in the stderr file
+            if err:
+                msg = " Failed to start command '%s' " % command
+                self.error(msg, out, err)
+                raise RuntimeError, msg
+        
+        return True
+
+    def initiate_udp_connection(self, remote_endpoint, connection_app_home, 
+            connection_run_home, cipher, cipher_key, bwlimit, txqueuelen):
+        port = self.udp_connect(remote_endpoint, connection_app_home, 
+            connection_run_home, cipher, cipher_key, bwlimit, txqueuelen)
+        return port
+
+    def udp_connect(self, remote_endpoint, connection_app_home, 
+            connection_run_home, cipher, cipher_key, bwlimit, txqueuelen):
+        udp_connect_command = self._udp_connect_command(
+                remote_endpoint, connection_run_home,
+                cipher, cipher_key, bwlimit, txqueuelen)
+
+        # upload command to connect.sh script
+        shfile = os.path.join(self.app_home, "udp-connect.sh")
+        self.node.upload_command(udp_connect_command,
+                shfile = shfile,
+                overwrite = False)
+
+        # invoke connect script
+        cmd = "bash %s" % shfile
+        (out, err), proc = self.node.run(cmd, self.run_home) 
+             
+        # check if execution errors occurred
+        msg = "Failed to connect endpoints "
+        
+        if proc.poll():
+            self.error(msg, out, err)
+            raise RuntimeError, msg
+    
+        # Wait for pid file to be generated
+        self._pid, self._ppid = self.node.wait_pid(self.run_home)
+        
+        # If the process is not running, check for error information
+        # on the remote machine
+        if not self._pid or not self._ppid:
+            (out, err), proc = self.node.check_errors(self.run_home)
+            # Out is what was written in the stderr file
+            if err:
+                msg = " Failed to start command '%s' " % command
+                self.error(msg, out, err)
+                raise RuntimeError, msg
+
+        port = self.wait_local_port()
+
+        return port
+
+    def _udp_connect_command(self, remote_endpoint, connection_run_home, 
+            cipher, cipher_key, bwlimit, txqueuelen):
+
+        # Set the remote endpoint
+        self.set("pointopoint", remote_endpoint.get("endpoint_ip"))
+        
+        # Planetlab TAPs always use PI headers
+        from nepi.resources.planetlab.tap import PlanetlabTap
+        if self.is_rm_instance(PlanetlabTap.get_rtype()):
+            self.set("pi", True)
+
+        remote_ip = remote_endpoint.node.get("ip")
+
+        local_port_file = os.path.join(self.run_home, 
+                "local_port")
+
+        remote_port_file = os.path.join(self.run_home, 
+                "remote_port")
+
+        ret_file = os.path.join(self.run_home, 
+                "ret_file")
+
+        # Generate UDP connect command
+        # Use the start command to configure TAP with peer info
+        start_command = self._start_command
+        
+        command = ["( "]
+        command.append(start_command)
+
+        # Use linux-udp-connect.py to establish the tunnel between endpoints
+        command.append(") & (")
+        command.append("sudo -S")
+        command.append("PYTHONPATH=$PYTHONPATH:${SRC}")
+        command.append("python ${SRC}/linux-udp-connect.py")
+        command.append("-N %s" % self.get("deviceName"))
+        command.append("-t %s" % self.vif_type)
+        if self.get("pi"):
+            command.append("-p")
+        command.append("-l %s " % local_port_file)
+        command.append("-r %s " % remote_port_file)
+        command.append("-H %s " % remote_ip)
+        command.append("-R %s " % ret_file)
+        if cipher:
+            command.append("-c %s " % cipher)
+        if cipher_key:
+            command.append("-k %s " % cipher_key)
+        if txqueuelen:
+            command.append("-q %s " % txqueuelen)
+        if bwlimit:
+            command.append("-b %s " % bwlimit)
+
+        command.append(")")
+
+        command = " ".join(command)
+        command = self.replace_paths(command)
+
+        return command
+
+    def _gre_connect_command(self, remote_endpoint, connection_run_home): 
+        # Set the remote endpoint
+        self.set("pointopoint", remote_endpoint.get("endpoint_ip"))
+        self.set("greRemote", remote_endpoint.node.get("ip"))
+
+        # Generate GRE connect command
+        command = ["("]
+        command.append(self._stop_command)
+        command.append(") ; (")
+        command.append(self._start_gre_command)
+        command.append(")")
+
+        command = " ".join(command)
+        command = self.replace_paths(command)
+
+        return command
+
+    def establish_udp_connection(self, remote_endpoint, port):
+        # upload remote port number to file
+        rem_port = "%s\n" % port
+        self.node.upload(rem_port,
+                os.path.join(self.run_home, "remote_port"),
+                text = True, 
+                overwrite = False)
+
+    def verify_connection(self):
+        self.wait_result()
+
+    def terminate_connection(self):
+        if  self._pid and self._ppid:
+            (out, err), proc = self.node.kill(self._pid, self._ppid, 
+                    sudo = True) 
+
+            # check if execution errors occurred
+            if proc.poll() and err:
+                msg = " Failed to Kill the Tap"
+                self.error(msg, out, err)
+                raise RuntimeError, msg
+
+    def check_status(self):
+        return self.node.status(self._pid, self._ppid)
+
+    def wait_local_port(self):
+        """ Waits until the local_port file for the endpoint is generated, 
+        and returns the port number 
+        
+        """
+        return self.wait_file("local_port")
+
+    def wait_result(self):
+        """ Waits until the return code file for the endpoint is generated 
+        
+        """ 
+        return self.wait_file("ret_file")
+
+    def wait_file(self, filename):
+        """ Waits until file on endpoint is generated """
+        result = None
+        delay = 1.0
+
+        for i in xrange(20):
+            (out, err), proc = self.node.check_output(
+                    self.run_home, filename)
+            if out:
+                result = out.strip()
+                break
+            else:
+                time.sleep(delay)
+                delay = delay * 1.5
+        else:
+            msg = "Couldn't retrieve %s" % filename
+            self.error(msg, out, err)
+            raise RuntimeError, msg
+
+        return result
+
+    @property
+    def _start_command(self):
+        command = []
+        if not self.gre_enabled:
+            # Make sure to clean TAP if it existed
+            stop_command = self._stop_command
+            
+            start_command = []
+            start_command.append("sudo -S ip tuntap add %s mode %s %s" % (
+                self.get("deviceName"),
+                self.vif_prefix,
+                "pi" if self.get("pi") else ""))
+            start_command.append("sudo -S ip link set %s up" % self.get("deviceName"))
+            start_command.append("sudo -S ip addr add %s/%d dev %s" % (
+                self.get("endpoint_ip"),
+                self.get("endpoint_prefix"),
+                self.get("deviceName"),
+                ))
+
+            start_command = ";".join(start_command)
+
+            command.append("(")
+            command.append(stop_command)
+            command.append(") ; (")
+            command.append(start_command)
+            command.append(")")
+
+        return " ".join(command)
+
+    @property
+    def _stop_command(self):
+        command = []
+        command.append("sudo -S ip link set %s down" % self.get("deviceName"))
+        command.append("sudo -S ip link del %s" % self.get("deviceName"))
+        
+        return ";".join(command)
+
+    @property
+    def _start_gre_command(self):
+        command = []
+        command.append("sudo -S modprobe ip_gre")
+        command.append("sudo -S ip link add %s type gre remote %s local %s ttl 64 csum key %s" % (
+                self.get("deviceName"),
+                self.get("greRemote"),
+                self.node.get("ip"),
+                self.get("greKey")
+            ))
+        command.append("sudo -S ip addr add %s/%d peer %s/%d dev %s" % (
+                self.get("endpoint_ip"),
+                self.get("endpoint_prefix"),
+                self.get("pointopoint"),
+                self.get("endpoint_prefix"),
+                self.get("deviceName"),
+                ))
+        command.append("sudo -S ip link set %s up " % self.get("deviceName"))
+
+        return ";".join(command)
+
+    @property
+    def vif_type(self):
+        return "IFF_TAP"
+
+    @property
+    def vif_prefix(self):
+        return "tap"
+
+    def sock_name(self):
+        return os.path.join(self.run_home, "tap.sock")
+
+    def valid_connection(self, guid):
+        # TODO: Validate!
+        return True
+
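A minimal sketch of how the new LinuxTap RM could be described in an experiment; the attribute names are the ones registered above, the hostname and addresses are placeholders, and the exp_id constructor argument is an assumption:

    from nepi.execution.ec import ExperimentController

    ec = ExperimentController(exp_id = "tap-demo")

    node = ec.register_resource("LinuxNode")
    ec.set(node, "hostname", "localhost")

    tap = ec.register_resource("LinuxTap")
    ec.set(tap, "endpoint_ip", "192.168.10.1")
    ec.set(tap, "endpoint_prefix", 24)
    ec.register_connection(tap, node)

    # the TAP device is brought up during deployment (see do_deploy above)
    ec.deploy()
    ec.release()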
old mode 100755 (executable)
new mode 100644 (file)
similarity index 51%
rename from test/util/parser.py
rename to src/nepi/resources/linux/tun.py
index e9fce6b..5872f8e
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 #
 #    NEPI, a framework to manage network experiments
 #    Copyright (C) 2013 INRIA
 #
 # Author: Alina Quereilhac <alina.quereilhac@inria.fr>
 
+from nepi.execution.resource import clsinit_copy
+from nepi.resources.linux.tap import LinuxTap
 
-from nepi.design.box import Box 
-from nepi.util.parser import XMLParser
+import os
 
-import unittest
+@clsinit_copy
+class LinuxTun(LinuxTap):
+    _rtype = "LinuxTun"
+    _help = "Creates a TUN device on a Linux host"
+    _backend = "linux"
 
-class BoxDesignTestCase(unittest.TestCase):
-    def test_to_xml(self):
-        node1 = Box()
-        node2 = Box()
+    def __init__(self, ec, guid):
+        super(LinuxTun, self).__init__(ec, guid)
+        self._home = "tun-%s" % self.guid
 
-        node1.label = "node1"
-        node2.label = "node2"
+    @property
+    def sock_name(self):
+        return os.path.join(self.run_home, "tun.sock")
+    
+    @property
+    def vif_type(self):
+        return "IFF_TUN"
 
-        node1.connect(node2)
+    @property
+    def vif_prefix(self):
+        return "tun"
 
-        node1.a.dog = "cat"
-        node1.a.one = "two"
-        node1.a.t = "q"
-
-        node1.c.node2.a.sky = "sea"
-        node2.a.bee = "honey"
-
-        node1.tadd("unooo")
-        node2.tadd("dosss")
-
-        parser = XMLParser()
-        xml = parser.to_xml(node1)
-        
-        node = parser.from_xml(xml)
-        xml2 = parser.to_xml(node)
-        
-        self.assertEquals(xml, xml2)
-
-if __name__ == '__main__':
-    unittest.main()
 
index c0e4a73..8e0760d 100644 (file)
@@ -86,6 +86,7 @@ class LinuxTunnel(LinuxApplication):
         self.endpoint1.node.mkdir(self.run_home(self.endpoint1))
         self.endpoint2.node.mkdir(self.run_home(self.endpoint2))
 
+        self.debug("Initiate the connection")
         # Start 2 step connection
         # Initiate connection from endpoint 1 to endpoint 2
         data1 = self.initiate_connection(self.endpoint1, self.endpoint2)
@@ -93,12 +94,14 @@ class LinuxTunnel(LinuxApplication):
         # Initiate connection from endpoint 2 to endpoint 1
         data2 = self.initiate_connection(self.endpoint2, self.endpoint1)
 
+        self.debug("Establish the connection")
         # Establish connection from endpoint 1 to endpoint 2
         self.establish_connection(self.endpoint1, self.endpoint2, data2)
         
         # Establish connection from endpoint 2 to endpoint 1
         self.establish_connection(self.endpoint2, self.endpoint1, data1)
 
+        self.debug("Verify the connection")
         # check if connection was successful on both sides
         self.verify_connection(self.endpoint1, self.endpoint2)
         self.verify_connection(self.endpoint2, self.endpoint1)
@@ -131,6 +134,7 @@ class LinuxTunnel(LinuxApplication):
     def do_stop(self):
         """ Stops application execution
         """
+
         if self.state == ResourceState.STARTED:
             self.info("Stopping tunnel")
 
@@ -155,7 +159,6 @@ class LinuxTunnel(LinuxApplication):
 
         return self._state
 
-
     def valid_connection(self, guid):
         # TODO: Validate!
         return True
index ae4534d..1643b90 100644 (file)
@@ -84,7 +84,7 @@ class LinuxUdpTunnel(LinuxTunnel):
         connected = []
         for guid in self.connections:
             rm = self.ec.get_resource(guid)
-            if hasattr(rm, "udp_connect_command"):
+            if hasattr(rm, "initiate_udp_connection"):
                 connected.append(rm)
         return connected
 
@@ -93,80 +93,33 @@ class LinuxUdpTunnel(LinuxTunnel):
         cipher_key = self.get("cipherKey")
         bwlimit = self.get("bwLimit")
         txqueuelen = self.get("txQueueLen")
-       
-        # Return the command to execute to initiate the connection to the
-        # other endpoint
+        connection_app_home = self.app_home(endpoint)
         connection_run_home = self.run_home(endpoint)
-        udp_connect_command = endpoint.udp_connect_command(
-                remote_endpoint, connection_run_home,
-                cipher, cipher_key, bwlimit, txqueuelen)
-
-        # upload command to connect.sh script
-        shfile = os.path.join(self.app_home(endpoint), "udp-connect.sh")
-        endpoint.node.upload(udp_connect_command,
-                shfile,
-                text = True, 
-                overwrite = False)
-
-        # invoke connect script
-        cmd = "bash %s" % shfile
-        (out, err), proc = endpoint.node.run(cmd, self.run_home(endpoint)) 
-             
-        # check if execution errors occurred
-        msg = "Failed to connect endpoints "
-        
-        if proc.poll():
-            self.error(msg, out, err)
-            raise RuntimeError, msg
-    
-        # Wait for pid file to be generated
-        pid, ppid = endpoint.node.wait_pid(self.run_home(endpoint))
-        
-        # If the process is not running, check for error information
-        # on the remote machine
-        if not pid or not ppid:
-            (out, err), proc = endpoint.node.check_errors(self.run_home(endpoint))
-            # Out is what was written in the stderr file
-            if err:
-                msg = " Failed to start command '%s' " % command
-                self.error(msg, out, err)
-                raise RuntimeError, msg
-
-        # wait until port is written to file
-        port = self.wait_local_port(endpoint)
 
-        self._pids[endpoint] = (pid, ppid)
+        port = endpoint.initiate_udp_connection(
+                remote_endpoint, 
+                connection_app_home,
+                connection_run_home, 
+                cipher, cipher_key, bwlimit, txqueuelen)
 
         return port
 
     def establish_connection(self, endpoint, remote_endpoint, port):
-        self.upload_remote_port(endpoint, port)
+        endpoint.establish_udp_connection(remote_endpoint, port)
 
     def verify_connection(self, endpoint, remote_endpoint):
-        self.wait_result(endpoint)
+        endpoint.verify_connection()
 
     def terminate_connection(self, endpoint, remote_endpoint):
-        pid, ppid = self._pids[endpoint]
-
-        if pid and ppid:
-            (out, err), proc = endpoint.node.kill(pid, ppid, 
-                    sudo = True) 
-
-            # check if execution errors occurred
-            if proc.poll() and err:
-                msg = " Failed to STOP tunnel"
-                self.error(msg, out, err)
-                raise RuntimeError, msg
+        endpoint.terminate_connection()
 
     def check_state_connection(self):
         # Make sure the process is still running in background
         # No execution errors occurred. Make sure the background
         # process with the recorded pid is still running.
-        pid1, ppid1 = self._pids[self.endpoint1]
-        pid2, ppid2 = self._pids[self.endpoint2]
 
-        status1 = self.endpoint1.node.status(pid1, ppid1)
-        status2 = self.endpoint2.node.status(pid2, ppid2)
+        status1 = self.endpoint1.check_status()
+        status2 = self.endpoint2.check_status()
 
         if status1 == ProcStatus.FINISHED and \
                 status2 == ProcStatus.FINISHED:
@@ -220,12 +173,3 @@ class LinuxUdpTunnel(LinuxTunnel):
 
         return result
 
-    def upload_remote_port(self, endpoint, port):
-        # upload remote port number to file
-        port = "%s\n" % port
-        endpoint.node.upload(port,
-                os.path.join(self.run_home(endpoint), "remote_port"),
-                text = True, 
-                overwrite = False)
-
-
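
With this refactoring LinuxUdpTunnel no longer writes connect scripts or tracks pids itself; every phase is delegated to the endpoint RMs (see the PlanetlabTap and OVSPort changes further down). A sketch of the duck-typed interface an endpoint is now expected to expose, with method names taken from the calls above and placeholder bodies:

class UdpTunnelEndpoint(object):
    """ Sketch of the endpoint-side contract used by LinuxUdpTunnel """

    def initiate_udp_connection(self, remote_endpoint, connection_app_home,
            connection_run_home, cipher, cipher_key, bwlimit, txqueuelen):
        # Start the local half of the tunnel and return the UDP port it
        # listens on, so the tunnel can hand it to the other endpoint
        raise NotImplementedError

    def establish_udp_connection(self, remote_endpoint, port):
        # Learn the remote UDP port and complete the connection
        raise NotImplementedError

    def verify_connection(self):
        # Block until the endpoint reports that the connection is up
        raise NotImplementedError

    def terminate_connection(self):
        # Tear down the local half of the tunnel
        raise NotImplementedError

    def check_status(self):
        # Return a ProcStatus value for the underlying process
        raise NotImplementedError
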
index f86bea7..ef345ff 100644 (file)
@@ -32,6 +32,7 @@ class NS3Base(ResourceManager):
         self._uuid = None
         self._connected = set()
         self._trace_filename = dict()
+        self._node = None
 
     @property
     def connected(self):
@@ -47,10 +48,12 @@ class NS3Base(ResourceManager):
 
     @property
     def node(self):
-        from nepi.resources.ns3.ns3node import NS3BaseNode
-        nodes = self.get_connected(NS3BaseNode.get_rtype())
-        if nodes: return nodes[0]
-        return None
+        if not self._node:
+            from nepi.resources.ns3.ns3node import NS3BaseNode
+            nodes = self.get_connected(NS3BaseNode.get_rtype())
+            if nodes: self._node = nodes[0]
+
+        return self._node
 
     def trace(self, name, attr = TraceAttr.ALL, block = 512, offset = 0):
         filename = self._trace_filename.get(name)
@@ -80,7 +83,7 @@ class NS3Base(ResourceManager):
 
         kwargs = dict()
         for attr in self._attrs.values():
-            if not ( attr.has_flag(Flags.Construct) and attr.has_changed() ):
+            if not ( attr.has_flag(Flags.Construct) and attr.has_changed ):
                 continue
 
             kwargs[attr.name] = attr._value
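
The node lookup above is now resolved once and cached in self._node instead of searching the connected resources on every access. A generic sketch of the same lazy-lookup pattern, in plain Python and independent of NEPI:

class CachedNodeLookup(object):
    def __init__(self):
        self._node = None

    def _find_connected_node(self):
        # Placeholder for an expensive search over connected resources
        return "node-0"

    @property
    def node(self):
        # Resolve on first access only; later accesses reuse the cached value
        if self._node is None:
            self._node = self._find_connected_node()
        return self._node

Testing "is None" avoids repeating the search once a falsy result has been cached; the patch uses "if not self._node", which only re-runs the lookup in the unlikely case that the connected resource evaluates to False.
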
index 48a7c16..a85ba0d 100644 (file)
@@ -25,6 +25,15 @@ from nepi.resources.ns3.ns3base import NS3Base
 class NS3BaseNode(NS3Base):
     _rtype = "abstract::ns3::Node"
 
+    def __init__(self, ec, guid):
+        super(NS3BaseNode, self).__init__(ec, guid)
+        self._simulation = None
+        self._ipv4 = None
+        self._arp = None
+        self._mobility = None
+        self._devices = None
+        self._dceapplications = None
+
     @classmethod
     def _register_attributes(cls):
         enablestack = Attribute("enableStack", 
@@ -38,55 +47,72 @@ class NS3BaseNode(NS3Base):
 
     @property
     def simulation(self):
-        from nepi.resources.ns3.ns3simulation import NS3Simulation
-        for guid in self.connections:
-            rm = self.ec.get_resource(guid)
-            if isinstance(rm, NS3Simulation):
-                return rm
-
-        msg = "Node not connected to simulation"
-        self.error(msg)
-        raise RuntimeError, msg
+        if not self._simulation:
+            from nepi.resources.ns3.ns3simulation import NS3Simulation
+            for guid in self.connections:
+                rm = self.ec.get_resource(guid)
+                if isinstance(rm, NS3Simulation):
+                    self._simulation = rm
+            
+            if not self._simulation:
+                msg = "Node not connected to simulation"
+                self.error(msg)
+                raise RuntimeError, msg
+
+        return self._simulation
+         
     @property
     def ipv4(self):
-        from nepi.resources.ns3.ns3ipv4l3protocol import NS3BaseIpv4L3Protocol
-        ipv4s = self.get_connected(NS3BaseIpv4L3Protocol.get_rtype())
-        if ipv4s: return ipv4s[0]
-        return None
+        if not self._ipv4:
+            from nepi.resources.ns3.ns3ipv4l3protocol import NS3BaseIpv4L3Protocol
+            ipv4s = self.get_connected(NS3BaseIpv4L3Protocol.get_rtype())
+            if ipv4s: 
+                self._ipv4 = ipv4s[0]
+        
+        return self._ipv4
 
     @property
     def arp(self):
-        from nepi.resources.ns3.ns3arpl3protocol import NS3BaseArpL3Protocol
-        arps = self.get_connected(NS3BaseArpL3Protocol.get_rtype())
-        if arps: return arps[0]
-        return None
+        if not self._arp:
+            from nepi.resources.ns3.ns3arpl3protocol import NS3BaseArpL3Protocol
+            arps = self.get_connected(NS3BaseArpL3Protocol.get_rtype())
+            if arps: 
+                self._arp = arps[0]
+
+        return self._arp
 
     @property
     def mobility(self):
-        from nepi.resources.ns3.ns3mobilitymodel import NS3BaseMobilityModel
-        mobility = self.get_connected(NS3BaseMobilityModel.get_rtype())
-        if mobility: return mobility[0]
-        return None
+        if not self._mobility:
+            from nepi.resources.ns3.ns3mobilitymodel import NS3BaseMobilityModel
+            mobility = self.get_connected(NS3BaseMobilityModel.get_rtype())
+            if mobility: 
+                self._mobility = mobility[0]
+
+        return self._mobility
 
     @property
     def devices(self):
-        from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
-        devices = self.get_connected(NS3BaseNetDevice.get_rtype())
+        if not self._devices:
+            from nepi.resources.ns3.ns3netdevice import NS3BaseNetDevice
+            devices = self.get_connected(NS3BaseNetDevice.get_rtype())
+
+            if not devices: 
+                msg = "Node not connected to devices"
+                self.error(msg)
+                raise RuntimeError, msg
 
-        if not devices: 
-            msg = "Node not connected to devices"
-            self.error(msg)
-            raise RuntimeError, msg
+            self._devices = devices
 
-        return devices
+        return self._devices
 
     @property
     def dceapplications(self):
-        from nepi.resources.ns3.ns3dceapplication import NS3BaseDceApplication
-        dceapplications = self.get_connected(NS3BaseDceApplication.get_rtype())
+        if not self._dceapplications:
+            from nepi.resources.ns3.ns3dceapplication import NS3BaseDceApplication
+            self._dceapplications = self.get_connected(NS3BaseDceApplication.get_rtype())
 
-        return dceapplications
+        return self._dceapplications
 
     @property
     def _rms_to_wait(self):
index eea32d0..ebd0d67 100644 (file)
@@ -199,8 +199,8 @@ class NS3Wrapper(object):
         ### DEBUG
         self.logger.debug("FACTORY %s( %s )" % (type_name, str(kwargs)))
         
+        ### DUMP
         self.debuger.dump_factory(uuid, type_name, kwargs)
-        ########
 
         factory = self.ns3.ObjectFactory()
         factory.SetTypeId(type_name)
@@ -216,7 +216,6 @@ class NS3Wrapper(object):
         ### DEBUG
         self.logger.debug("RET FACTORY ( uuid %s ) %s = %s( %s )" % (
             str(uuid), str(obj), type_name, str(kwargs)))
-        ########
  
         return uuid
 
@@ -233,8 +232,8 @@ class NS3Wrapper(object):
         ### DEBUG
         self.logger.debug("CREATE %s( %s )" % (clazzname, str(args)))
     
+        ### DUMP
         self.debuger.dump_create(uuid, clazzname, args)
-        ########
 
         clazz = getattr(self.ns3, clazzname)
  
@@ -249,7 +248,6 @@ class NS3Wrapper(object):
         ### DEBUG
         self.logger.debug("RET CREATE ( uuid %s ) %s = %s( %s )" % (str(uuid), 
             str(obj), clazzname, str(args)))
-        ########
 
         return uuid
 
@@ -269,19 +267,25 @@ class NS3Wrapper(object):
         elif operation == "isAppRunning":
             result = self._is_app_running(uuid)
         else:
-            newuuid = self.make_uuid()
-
-            ### DEBUG
-            self.debuger.dump_invoke(newuuid, uuid, operation, args, kwargs)
-            ########
 
             if operation == "addStaticRoute":
                 result = self._add_static_route(uuid, *args)
+                
+                ### DUMP
+                self.debuger.dump_invoke(result, uuid, operation, args, kwargs)
 
             elif operation == "retrieveObject":
                 result = self._retrieve_object(uuid, *args, **kwargs)
+                
+                ### DUMP
+                self.debuger.dump_invoke(result, uuid, operation, args, kwargs)
 
             else:
+                newuuid = self.make_uuid()
+
+                ### DUMP
+                self.debuger.dump_invoke(newuuid, uuid, operation, args, kwargs)
+
                 if uuid.startswith(SINGLETON):
                     obj = self._singleton(uuid)
                 else:
@@ -319,8 +323,8 @@ class NS3Wrapper(object):
         ### DEBUG
         self.logger.debug("SET %s %s %s" % (uuid, name, str(value)))
     
+        ### DUMP
         self.debuger.dump_set(uuid, name, value)
-        ########
 
         obj = self.get_object(uuid)
         type_name = obj.GetInstanceTypeId().GetName()
@@ -347,7 +351,6 @@ class NS3Wrapper(object):
         ### DEBUG
         self.logger.debug("RET SET %s = %s -> set(%s, %s)" % (str(value), uuid, name, 
             str(value)))
-        ########
 
         return value
 
@@ -358,8 +361,8 @@ class NS3Wrapper(object):
         ### DEBUG
         self.logger.debug("GET %s %s" % (uuid, name))
         
+        ### DUMP
         self.debuger.dump_get(uuid, name)
-        ########
 
         obj = self.get_object(uuid)
         type_name = obj.GetInstanceTypeId().GetName()
@@ -379,14 +382,12 @@ class NS3Wrapper(object):
 
         ### DEBUG
         self.logger.debug("RET GET %s = %s -> get(%s)" % (str(result), uuid, name))
-        ########
 
         return result
 
     def start(self):
-        ### DEBUG
+        ### DUMP
         self.debuger.dump_start()
-        ########
 
         # Launch the simulator thread and Start the
         # simulator in that thread
@@ -400,12 +401,10 @@ class NS3Wrapper(object):
 
         ### DEBUG
         self.logger.debug("START")
-        ########
 
     def stop(self, time = None):
-        ### DEBUG
+        ### DUMP
         self.debuger.dump_stop(time=time)
-        ########
         
         if time is None:
             self.ns3.Simulator.Stop()
@@ -414,12 +413,10 @@ class NS3Wrapper(object):
 
         ### DEBUG
         self.logger.debug("STOP time=%s" % str(time))
-        ########
 
     def shutdown(self):
-        ### DEBUG
+        ### DUMP
         self.debuger.dump_shutdown()
-        ########
 
         while not self.ns3.Simulator.IsFinished():
             #self.logger.debug("Waiting for simulation to finish")
@@ -438,7 +435,6 @@ class NS3Wrapper(object):
 
         ### DEBUG
         self.logger.debug("SHUTDOWN")
-        ########
 
     def _simulator_run(self, condition):
         # Run simulation
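
These hunks separate the human-readable DEBUG log (self.logger.debug) from the DUMP trace (self.debuger.dump_*), and dump each invoke with the uuid it actually produces (the result uuid for addStaticRoute and retrieveObject, a fresh uuid otherwise). A minimal sketch of a dumper exposing the dump_* hooks used above, assuming it simply appends one line per call to a trace file; the wrapper's real debugger may format its dump differently:

class SimpleDumper(object):
    """ Illustrative trace writer, not the wrapper's actual debugger """

    def __init__(self, path):
        self._path = path

    def _write(self, line):
        with open(self._path, "a") as f:
            f.write(line + "\n")

    def dump_factory(self, uuid, type_name, kwargs):
        self._write("FACTORY %s %s %s" % (uuid, type_name, kwargs))

    def dump_create(self, uuid, clazzname, args):
        self._write("CREATE %s %s %s" % (uuid, clazzname, args))

    def dump_invoke(self, newuuid, uuid, operation, args, kwargs):
        # newuuid identifies whatever the invocation produced
        self._write("INVOKE %s = %s.%s %s %s" % (newuuid, uuid, operation,
                args, kwargs))

    def dump_set(self, uuid, name, value):
        self._write("SET %s %s %s" % (uuid, name, value))

    def dump_get(self, uuid, name):
        self._write("GET %s %s" % (uuid, name))

    def dump_start(self):
        self._write("START")

    def dump_stop(self, time = None):
        self._write("STOP %s" % time)

    def dump_shutdown(self):
        self._write("SHUTDOWN")
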
index eecbe45..0918c3f 100644 (file)
@@ -71,8 +71,8 @@ class WilabtSfaNode(OMFNode):
         host = Attribute("host", "Name of the physical machine",
                 flags = Flags.Design)
 
-        #disk_image = Attribute("disk_image", "Specify a specific disk image for a node",
-        #        flags = Flags.Design)
+        disk_image = Attribute("disk_image", "Specify a specific disk image for a node",
+                flags = Flags.Design)
         
         cls._register_attribute(username)
         cls._register_attribute(identity)
@@ -83,7 +83,7 @@ class WilabtSfaNode(OMFNode):
         cls._register_attribute(gateway_user)
         cls._register_attribute(gateway)
         cls._register_attribute(host)
-        #cls._register_attribute(disk_image)
+        cls._register_attribute(disk_image)
 
     def __init__(self, ec, guid):
         super(WilabtSfaNode, self).__init__(ec, guid)
@@ -130,10 +130,6 @@ class WilabtSfaNode(OMFNode):
         Based on the attributes defined by the user, discover the suitable 
         node for provision.
         """
-        if self._skip_provision():
-            super(WilabtSfaNode, self).do_discover()
-            return
-
         nodes = self.sfaapi.get_resources_hrn()
 
         host = self._get_host()
@@ -159,10 +155,6 @@ class WilabtSfaNode(OMFNode):
         Add node to user's slice and verifing that the node is functioning
         correctly. Check ssh, omf rc running, hostname, file system.
         """
-        if self._skip_provision():
-            super(WilabtSfaNode, self).do_provision()
-            return
-
         provision_ok = False
         ssh_ok = False
         proc_ok = False
@@ -170,10 +162,10 @@ class WilabtSfaNode(OMFNode):
 
         while not provision_ok:
             node = self._node_to_provision
-            if self._slicenode:
-                self._delete_from_slice()
-                self.debug("Waiting 300 sec for re-adding to slice")
-                time.sleep(300) # Timout for the testbed to allow a new reservation
+            #if self._slicenode:
+            #    self._delete_from_slice()
+            #    self.debug("Waiting 480 sec for re-adding to slice")
+            #    time.sleep(480) # Timeout for the testbed to allow a new reservation
             self._add_node_to_slice(node)
             t = 0
             while not self._check_if_in_slice([node]) and t < timeout \
@@ -368,11 +360,11 @@ class WilabtSfaNode(OMFNode):
         """
         self.info(" Adding node to slice ")
         slicename = self.get("slicename")
-        #disk_image = self.get("disk_image")
-        #if disk_image is not None:
-        #    properties = {'disk_image': disk_image}
-        #else: properties = None
-        properties = None
+        disk_image = self.get("disk_image")
+        if disk_image is not None:
+            properties = {'disk_image': disk_image}
+        else:
+            properties = None
         self.sfaapi.add_resource_to_slice_batch(slicename, host_hrn, properties=properties)
 
     def _delete_from_slice(self):
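
Re-enabling disk_image lets a custom image be requested directly from the experiment description; the value ends up in the properties dict passed to add_resource_to_slice_batch above. A usage sketch, assuming the rtype string "WilabtSfaNode" and placeholder host/slice values (SFA credentials and gateway settings are omitted):

from nepi.execution.ec import ExperimentController

ec = ExperimentController(exp_id = "wilabt-disk-image")

node = ec.register_resource("WilabtSfaNode")     # rtype string is an assumption
ec.set(node, "host", "zotacB1")                  # placeholder w-iLab.t host
ec.set(node, "slicename", "myslice")             # placeholder slice name
ec.set(node, "disk_image", "my-custom-image")    # forwarded as {'disk_image': ...}

ec.deploy()
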
index 3594a8c..fb921e9 100644 (file)
@@ -297,11 +297,12 @@ class OVSSwitch(LinuxApplication):
         """
 
         from nepi.resources.planetlab.openvswitch.ovsport import OVSPort
-        rm = self.get_connected(OVSPort.get_rtype())
+        rms = self.get_connected(OVSPort.get_rtype())
 
-        if rm[0].state < ResourceState.RELEASED:
-            self.ec.schedule(reschedule_delay, self.release)
-            return 
+        for rm in rms:
+            if rm.state < ResourceState.RELEASED:
+                self.ec.schedule(reschedule_delay, self.release)
+                return 
             
         cmd = "sliver-ovs del-bridge %s" % self.get('bridge_name')
         (out, err), proc = self.node.run(cmd, self.ovs_checks,
index 6571358..8275249 100644 (file)
@@ -26,6 +26,8 @@ from nepi.resources.planetlab.openvswitch.ovs import OVSSwitch
 from nepi.resources.planetlab.node import PlanetlabNode        
 from nepi.resources.linux.application import LinuxApplication
 
+import os
+
 reschedule_delay = "0.5s"
 
 @clsinit_copy                 
@@ -44,7 +46,7 @@ class OVSPort(LinuxApplication):
     _help = "Runs an OpenVSwitch on a PlanetLab host"
     _backend = "planetlab"
 
-    _authorized_connections = ["OVSSwitch", "OVSTunnel"]      
+    _authorized_connections = ["OVSSwitch", "LinuxUdpTunnel", "LinuxTunnel"]      
 
     @classmethod
     def _register_attributes(cls):
@@ -53,8 +55,16 @@ class OVSPort(LinuxApplication):
         """
         port_name = Attribute("port_name", "Name of the port",
             flags = Flags.Design)                      
+        endpoint_ip = Attribute("endpoint_ip", "IP of the endpoint. This is the attribute " 
+                                "you should use to establish a tunnel or a remote "
+                                "connection between endpoints",
+            flags = Flags.Design)
+        network = Attribute("network", "Network used by the port",
+            flags = Flags.Design)      
 
         cls._register_attribute(port_name)
+        cls._register_attribute(endpoint_ip)
+        cls._register_attribute(network)
 
     def __init__(self, ec, guid):
         """
@@ -65,8 +75,11 @@ class OVSPort(LinuxApplication):
     
         """
         super(OVSPort, self).__init__(ec, guid)
+
+
         self._port_number = None
-        self.port_info = []         
+        # in case of connection by tunnel       
+        self._remote_ip = None    
 
     def log_message(self, msg):
         return " guid %d - OVSPort - %s " % (self.guid, msg)
@@ -90,6 +103,10 @@ class OVSPort(LinuxApplication):
         if ovsswitch: return ovsswitch[0]
         return None
         
+    @property
+    def remote_ip(self):
+        return self._remote_ip
+
     @property
     def port_number(self):
         return self._port_number
@@ -136,12 +153,15 @@ class OVSPort(LinuxApplication):
         self.info("Created the port %s on switch %s" % (self.get('port_name'),
                                              self.ovsswitch.get('bridge_name')))     
            
-    def get_local_end(self):
+    def initiate_udp_connection(self, remote_endpoint, connection_app_home, 
+            connection_run_home, cipher, cipher_key, bwlimit, txqueuelen):
         """ Get the local_endpoint of the port
         """
 
+        self._remote_ip = remote_endpoint.node.get("ip")
+
         msg = "Discovering the number of the port %s" % self.get('port_name')
-        self.debug(msg)
+        self.info(msg)
 
         command = "sliver-ovs get-local-endpoint %s" % self.get('port_name')
         out = err = ""
@@ -164,39 +184,88 @@ class OVSPort(LinuxApplication):
 
         self.info("The number of the %s is %s" % (self.get('port_name'), 
            self.port_number))
-   
-    def set_port_info(self):
-        """ Set all the information about the port inside a list
-        """
 
-        info = []
-        info.append(self.node.get('hostname'))
+        if remote_endpoint.is_rm_instance("PlanetlabTap"):
+            self._vroute = self.ec.register_resource("PlanetlabVroute")
+            self.ec.set(self._vroute, "action", "add")
+            self.ec.set(self._vroute, "network", self.get("network"))
+
+            print "Vroute Guid :" + str(self._vroute)
+
+            self.ec.register_connection(self._vroute, remote_endpoint.guid)
+            self.ec.deploy(guids=[self._vroute], group = self.deployment_group)
+
+            # For debugging
+            msg = "Route for the tap configured"
+            self.debug(msg)
+
+        return self.port_number
+
 
-        #Return the ip of the node
-        import socket
-        ip = socket.gethostbyname(self.node.get('hostname'))
-        info.append(ip)
+    def establish_udp_connection(self, remote_endpoint, port):
+        establish_connection_command = self._establish_connection_command(port)
 
-        info.append(self.get('port_name'))
-        info.append(self.ovsswitch.get('virtual_ip_pref'))
-        info.append(self.port_number)
-        return info
+        # upload command to connect.sh script
+        shfile = os.path.join(self.app_home, "sw-connect.sh")
+        self.node.upload_command(establish_connection_command,
+                shfile = shfile,
+                overwrite = False)
 
-    def switch_connect_command(self, local_port_name, 
-            remote_ip, remote_port_num):
+        # invoke connect script
+        cmd = "bash %s" % shfile
+        (out, err), proc = self.node.run(cmd, self.run_home,
+                sudo  = True,
+                stdout = "sw_stdout",
+                stderr = "sw_stderr") 
+             
+        # check if execution errors occurred
+        msg = "Failed to connect endpoints "
+        if proc.poll():
+            self.error(msg, out, err)
+            raise RuntimeError, msg
+    
+        # Wait for pid file to be generated
+        self._pid, self._ppid = self.node.wait_pid(self.run_home)
+        
+        # If the process is not running, check for error information
+        # on the remote machine
+        if not self._pid or not self._ppid:
+            (out, err), proc = self.node.check_errors(self.run_home)
+            # Out is what was written in the stderr file
+            if err:
+                msg = " Failed to start command '%s' " % command
+                self.error(msg, out, err)
+                raise RuntimeError, msg
+
+        # For debugging
+        msg = "Connection on port configured"
+        self.debug(msg)
+
+
+    def _establish_connection_command(self, port):
         """ Script to create the connection from a switch to a 
              remote endpoint
         """
+        local_port_name = self.get('port_name')
 
         command = ["sliver-ovs"]
         command.append("set-remote-endpoint ")
         command.append("%s " % local_port_name)
-        command.append("%s " % remote_ip)
-        command.append("%s " % remote_port_num)
+        command.append("%s " % self.remote_ip)
+        command.append("%s " % port)
         command = " ".join(command)
         command = self.replace_paths(command)
         return command
         
+    def verify_connection(self):
+        self.ovsswitch.ovs_status()
+
+    def terminate_connection(self):
+        return True
+
+    def check_status(self):
+        return self.node.status(self._pid, self._ppid)
+
     def do_deploy(self):
         """ Deploy the OVS port after the OVS Switch
         """
@@ -210,25 +279,22 @@ class OVSPort(LinuxApplication):
         self.do_provision()
 
         self.create_port()
-        self.get_local_end()
+        end_ip = self.ovsswitch.get('virtual_ip_pref').split('/')
+        self.set("endpoint_ip", end_ip[0])
 
         #Check the status of the OVS Switch
         self.ovsswitch.ovs_status()
 
-        # Save all the information inside a list
-        self.port_info = self.set_port_info()
-
         super(OVSPort, self).do_deploy()
 
     def do_release(self):
         """ Delete the port on the OVSwitch. It needs to wait for the tunnel
         to be released.
         """
+        from nepi.resources.linux.udptunnel import LinuxUdpTunnel
+        rm = self.get_connected(LinuxUdpTunnel.get_rtype())
 
-        from nepi.resources.planetlab.openvswitch.tunnel import OVSTunnel
-        rm = self.get_connected(OVSTunnel.get_rtype())
-
-        if rm and rm[0].state < ResourceState.RELEASED:
+        if rm and rm[0].state < ResourceState.STOPPED:
             self.ec.schedule(reschedule_delay, self.release)
             return 
             
diff --git a/src/nepi/resources/planetlab/openvswitch/tunnel.py b/src/nepi/resources/planetlab/openvswitch/tunnel.py
deleted file mode 100644 (file)
index 6257565..0000000
+++ /dev/null
@@ -1,386 +0,0 @@
-#
-#    NEPI, a framework to manage network experiments
-#    Copyright (C) 2013 INRIA
-#
-#    This program is free software: you can redistribute it and/or modify
-#    it under the terms of the GNU General Public License as published by
-#    the Free Software Foundation, either version 3 of the License, or
-#    (at your option) any later version.
-#
-#    This program is distributed in the hope that it will be useful,
-#    but WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#    GNU General Public License for more details.
-#
-#    You should have received a copy of the GNU General Public License
-#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-# Authors: Alina Quereilhac <alina.quereilhac@inria.fr>
-#         Alexandros Kouvakas <alexandros.kouvakas@inria.fr>
-#         Julien Tribino <julien.tribino@inria.fr>
-
-
-from nepi.execution.attribute import Attribute, Flags, Types
-from nepi.execution.resource import ResourceManager, ResourceFactory, clsinit_copy, \
-        ResourceState
-from nepi.resources.linux.application import LinuxApplication
-from nepi.resources.planetlab.node import PlanetlabNode            
-from nepi.resources.planetlab.openvswitch.ovs import OVSSwitch   
-from nepi.util.timefuncs import tnow, tdiffsec    
-from nepi.resources.planetlab.vroute import PlanetlabVroute
-from nepi.resources.planetlab.tap import PlanetlabTap
-
-import os
-import time
-import socket
-
-reschedule_delay = "0.5s"
-
-@clsinit_copy                 
-class OVSTunnel(LinuxApplication):
-    """
-    .. class:: Class Args :
-      
-        :param ec: The Experiment controller
-        :type ec: ExperimentController
-        :param guid: guid of the RM
-        :type guid: int
-        :param creds: Credentials to communicate with the rm 
-        :type creds: dict
-
-    """
-    
-    _rtype = "OVSTunnel"
-    _authorized_connections = ["OVSPort", "PlanetlabTap"]    
-
-    @classmethod
-    def _register_attributes(cls):
-        """ Register the attributes of OVSTunnel RM 
-
-        """
-        network = Attribute("network", "IPv4 Network Address",
-               flags = Flags.Design)
-
-        cipher = Attribute("cipher",
-               "Cipher to encript communication. "
-                "One of PLAIN, AES, Blowfish, DES, DES3. ",
-                default = None,
-                allowed = ["PLAIN", "AES", "Blowfish", "DES", "DES3"],
-                type = Types.Enumerate, 
-                flags = Flags.Design)
-
-        cipher_key = Attribute("cipherKey",
-                "Specify a symmetric encryption key with which to protect "
-                "packets across the tunnel. python-crypto must be installed "
-                "on the system." ,
-                flags = Flags.Design)
-
-        txqueuelen = Attribute("txQueueLen",
-                "Specifies the interface's transmission queue length. "
-                "Defaults to 1000. ", 
-                type = Types.Integer, 
-                flags = Flags.Design)
-
-        bwlimit = Attribute("bwLimit",
-                "Specifies the interface's emulated bandwidth in bytes "
-                "per second.",
-                type = Types.Integer, 
-                flags = Flags.Design)
-
-        cls._register_attribute(network)
-        cls._register_attribute(cipher)
-        cls._register_attribute(cipher_key)
-        cls._register_attribute(txqueuelen)
-        cls._register_attribute(bwlimit)
-
-    def __init__(self, ec, guid):
-        """
-        :param ec: The Experiment controller
-        :type ec: ExperimentController
-        :param guid: guid of the RM
-        :type guid: int
-    
-        """
-        super(OVSTunnel, self).__init__(ec, guid)
-        self._home = "tunnel-%s" % self.guid
-        self.port_info_tunl = []
-        self._pid = None
-        self._ppid = None
-        self._vroute = None
-        self._node_endpoint1 = None
-        self._node_endpoint2 = None
-
-    def log_message(self, msg):
-        return " guid %d - Tunnel - %s " % (self.guid, msg)
-
-    def app_home(self, node):
-        return os.path.join(node.exp_home, self._home)
-
-    def run_home(self, node):
-        return os.path.join(self.app_home(node), self.ec.run_id)
-
-    @property
-    def tap(self):
-        """ Return the Tap RM if it exists """
-        rclass = ResourceFactory.get_resource_type(PlanetlabTap.get_rtype())
-        for guid in self.connections:
-            rm = self.ec.get_resource(guid)
-            if isinstance(rm, rclass):
-                return rm
-
-    @property
-    def ovsswitch(self):
-        """ Return the 1st switch """
-        for guid in self.connections:
-            rm_port = self.ec.get_resource(guid)
-            if hasattr(rm_port, "create_port"):
-                rm_list = rm_port.get_connected(OVSSwitch.get_rtype())
-                if rm_list:
-                    return rm_list[0]
-
-    @property         
-    def check_switch_host_link(self):
-        """ Check if the links are between switches
-            or switch-host. Return False for the latter.
-        """
-        if self.tap :
-            return True
-        return False
-
-
-    def endpoints(self):
-        """ Return the list with the two connected elements.
-        Either Switch-Switch or Switch-Host
-        """
-        connected = [1, 1]
-        position = 0
-        for guid in self.connections:
-            rm = self.ec.get_resource(guid)
-            if hasattr(rm, "create_port"):
-                connected[position] = rm
-                position += 1
-            elif hasattr(rm, "udp_connect_command"):
-                connected[1] = rm
-        return connected
-
-    def get_node(self, endpoint):
-        """ Get the nodes of the endpoint
-        """
-        rm = []
-        if hasattr(endpoint, "create_port"):
-            rm_list = endpoint.get_connected(OVSSwitch.get_rtype())
-            if rm_list:
-                rm = rm_list[0].get_connected(PlanetlabNode.get_rtype())
-        else:
-            rm = endpoint.get_connected(PlanetlabNode.get_rtype())
-
-        if rm :
-            return rm[0]
-
-    @property
-    def endpoint1(self):
-        """ Return the first endpoint : Always a Switch
-        """
-        endpoint = self.endpoints()
-        return endpoint[0]
-
-    @property
-    def endpoint2(self):
-        """ Return the second endpoint : Either a Switch or a TAP
-        """
-        endpoint = self.endpoints()
-        return endpoint[1]
-
-    def get_port_info(self, endpoint1, endpoint2):
-        #TODO : Need to change it. Really bad to have method that return different type of things !!!!!
-        """ Retrieve the port_info list for each port
-       
-        """
-        if self.check_switch_host_link :
-            host0, ip0, pname0, virt_ip0, pnumber0 = endpoint1.port_info
-            return pnumber0
-
-        host0, ip0, pname0, virt_ip0, pnumber0 = endpoint1.port_info
-        host1, ip1, pname1, virt_ip1, pnumber1 = endpoint2.port_info
-
-        return pname0, ip1, pnumber1
-    
-    def wait_local_port(self, node_endpoint):
-        """ Waits until the if_name file for the command is generated, 
-            and returns the if_name for the device """
-
-        local_port = None
-        delay = 1.0
-
-        #TODO : Need to change it with reschedule to avoid the problem 
-        #        of the order of connection
-        for i in xrange(10):
-            (out, err), proc = node_endpoint.check_output(self.run_home(node_endpoint), 'local_port')
-            if out:
-                local_port = int(out)
-                break
-            else:
-                time.sleep(delay)
-                delay = delay * 1.5
-        else:
-            msg = "Couldn't retrieve local_port"
-            self.error(msg, out, err)
-            raise RuntimeError, msg
-
-        return local_port
-
-    def connection(self, local_endpoint, rm_endpoint):
-        """ Create the connect command for each case : 
-              - Host - Switch,  
-              - Switch - Switch,  
-              - Switch - Host
-        """
-        local_node = self.get_node(local_endpoint)
-        local_node.mkdir(self.run_home(local_node))
-
-        rm_node = self.get_node(rm_endpoint)
-        rm_node.mkdir(self.run_home(rm_node))
-
-        # Host to switch
-        if self.check_switch_host_link and local_endpoint == self.endpoint2 :
-        # Collect info from rem_endpoint
-            remote_ip = socket.gethostbyname(rm_node.get("hostname"))
-
-        # Collect info from endpoint
-            local_port_file = os.path.join(self.run_home(local_node), "local_port")
-            rem_port_file = os.path.join(self.run_home(local_node), "remote_port")
-            ret_file = os.path.join(self.run_home(local_node), "ret_file")
-            cipher = self.get("cipher")
-            cipher_key = self.get("cipherKey")
-            bwlimit = self.get("bwLimit")
-            txqueuelen = self.get("txQueueLen")
-
-            rem_port = str(self.get_port_info(rm_endpoint,local_endpoint))
-   
-        # Upload the remote port in a file
-            local_node.upload(rem_port, rem_port_file,
-                 text = True,
-                 overwrite = False)
-       
-            connect_command = local_endpoint.udp_connect_command(
-                 remote_ip, local_port_file, rem_port_file,
-                 ret_file, cipher, cipher_key, bwlimit, txqueuelen) 
-
-            self.connection_command(connect_command, local_node, rm_node)
-
-        # Wait for pid file to be generated
-            self._pid, self._ppid = local_node.wait_pid(self.run_home(local_node))
-
-            if not self._pid or not self._ppid:
-                (out, err), proc = local_node.check_errors(self.run_home(local_node))
-                # Out is what was written in the stderr file
-                if err:
-                    msg = " Failed to start connection of the OVS Tunnel "
-                    self.error(msg, out, err)
-                    raise RuntimeError, msg
-            return
-
-        # Switch to Host
-        if self.check_switch_host_link and local_endpoint == self.endpoint1:
-            local_port_name = local_endpoint.get('port_name')
-            remote_port_num = self.wait_local_port(rm_node)
-            remote_ip = socket.gethostbyname(rm_node.get("hostname"))
-  
-        # Switch to Switch
-        if not self.check_switch_host_link :
-            local_port_name, remote_ip, remote_port_num = self.get_port_info(local_endpoint, rm_endpoint)
-
-        connect_command = local_endpoint.switch_connect_command(
-                    local_port_name, remote_ip, remote_port_num)
-
-        self.connection_command(connect_command, local_node, rm_node)       
-
-    def connection_command(self, command, node_endpoint, rm_node_endpoint):
-        """ Execute the connection command on the node and check if the processus is
-            correctly running on the node.
-        """
-        shfile = os.path.join(self.app_home(node_endpoint), "sw_connect.sh")
-        node_endpoint.upload(command,
-                shfile,
-                text = True,
-                overwrite = False)
-
-        # Invoke connect script
-        out = err= ''       
-        cmd = "bash %s" % shfile
-        (out, err), proc = node_endpoint.run(cmd, self.run_home(node_endpoint),
-                sudo  = True,
-                stdout = "sw_stdout",
-                stderr = "sw_stderr")
-        
-        # Check if execution errors occured
-
-        if proc.poll():
-            msg = "Failed to connect endpoints"
-            self.error(msg, out, err)
-            raise RuntimeError, msg
-
-        # For debugging
-        msg = "Connection on port configured"
-        self.debug(msg)
-
-    def do_provision(self):
-        """ Provision the tunnel
-        """
-        
-        #TODO : The order of the connection is important for now ! 
-        # Need to change the code of wait local port
-        self.connection(self.endpoint2, self.endpoint1)
-        self.connection(self.endpoint1, self.endpoint2)
-
-    def configure_route(self):
-        """ Configure the route for the tap device
-
-            .. note : In case of a conection between a switch and a host, a route
-                      was missing on the node with the Tap Device. This method create
-                      the missing route. 
-        """
-
-        if  self.check_switch_host_link:
-            self._vroute = self.ec.register_resource("PlanetlabVroute")
-            self.ec.set(self._vroute, "action", "add")
-            self.ec.set(self._vroute, "network", self.get("network"))
-
-            self.ec.register_connection(self._vroute, self.tap.guid)
-            self.ec.deploy(guids=[self._vroute], group = self.deployment_group)
-
-    def do_deploy(self):
-        """ Deploy the tunnel after the endpoint get ready
-        """
-        if (not self.endpoint1 or self.endpoint1.state < ResourceState.READY) or \
-            (not self.endpoint2 or self.endpoint2.state < ResourceState.READY):
-            self.ec.schedule(reschedule_delay, self.deploy)
-            return
-
-        self.do_discover()
-        self.do_provision()
-        self.configure_route()
-
-        # Cannot call the deploy of the linux application 
-        #         because of a log error.
-        # Need to investigate if it is right that the tunnel 
-        #    inherits from the linux application
-        #  super(OVSTunnel, self).do_deploy()
-        self.set_ready()
-    def do_release(self):
-        """ Release the tunnel by releasing the Tap Device if exists
-        """
-        if self.check_switch_host_link:
-            # TODO: Make more generic Release method of PLTAP
-            tap_node = self.get_node(self.endpoint2)
-            if self._pid and self._ppid:
-                (out, err), proc = tap_node.kill(self._pid,
-                        self._ppid, sudo = True)
-
-                if err or proc.poll():
-                    msg = " Failed to delete TAP device"
-                    self.error(msg, out, err)
-
-        super(OVSTunnel, self).do_release()
-
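
The removed OVSTunnel RM is superseded by the generic LinuxUdpTunnel: the OVS port and the host TAP now expose the same endpoint interface (initiate/establish/verify/terminate), so a switch-to-host link can be wired through the common tunnel RM. A wiring sketch; PlanetLab node registration, SSH credentials and the node-to-switch/TAP connections are omitted, and the rtype strings and addresses are assumptions based on the identifiers used in this patch:

from nepi.execution.ec import ExperimentController

ec = ExperimentController(exp_id = "ovs-tap-tunnel")

switch = ec.register_resource("OVSSwitch")
ec.set(switch, "bridge_name", "nepi_bridge")
ec.set(switch, "virtual_ip_pref", "192.168.3.1/24")

port = ec.register_resource("OVSPort")
ec.set(port, "port_name", "nepi_port")
ec.set(port, "network", "192.168.3.0")        # used for the vroute on the TAP side

tap = ec.register_resource("PlanetlabTap")
ec.set(tap, "endpoint_ip", "192.168.3.2")
ec.set(tap, "endpoint_prefix", 24)

tunnel = ec.register_resource("LinuxUdpTunnel")

ec.register_connection(port, switch)
ec.register_connection(port, tunnel)
ec.register_connection(tunnel, tap)

ec.deploy()
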
index 9188bbb..caac65a 100644 (file)
@@ -61,7 +61,7 @@ def get_options():
         vif_type = vsys.IFF_TUN
 
     return (options.socket_name, options.vif_name, options.slicename, 
-            options.vif_type, options.delete)
+            vif_type, options.delete)
 
 if __name__ == '__main__':
 
index dc6d448..4df138b 100644 (file)
@@ -25,7 +25,6 @@ from nepi.resources.planetlab.node import PlanetlabNode
 from nepi.util.timefuncs import tnow, tdiffsec
 
 import os
-import socket
 import time
 
 PYTHON_VSYS_VERSION = "1.0"
@@ -38,13 +37,15 @@ class PlanetlabTap(LinuxApplication):
 
     @classmethod
     def _register_attributes(cls):
-        ip4 = Attribute("ip4", "IPv4 Address",
+        endpoint_ip = Attribute("endpoint_ip", "IP of the endpoint. This is the attribute " 
+                                "you should use to establish a tunnel or a remote "
+                                "connection between endpoints",
               flags = Flags.Design)
 
         mac = Attribute("mac", "MAC Address",
                 flags = Flags.Design)
 
-        prefix4 = Attribute("prefix4", "IPv4 network prefix",
+        endpoint_prefix = Attribute("endpoint_prefix", "IPv4 network prefix of the endpoint",
                 type = Types.Integer,
                 flags = Flags.Design)
 
@@ -84,9 +85,9 @@ class PlanetlabTap(LinuxApplication):
                 "Bash script to be executed before releasing the resource",
                 flags = Flags.Design)
 
-        cls._register_attribute(ip4)
+        cls._register_attribute(endpoint_ip)
         cls._register_attribute(mac)
-        cls._register_attribute(prefix4)
+        cls._register_attribute(endpoint_prefix)
         cls._register_attribute(mtu)
         cls._register_attribute(devname)
         cls._register_attribute(up)
@@ -106,7 +107,7 @@ class PlanetlabTap(LinuxApplication):
     def node(self):
         node = self.get_connected(PlanetlabNode.get_rtype())
         if node: return node[0]
-        return None
+        raise RuntimeError, "TAP/TUN devices must be connected to Node"
 
     @property
     def gre_enabled(self):
@@ -160,9 +161,8 @@ class PlanetlabTap(LinuxApplication):
         # upload stop.sh script
         stop_command = self.replace_paths(self._stop_command)
 
-        self.node.upload(stop_command,
-                os.path.join(self.app_home, "stop.sh"),
-                text = True,
+        self.node.upload_command(stop_command,
+                shfile = os.path.join(self.app_home, "stop.sh"),
                 # Overwrite file every time. 
                 # The stop.sh has the path to the socket, which should change
                 # on every experiment run.
@@ -266,17 +266,23 @@ class PlanetlabTap(LinuxApplication):
 
         super(PlanetlabTap, self).do_release()
 
-    def wait_vif_name(self):
+    def wait_vif_name(self, exec_run_home = None):
         """ Waits until the vif_name file for the command is generated, 
             and returns the vif_name for the device """
         vif_name = None
         delay = 0.5
 
+        # The vif_name file will be created in the tap-home, while the
+        # current execution home might be elsewhere to check for errors
+        # (e.g. could be a tunnel-home)
+        if not exec_run_home:
+            exec_run_home = self.run_home
+
         for i in xrange(20):
             (out, err), proc = self.node.check_output(self.run_home, "vif_name")
 
             if proc.poll() > 0:
-                (out, err), proc = self.node.check_errors(self.run_home)
+                (out, err), proc = self.node.check_errors(exec_run_home)
                 
                 if err.strip():
                     raise RuntimeError, err
@@ -294,22 +300,112 @@ class PlanetlabTap(LinuxApplication):
 
         return vif_name
 
-    def udp_connect_command(self, remote_endpoint, connection_run_home, 
+    def gre_connect(self, remote_endpoint, connection_app_home,
+            connection_run_home):
+        gre_connect_command = self._gre_connect_command(
+                remote_endpoint, connection_run_home)
+
+        # upload command to connect.sh script
+        shfile = os.path.join(connection_app_home, "gre-connect.sh")
+        self.node.upload_command(gre_connect_command,
+                shfile = shfile,
+                overwrite = False)
+
+        # invoke connect script
+        cmd = "bash %s" % shfile
+        (out, err), proc = self.node.run(cmd, connection_run_home) 
+             
+        # check if execution errors occurred
+        msg = " Failed to connect endpoints "
+        
+        if proc.poll() or err:
+            self.error(msg, out, err)
+            raise RuntimeError, msg
+    
+        # Wait for pid file to be generated
+        pid, ppid = self.node.wait_pid(connection_run_home)
+        
+        # If the process is not running, check for error information
+        # on the remote machine
+        if not pid or not ppid:
+            (out, err), proc = self.node.check_errors(connection_run_home)
+            # Out is what was written in the stderr file
+            if err:
+                msg = " Failed to start command '%s' " % command
+                self.error(msg, out, err)
+                raise RuntimeError, msg
+        
+        # After creating the TAP, the pl-vif-create.py script
+        # will write the name of the TAP to a file. We wait until
+        # we can read the interface name from the file.
+        vif_name = self.wait_vif_name(exec_run_home = connection_run_home)
+        self.set("deviceName", vif_name) 
+
+        return True
+
+
+    def initiate_udp_connection(self, remote_endpoint, connection_app_home, 
+            connection_run_home, cipher, cipher_key, bwlimit, txqueuelen):
+        port = self.udp_connect(remote_endpoint, connection_app_home, 
+            connection_run_home, cipher, cipher_key, bwlimit, txqueuelen)
+        return port
+
+
+    def udp_connect(self, remote_endpoint, connection_app_home, 
+            connection_run_home, cipher, cipher_key, bwlimit, txqueuelen):
+        udp_connect_command = self._udp_connect_command(
+                remote_endpoint, connection_run_home,
+                cipher, cipher_key, bwlimit, txqueuelen)
+
+        # upload command to connect.sh script
+        shfile = os.path.join(self.app_home, "udp-connect.sh")
+        self.node.upload_command(udp_connect_command,
+                shfile = shfile,
+                overwrite = False)
+
+        # invoke connect script
+        cmd = "bash %s" % shfile
+        (out, err), proc = self.node.run(cmd, self.run_home) 
+             
+        # check if execution errors occurred
+        msg = "Failed to connect endpoints "
+        
+        if proc.poll():
+            self.error(msg, out, err)
+            raise RuntimeError, msg
+    
+        # Wait for pid file to be generated
+        self._pid, self._ppid = self.node.wait_pid(self.run_home)
+        
+        # If the process is not running, check for error information
+        # on the remote machine
+        if not self._pid or not self._ppid:
+            (out, err), proc = self.node.check_errors(self.run_home)
+            # Out is what was written in the stderr file
+            if err:
+                msg = " Failed to start command '%s' " % command
+                self.error(msg, out, err)
+                raise RuntimeError, msg
+
+        port = self.wait_local_port()
+
+        return port
+
+    def _udp_connect_command(self, remote_endpoint, connection_run_home, 
             cipher, cipher_key, bwlimit, txqueuelen):
 
         # Set the remote endpoint
-        self.set("pointopoint", remote_endpoint.get("ip4"))
+        self.set("pointopoint", remote_endpoint.get("endpoint_ip"))
 
-        remote_ip = socket.gethostbyname(
-                remote_endpoint.node.get("hostname"))
+        remote_ip = remote_endpoint.node.get("ip")
 
-        local_port_file = os.path.join(connection_run_home, 
+        local_port_file = os.path.join(self.run_home, 
                 "local_port")
 
-        remote_port_file = os.path.join(connection_run_home, 
+        remote_port_file = os.path.join(self.run_home, 
                 "remote_port")
 
-        ret_file = os.path.join(connection_run_home, 
+        ret_file = os.path.join(self.run_home, 
                 "ret_file")
 
         # Generate UDP connect command
@@ -346,11 +442,71 @@ class PlanetlabTap(LinuxApplication):
 
         return command
 
-    def gre_connect_command(self, remote_endpoint, connection_run_home): 
+    def establish_udp_connection(self, remote_endpoint, port):
+        # upload remote port number to file
+        rem_port = "%s\n" % port
+        self.node.upload(rem_port,
+                os.path.join(self.run_home, "remote_port"),
+                text = True, 
+                overwrite = False)
+
+    def verify_connection(self):
+        self.wait_result()
+
+    def terminate_connection(self):
+        if  self._pid and self._ppid:
+            (out, err), proc = self.node.kill(self._pid, self._ppid, 
+                    sudo = True) 
+
+            # check if execution errors occurred
+            if proc.poll() and err:
+                msg = " Failed to Kill the Tap"
+                self.error(msg, out, err)
+                raise RuntimeError, msg
+
+    def check_status(self):
+        return self.node.status(self._pid, self._ppid)
+
+    def wait_local_port(self):
+        """ Waits until the local_port file for the endpoint is generated, 
+        and returns the port number 
+        
+        """
+        return self.wait_file("local_port")
+
+    def wait_result(self):
+        """ Waits until the return code file for the endpoint is generated 
+        
+        """ 
+        return self.wait_file("ret_file")
+
+    def wait_file(self, filename):
+        """ Waits until file on endpoint is generated """
+        result = None
+        delay = 1.0
+
+        for i in xrange(20):
+            (out, err), proc = self.node.check_output(
+                    self.run_home, filename)
+            if out:
+                result = out.strip()
+                break
+            else:
+                time.sleep(delay)
+                delay = delay * 1.5
+        else:
+            msg = "Couldn't retrieve %s" % filename
+            self.error(msg, out, err)
+            raise RuntimeError, msg
+
+        return result
+
+    def _gre_connect_command(self, remote_endpoint, connection_run_home): 
         # Set the remote endpoint
-        self.set("pointopoint", remote_endpoint.get("ip4"))
-        self.set("greRemote", socket.gethostbyname(
-            remote_endpoint.node.get("hostname")))
+        self.set("pointopoint", remote_endpoint.get("endpoint_ip"))
+        self.set("greRemote", remote_endpoint.node.get("ip"))
 
         # Generate GRE connect command
 
@@ -371,6 +527,7 @@ class PlanetlabTap(LinuxApplication):
 
         return command
 
+
     @property
     def _start_command(self):
         if self.gre_enabled:
@@ -379,8 +536,8 @@ class PlanetlabTap(LinuxApplication):
             command = ["sudo -S python ${SRC}/pl-vif-create.py"]
             
             command.append("-t %s" % self.vif_type)
-            command.append("-a %s" % self.get("ip4"))
-            command.append("-n %d" % self.get("prefix4"))
+            command.append("-a %s" % self.get("endpoint_ip"))
+            command.append("-n %d" % self.get("endpoint_prefix"))
             command.append("-f %s " % self.vif_name_file)
             command.append("-S %s " % self.sock_name)
 
@@ -422,8 +579,8 @@ class PlanetlabTap(LinuxApplication):
         command.append("-u %s" % self.node.get("username"))
         command.append("-N %s" % device_name)
         command.append("-t %s" % self.vif_type)
-        command.append("-a %s" % self.get("ip4"))
-        command.append("-n %d" % self.get("prefix4"))
+        command.append("-a %s" % self.get("endpoint_ip"))
+        command.append("-n %d" % self.get("endpoint_prefix"))
 
         if self.get("snat") == True:
             command.append("-s")
index 7007561..ad0b83e 100644 (file)
@@ -148,7 +148,7 @@ class PlanetlabVroute(LinuxApplication):
         command = ["sudo -S python ${SRC}/pl-vroute.py"]
         command.append("-a %s" % self.get("action"))
         command.append("-n %s" % self.get("network"))
-        command.append("-p %d" % self.tap.get("prefix4"))
+        command.append("-p %d" % self.tap.get("endpoint_prefix"))
         command.append("-g %s" % self.tap.get("pointopoint"))
         command.append("-f %s" % self.tap.get("deviceName"))
         return " ".join(command)
@@ -158,7 +158,7 @@ class PlanetlabVroute(LinuxApplication):
         command = ["sudo -S python ${SRC}/pl-vroute.py"]
         command.append("-a %s" % "del")
         command.append("-n %s" % self.get("network"))
-        command.append("-p %d" % self.tap.get("prefix4"))
+        command.append("-p %d" % self.tap.get("endpoint_prefix"))
         command.append("-g %s" % self.tap.get("pointopoint"))
         command.append("-f %s" % self.tap.get("deviceName"))
         return " ".join(command)
index 522a174..44624a2 100644 (file)
 # Should it be made thread-safe?
 class GuidGenerator(object):
     def __init__(self):
-        self._guids = list()
+        self._last_guid = 0
 
     def next(self, guid = None):
-        if guid != None:
-            return guid
-        else:
-            last_guid = 0 if len(self._guids) == 0 else self._guids[-1]
-            guid = last_guid + 1 
-        self._guids.append(guid)
-        self._guids.sort()
+        if guid is None:
+            guid = self._last_guid + 1
+
+        self._last_guid = self._last_guid if guid <= self._last_guid else guid
+
         return guid
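
The generator now only tracks the highest guid handed out instead of keeping and re-sorting the full list. Expected behaviour, as a small usage sketch:

gen = GuidGenerator()
gen.next()      # -> 1
gen.next()      # -> 2
gen.next(10)    # explicit guids are honored -> 10
gen.next()      # -> 11, allocation continues after the highest guid seen
gen.next(5)     # -> 5; as before, reuse of an already-assigned guid is not detected
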
 
diff --git a/src/nepi/util/netgraph.py b/src/nepi/util/netgraph.py
new file mode 100644 (file)
index 0000000..4b8c06c
--- /dev/null
@@ -0,0 +1,344 @@
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+import ipaddr
+import networkx
+import math
+import random
+
+class TopologyType:
+    LINEAR = "linear"
+    LADDER = "ladder"
+    MESH = "mesh"
+    TREE = "tree"
+    STAR = "star"
+    ADHOC = "adhoc"
+
+## TODO: 
+##      - AQ: Add support for hypergraphs (to be able to add hyper edges to 
+##        model CSMA or wireless networks)
+
+class NetGraph(object):
+    """ NetGraph represents a network topology. 
+    Network graphs are internally using the networkx library.
+
+    """
+
+    def __init__(self, **kwargs):
+        """ A graph can be generated using a specified pattern 
+        (LADDER, MESH, TREE, etc), or provided as an argument.
+
+            :param topology: Undirected graph to use as internal representation 
+            :type topology: networkx.Graph
+
+            :param topo_type: One of TopologyType.{LINEAR,LADDER,MESH,TREE,STAR}
+            used to automatically generate the topology graph. 
+            :type topo_type: TopologyType
+
+            :param node_count: Number of nodes in the topology to be generated. 
+            :type node_count: int
+
+            :param branches: Number of branches (arms) for the STAR topology. 
+            :type branches: int
+
+
+            :param assign_ips: Automatically assign IP addresses to each node. 
+            :type assign_ips: bool
+
+            :param network: Base network segment for IP address assignment.
+            :type network: str
+
+            :param prefix: Base network prefix for IP address assignment.
+            :type prefix: int
+
+            :param version: IP version for IP address assignment.
+            :type version: int
+
+            :param assign_st: Select source and target nodes on the graph.
+            :type assign_st: bool
+
+            :param sources_targets: dictionary with the list of sources (key =
+            "sources") and the list of targets (key = "targets"). If defined,
+            assign_st is ignored.
+            :type sources_targets: dictionary of lists
+
+            :param leaf_source: if True, random sources will be selected only
+            from leaf nodes.
+            :type leaf_source: bool
+
+        NOTE: Only point-to-point like network topologies are supported for now.
+                (Wireless and Ethernet networks where several nodes share the same
+                edge (hyperedge) cannot be modeled for the moment).
+
+        """
+        self._topology = kwargs.get("topology")
+        self._topo_type = kwargs.get("topo_type", TopologyType.ADHOC)
+
+        if not self.topology:
+            if kwargs.get("node_count"):
+                node_count = kwargs["node_count"]
+                branches = kwargs.get("branches")
+
+                self._topology = self.generate_topology(self.topo_type, 
+                        node_count, branches = branches)
+            else:
+                self._topology = networkx.Graph()
+
+        if kwargs.get("assign_ips"):
+            network = kwargs.get("network", "10.0.0.0")
+            prefix = kwargs.get("prefix", 8)
+            version = kwargs.get("version", 4)
+
+            self.assign_p2p_ips(network = network, prefix = prefix, 
+                    version = version)
+
+        sources_targets = kwargs.get("sources_targets")
+        if sources_targets:
+            [self.set_source(n) for n in sources_targets["sources"]]
+            [self.set_target(n) for n in sources_targets["targets"]]
+        elif kwargs.get("assign_st"):
+            self.select_target_zero()
+            self.select_random_source(is_leaf = kwargs.get("leaf_source"))
+
+    @property
+    def topology(self):
+        return self._topology
+
+    @property
+    def topo_type(self):
+        return self._topo_type
+
+    @property
+    def order(self):
+        return self.topology.order()
+
+    def nodes(self):
+        return self.topology.nodes()
+
+    def edges(self):
+        return self.topology.edges()
+
+    def generate_topology(self, topo_type, node_count, branches = None):
+        if topo_type == TopologyType.LADDER:
+            total_nodes = node_count/2
+            graph = networkx.ladder_graph(total_nodes)
+
+        elif topo_type == TopologyType.LINEAR:
+            graph = networkx.path_graph(node_count)
+
+        elif topo_type == TopologyType.MESH:
+            graph = networkx.complete_graph(node_count)
+
+        elif topo_type == TopologyType.TREE:
+            # balanced_tree expects an integer height
+            h = int(math.log(node_count + 1, 2)) - 1
+            graph = networkx.balanced_tree(2, h)
+
+        elif topo_type == TopologyType.STAR:
+            graph = networkx.Graph()
+            graph.add_node(0)
+
+            nodesinbranch = (node_count - 1) / branches
+            c = 1
+
+            for i in xrange(branches):
+                prev = 0
+                for n in xrange(1, nodesinbranch + 1):
+                    graph.add_node(c)
+                    graph.add_edge(prev, c)
+                    prev = c
+                    c += 1
+
+        return graph
+
+    def add_node(self, nid):
+        if nid not in self.topology: 
+            self.topology.add_node(nid)
+
+    def add_edge(self, nid1, nid2):
+        self.add_node(nid1)
+        self.add_node(nid2)
+
+        if nid1 not in self.topology[nid2]:
+            self.topology.add_edge(nid2, nid1)
+
+    def annotate_node_ip(self, nid, ip):
+        if "ips" not in self.topology.node[nid]:
+            self.topology.node[nid]["ips"] = list()
+
+        self.topology.node[nid]["ips"].append(ip)
+    def node_ip_annotations(self, nid):
+        return self.topology.node[nid].get("ips", [])
+   
+    def annotate_node(self, nid, name, value):
+        if not isinstance(value, (str, int, float, bool)):
+            raise RuntimeError("Non-serializable annotation")
+
+        self.topology.node[nid][name] = value
+    
+    def node_annotation(self, nid, name):
+        return self.topology.node[nid].get(name)
+
+    def node_annotations(self, nid):
+        return self.topology.node[nid].keys()
+    
+    def del_node_annotation(self, nid, name):
+        del self.topology.node[nid][name]
+
+    def annotate_edge(self, nid1, nid2, name, value):
+        if not isinstance(value, (str, int, float, bool)):
+            raise RuntimeError("Non-serializable annotation")
+
+        self.topology.edge[nid1][nid2][name] = value
+   
+    def annotate_edge_net(self, nid1, nid2, ip1, ip2, mask, network, 
+            prefixlen):
+        self.topology.edge[nid1][nid2]["net"] = dict()
+        self.topology.edge[nid1][nid2]["net"][nid1] = ip1
+        self.topology.edge[nid1][nid2]["net"][nid2] = ip2
+        self.topology.edge[nid1][nid2]["net"]["mask"] = mask
+        self.topology.edge[nid1][nid2]["net"]["network"] = network
+        self.topology.edge[nid1][nid2]["net"]["prefix"] = prefixlen
+
+    def edge_net_annotation(self, nid1, nid2):
+        return self.topology.edge[nid1][nid2].get("net", dict())
+
+    def edge_annotation(self, nid1, nid2, name):
+        return self.topology.edge[nid1][nid2].get(name)
+
+    def edge_annotations(self, nid1, nid2):
+        return self.topology.edge[nid1][nid2].keys()
+    
+    def del_edge_annotation(self, nid1, nid2, name):
+        del self.topology.edge[nid1][nid2][name]
+
+    def assign_p2p_ips(self, network = "10.0.0.0", prefix = 8, version = 4):
+        """ Assign IP addresses to each end of each edge of the network graph,
+        computing all the point-to-point subnets and addresses in the network
+        representation.
+
+            :param network: Base network address used for subnetting. 
+            :type network: str
+
+            :param prefix: Prefix for the base network address used for subnetting.
+            :type prefix: int
+
+            :param version: IP version (either 4 or 6).
+            :type version: int
+
+        """
+        if networkx.number_connected_components(self.topology) > 1:
+            raise RuntimeError("Disconnected graph!!")
+
+        # Assign IP addresses to host
+        netblock = "%s/%d" % (network, prefix)
+        if version == 4:
+            net = ipaddr.IPv4Network(netblock)
+            new_prefix = 30
+        elif version == 6:
+            net = ipaddr.IPv6Network(netblock)
+            new_prefix = 30
+        else:
+            raise RuntimeError, "Invalid IP version %d" % version
+        
+        ## Clear all previously assigned IPs
+        for nid in self.topology.nodes():
+            self.topology.node[nid]["ips"] = list()
+
+        ## Generate and assign new IPs
+        sub_itr = net.iter_subnets(new_prefix = new_prefix)
+        
+        for nid1, nid2 in self.topology.edges():
+            #### Compute subnets for each link
+            
+            # get a subnet of the base network with the new prefix (/30)
+            subnet = sub_itr.next()
+            mask = subnet.netmask.exploded
+            network = subnet.network.exploded
+            prefixlen = subnet.prefixlen
+
+            # get host addresses in that subnet
+            i = subnet.iterhosts()
+            addr1 = i.next()
+            addr2 = i.next()
+
+            ip1 = addr1.exploded
+            ip2 = addr2.exploded
+            self.annotate_edge_net(nid1, nid2, ip1, ip2, mask, network, 
+                    prefixlen)
+
+            self.annotate_node_ip(nid1, ip1)
+            self.annotate_node_ip(nid2, ip2)
+
+    def get_p2p_info(self, nid1, nid2):
+        net = self.topology.edge[nid1][nid2]["net"]
+        return ( net[nid1], net[nid2], net["mask"], net["network"], 
+                net["prefix"] )
+
+    def set_source(self, nid):
+        self.topology.node[nid]["source"] = True
+
+    def is_source(self, nid):
+        return self.topology.node[nid].get("source")
+
+    def set_target(self, nid):
+        self.topology.node[nid]["target"] = True
+
+    def is_target(self, nid):
+        return self.topology.node[nid].get("target")
+
+    def targets(self):
+        """ Returns the nodes that are targets """
+        return [nid for nid in self.topology.nodes() \
+                if self.topology.node[nid].get("target")]
+
+    def sources(self):
+        """ Returns the nodes that are sources """
+        return [nid for nid in self.topology.nodes() \
+                if self.topology.node[nid].get("source")]
+
+    def select_target_zero(self):
+        """ Mark the node 0 as target
+        """
+        nid = 0 if 0 in self.topology.nodes() else "0"
+        self.set_target(nid)
+
+    def select_random_source(self, **kwargs):
+        """  Mark a random node as source. 
+        """
+
+        # The ladder is a special case because is not symmetric.
+        if self.topo_type == TopologyType.LADDER:
+            total_nodes = self.order/2
+            leaf1 = total_nodes
+            leaf2 = total_nodes - 1
+            leaves = [leaf1, leaf2]
+            source = leaves.pop(random.randint(0, len(leaves) - 1))
+        else:
+            # candidate nodes must not already be sources or targets
+            options = [ k for k,v in self.topology.degree().iteritems() \
+                    if (not kwargs.get("is_leaf") or v == 1)  \
+                        and not self.topology.node[k].get("source") \
+                        and not self.topology.node[k].get("target")]
+            source = options.pop(random.randint(0, len(options) - 1))
+        
+        self.set_source(source)
+
diff --git a/src/nepi/util/parser.py b/src/nepi/util/parser.py
deleted file mode 100644 (file)
index 58cb79b..0000000
+++ /dev/null
@@ -1,165 +0,0 @@
-#
-#    NEPI, a framework to manage network experiments
-#    Copyright (C) 2013 INRIA
-#
-#    This program is free software: you can redistribute it and/or modify
-#    it under the terms of the GNU General Public License as published by
-#    the Free Software Foundation, either version 3 of the License, or
-#    (at your option) any later version.
-#
-#    This program is distributed in the hope that it will be useful,
-#    but WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#    GNU General Public License for more details.
-#
-#    You should have received a copy of the GNU General Public License
-#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
-
-from nepi.design.box import Box
-
-from xml.dom import minidom
-import sys
-
-STRING = "string"
-BOOL = "bool"
-INTEGER = "integer"
-DOUBLE = "float"
-
-def xmlencode(s):
-    if isinstance(s, str):
-        rv = s.decode("latin1")
-    elif not isinstance(s, unicode):
-        rv = unicode(s)
-    else:
-        rv = s
-    return rv.replace(u'\x00',u'&#0000;')
-
-def xmldecode(s):
-    return s.replace(u'&#0000',u'\x00').encode("utf8")
-
-def from_type(value):
-    if isinstance(value, str):
-        return STRING
-    if isinstance(value, bool):
-        return BOOL
-    if isinstance(value, int):
-        return INTEGER
-    if isinstance(value, float):
-        return DOUBLE
-
-def to_type(type, value):
-    if type == STRING:
-        return str(value)
-    if type == BOOL:
-        return value == "True"
-    if type == INTEGER:
-        return int(value)
-    if type == DOUBLE:
-        return float(value)
-
-class XMLParser(object):
-    def to_xml(self, box):
-        doc = minidom.Document()
-
-        root = doc.createElement("boxes")
-        doc.appendChild(root)
-
-        traversed = dict()
-        self._traverse_boxes(doc, traversed, box)
-
-        # Keep the order
-        for guid in sorted(traversed.keys()):
-            bnode = traversed[guid]
-            root.appendChild(bnode)
-       
-        try:
-            xml = doc.toprettyxml(indent="    ", encoding="UTF-8")
-        except:
-            print >>sys.stderr, "Oops: generating XML from %s" % (data,)
-            raise
-        
-        return xml
-
-    def _traverse_boxes(self, doc, traversed, box):
-        bnode = doc.createElement("box")
-        bnode.setAttribute("guid", xmlencode(box.guid))
-        bnode.setAttribute("label", xmlencode(box.label))
-        bnode.setAttribute("x", xmlencode(box.x))
-        bnode.setAttribute("y", xmlencode(box.y))
-        bnode.setAttribute("width", xmlencode(box.width))
-        bnode.setAttribute("height", xmlencode(box.height))
-
-        traversed[box.guid] = bnode
-
-        anode = doc.createElement("attributes")
-        bnode.appendChild(anode)
-        for name in sorted(box.attributes):
-            value = getattr(box.a, name)
-            aanode = doc.createElement("attribute")
-            anode.appendChild(aanode)
-            aanode.setAttribute("name", xmlencode(name))
-            aanode.setAttribute("value", xmlencode(value))
-            aanode.setAttribute("type", from_type(value))
-
-        tnode = doc.createElement("tags")
-        bnode.appendChild(tnode)
-        for tag in sorted(box.tags):
-            ttnode = doc.createElement("tag")
-            tnode.appendChild(ttnode)
-            ttnode.setAttribute("name", xmlencode(tag))
-
-        cnode = doc.createElement("connections")
-        bnode.appendChild(cnode)
-        for b in sorted(box.connections):
-            ccnode = doc.createElement("connection")
-            cnode.appendChild(ccnode)
-            ccnode.setAttribute("guid", xmlencode(b.guid))
-            if b.guid not in traversed:
-                self._traverse_boxes(doc, traversed, b)
-
-    def from_xml(self, xml):
-        doc = minidom.parseString(xml)
-        bnode_list = doc.getElementsByTagName("box")
-
-        boxes = dict()
-        connections = dict()
-
-        for bnode in bnode_list:
-            if bnode.nodeType == doc.ELEMENT_NODE:
-                guid = int(bnode.getAttribute("guid"))
-                label = xmldecode(bnode.getAttribute("label"))
-                x = float(bnode.getAttribute("x"))
-                y = float(bnode.getAttribute("y"))
-                height = float(bnode.getAttribute("height"))
-                width = float(bnode.getAttribute("width"))
-                box = Box(label=label, guid=guid)
-                boxes[guid] = box
-
-                anode_list = bnode.getElementsByTagName("attribute") 
-                for anode in anode_list:
-                    name = xmldecode(anode.getAttribute("name"))
-                    value = xmldecode(anode.getAttribute("value"))
-                    type = xmldecode(anode.getAttribute("type"))
-                    value = to_type(type, value)
-                    setattr(box.a, name, value)
-                    
-                tnode_list = bnode.getElementsByTagName("tag") 
-                for tnode in tnode_list:
-                    value = xmldecode(tnode.getAttribute("name"))
-                    box.tadd(value)
-
-                connections[box] = set()
-                cnode_list = bnode.getElementsByTagName("connection")
-                for cnode in cnode_list:
-                    guid = int(cnode.getAttribute("guid"))
-                    connections[box].add(guid)
-
-        for box, conns in connections.iteritems():
-            for guid in conns:
-                b = boxes[guid]
-                box.connect(b)
-
-        return box
-
diff --git a/src/nepi/util/parsers/__init__.py b/src/nepi/util/parsers/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/src/nepi/util/parsers/xml_parser.py b/src/nepi/util/parsers/xml_parser.py
new file mode 100644 (file)
index 0000000..4f3f1d9
--- /dev/null
@@ -0,0 +1,458 @@
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.util.netgraph import NetGraph, TopologyType 
+from nepi.util.timefuncs import stformat, tsformat
+
+from xml.dom import minidom
+
+import datetime
+import sys
+import os
+
+STRING = "string"
+BOOL = "bool"
+INTEGER = "integer"
+DOUBLE = "float"
+
+def xmlencode(s):
+    if isinstance(s, str):
+        rv = s.decode("latin1")
+    elif isinstance(s, datetime.datetime):
+        rv = tsformat(s)
+    elif not isinstance(s, unicode):
+        rv = unicode(s)
+    else:
+        rv = s
+    return rv.replace(u'\x00',u'&#0000;')
+
+def xmldecode(s, cast = str):
+    if s == "None":
+        return None
+    ret = s.replace(u'&#0000',u'\x00').encode("ascii")
+    return cast(ret)
+
+def from_type(value):
+    if isinstance(value, bool):
+        return BOOL
+    if isinstance(value, int):
+        return INTEGER
+    if isinstance(value, float):
+        return DOUBLE
+
+    return STRING
+
+def to_type(type, value):
+    if not value:
+        return value
+
+    if type == STRING:
+        return str(value)
+    if type == BOOL:
+        return value == "True"
+    if type == INTEGER:
+        return int(value)
+    if type == DOUBLE:
+        return float(value)
+
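+# Round-trip sketch for the type helpers above (illustrative values):
+#
+#     from_type(3.14)            # -> DOUBLE
+#     to_type(DOUBLE, "3.14")    # -> 3.14
+#     to_type(BOOL, "True")      # -> True
+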
+class ECXMLParser(object):
+    def to_xml(self, ec):
+        
+        doc = minidom.Document()
+        
+        self._ec_to_xml(doc, ec)
+       
+        try:
+            xml = doc.toprettyxml(indent="    ", encoding="UTF-8")
+        except:
+            print >>sys.stderr, "Oops: generating XML from %s" % (ec,)
+            raise
+        
+        return xml
+
+    def _ec_to_xml(self, doc, ec):
+        ecnode = doc.createElement("experiment")
+        ecnode.setAttribute("exp_id", xmlencode(ec.exp_id))
+        ecnode.setAttribute("run_id", xmlencode(ec.run_id))
+        ecnode.setAttribute("nthreads", xmlencode(ec.nthreads))
+        ecnode.setAttribute("local_dir", xmlencode(ec.local_dir))
+        doc.appendChild(ecnode)
+
+        if ec.netgraph != None:
+            self._netgraph_to_xml(doc, ecnode, ec)
+
+        rmsnode = doc.createElement("rms")
+        ecnode.appendChild(rmsnode)
+
+        for guid, rm in ec._resources.iteritems():
+            self._rm_to_xml(doc, rmsnode, ec, guid, rm)
+
+        return doc
+    
+    def _netgraph_to_xml(self, doc, ecnode, ec):
+        ngnode = doc.createElement("topology")
+        ngnode.setAttribute("topo-type", xmlencode(ec.netgraph.topo_type))
+        ecnode.appendChild(ngnode)
+        
+        self._netgraph_nodes_to_xml(doc, ngnode, ec)
+        self._netgraph_edges_to_xml(doc, ngnode, ec)
+        
+    def _netgraph_nodes_to_xml(self, doc, ngnode, ec):
+        ngnsnode = doc.createElement("nodes")
+        ngnode.appendChild(ngnsnode)
+
+        for nid in ec.netgraph.nodes():
+            ngnnode = doc.createElement("node")
+            ngnnode.setAttribute("nid", xmlencode(nid))
+            ngnnode.setAttribute("nid-type", from_type(nid))
+            ngnsnode.appendChild(ngnnode)
+
+            # Mark sources and targets
+            if ec.netgraph.is_source(nid):
+                ngnnode.setAttribute("source", xmlencode(True))
+
+            if ec.netgraph.is_target(nid):
+                ngnnode.setAttribute("target", xmlencode(True))
+
+            # Node annotations
+            annosnode = doc.createElement("node-annotations")
+            add_annotations = False
+            for name in ec.netgraph.node_annotations(nid):
+                add_annotations = True
+                value = ec.netgraph.node_annotation(nid, name)
+                annonode = doc.createElement("node-annotation")
+                annonode.setAttribute("name", xmlencode(name))
+                annonode.setAttribute("value", xmlencode(value))
+                annonode.setAttribute("type", from_type(value))
+                annosnode.appendChild(annonode)
+
+            if add_annotations:
+                ngnnode.appendChild(annosnode)
+
+    def _netgraph_edges_to_xml(self, doc, ngnode, ec):
+        ngesnode = doc.createElement("edges")
+        ngnode.appendChild(ngesnode)
+
+        for nid1, nid2 in ec.netgraph.edges():
+            ngenode = doc.createElement("edge")
+            ngenode.setAttribute("nid1", xmlencode(nid1))
+            ngenode.setAttribute("nid1-type", from_type(nid1))
+            ngenode.setAttribute("nid2", xmlencode(nid2))
+            ngenode.setAttribute("nid2-type", from_type(nid2))
+            ngesnode.appendChild(ngenode)
+
+            # Edge annotations
+            annosnode = doc.createElement("edge-annotations")
+            add_annotations = False
+            for name in ec.netgraph.edge_annotations(nid1, nid2):
+                add_annotations = True
+                value = ec.netgraph.edge_annotation(nid1, nid2, name)
+                annonode = doc.createElement("edge-annotation")
+                annonode.setAttribute("name", xmlencode(name))
+                annonode.setAttribute("value", xmlencode(value))
+                annonode.setAttribute("type", from_type(value))
+                annosnode.appendChild(annonode)
+
+            if add_annotations:
+                ngenode.appendChild(annosnode)
+
+    def _rm_to_xml(self, doc, rmsnode, ec, guid, rm):
+        rmnode = doc.createElement("rm")
+        rmnode.setAttribute("guid", xmlencode(guid))
+        rmnode.setAttribute("rtype", xmlencode(rm._rtype))
+        rmnode.setAttribute("state", xmlencode(rm._state))
+        if rm._start_time:
+            rmnode.setAttribute("start_time", xmlencode(rm._start_time))
+        if rm._stop_time:
+            rmnode.setAttribute("stop_time", xmlencode(rm._stop_time))
+        if rm._discover_time:
+            rmnode.setAttribute("discover_time", xmlencode(rm._discover_time))
+        if rm._provision_time:    
+            rmnode.setAttribute("provision_time", xmlencode(rm._provision_time))
+        if rm._ready_time:
+            rmnode.setAttribute("ready_time", xmlencode(rm._ready_time))
+        if rm._release_time:
+            rmnode.setAttribute("release_time", xmlencode(rm._release_time))
+        if rm._failed_time:
+            rmnode.setAttribute("failed_time", xmlencode(rm._failed_time))
+        rmsnode.appendChild(rmnode)
+
+        anode = doc.createElement("attributes")
+        attributes = False
+
+        for attr in rm._attrs.values():
+            if attr.has_changed:
+                attributes = True
+                aanode = doc.createElement("attribute")
+                aanode.setAttribute("name", xmlencode(attr.name))
+                aanode.setAttribute("value", xmlencode(attr.value))
+                aanode.setAttribute("type", from_type(attr.value))
+                anode.appendChild(aanode)
+    
+        if attributes: 
+            rmnode.appendChild(anode)
+
+        cnode = doc.createElement("connections")
+        connections = False
+        
+        for guid in rm._connections:
+            connections = True
+            ccnode = doc.createElement("connection")
+            ccnode.setAttribute("guid", xmlencode(guid))
+            cnode.appendChild(ccnode)
+        
+        if connections:
+            rmnode.appendChild(cnode)
+
+        cnnode = doc.createElement("conditions")
+        conditions = False
+
+        for action, conds in rm._conditions.iteritems():
+            conditions = True
+            for (group, state, time) in conds:
+                ccnnode = doc.createElement("condition")
+                ccnnode.setAttribute("action", xmlencode(action))
+                ccnnode.setAttribute("group", xmlencode(group))
+                ccnnode.setAttribute("state", xmlencode(state))
+                ccnnode.setAttribute("time", xmlencode(time))
+                cnnode.appendChild(ccnnode)
+        
+        if conditions:
+            rmnode.appendChild(cnnode)
+
+        tnode = doc.createElement("traces")
+        traces = False
+
+        for trace in rm._trcs.values():
+            if trace.enabled:
+                traces = True
+                ttnode = doc.createElement("trace")
+                ttnode.setAttribute("name", xmlencode(trace.name))
+                tnode.appendChild(ttnode)
+    
+        if traces: 
+            rmnode.appendChild(tnode)
+
+    def from_xml(self, xml):
+        doc = minidom.parseString(xml)
+        return self._ec_from_xml(doc)
+
+    def _ec_from_xml(self, doc):
+        from nepi.execution.ec import ExperimentController
+        ec = None
+        
+        ecnode_list = doc.getElementsByTagName("experiment")
+        for ecnode in ecnode_list:
+            if ecnode.nodeType == doc.ELEMENT_NODE:
+                exp_id = xmldecode(ecnode.getAttribute("exp_id"))
+                run_id = xmldecode(ecnode.getAttribute("run_id"))
+                local_dir = xmldecode(ecnode.getAttribute("local_dir"))
+
+                # Configure the number of processing threads
+                nthreads = xmldecode(ecnode.getAttribute("nthreads"))
+                os.environ["NEPI_NTHREADS"] = nthreads
+
+                # Deserialize netgraph
+                topology = None
+                topo_type = None
+
+                netgraph = self._netgraph_from_xml(doc, ecnode)
+                
+                if netgraph:
+                    topo_type = netgraph.topo_type
+                    topology = netgraph.topology
+
+                # Instantiate EC
+                ec = ExperimentController(exp_id = exp_id, local_dir = local_dir, 
+                        topology = topology, topo_type = topo_type)
+
+                connections = set()
+
+                rmsnode_list = ecnode.getElementsByTagName("rms")
+                if rmsnode_list:
+                    rmnode_list = rmsnode_list[0].getElementsByTagName("rm") 
+                    for rmnode in rmnode_list:
+                        if rmnode.nodeType == doc.ELEMENT_NODE:
+                            self._rm_from_xml(doc, rmnode, ec, connections)
+
+                for (guid1, guid2) in connections:
+                    ec.register_connection(guid1, guid2)
+
+                break
+
+        return ec
+
+    def _netgraph_from_xml(self, doc, ecnode):
+        netgraph = None
+
+        topology = ecnode.getElementsByTagName("topology")
+        if topology:
+            topology = topology[0]
+            topo_type = xmldecode(topology.getAttribute("topo-type"))
+
+            netgraph = NetGraph(topo_type = topo_type)
+
+            ngnsnode_list = topology.getElementsByTagName("nodes")
+            if ngnsnode_list:
+                ngnsnode = ngnsnode_list[0].getElementsByTagName("node") 
+                for ngnnode in ngnsnode:
+                    nid = xmldecode(ngnnode.getAttribute("nid"))
+                    tipe = xmldecode(ngnnode.getAttribute("nid-type"))
+                    nid = to_type(tipe, nid)
+                    netgraph.add_node(nid)
+
+                    if ngnnode.hasAttribute("source"):
+                        netgraph.set_source(nid)
+                    if ngnnode.hasAttribute("target"):
+                        netgraph.set_target(nid)
+
+                    annosnode_list = ngnnode.getElementsByTagName("node-annotations")
+                    
+                    if annosnode_list:
+                        annosnode = annosnode_list[0].getElementsByTagName("node-annotation") 
+                        for annonode in annosnode:
+                            name = xmldecode(annonode.getAttribute("name"))
+
+                            if name == "ips":
+                                ips = xmldecode(annonode.getAttribute("value"), eval) # list
+                                for ip in ips:
+                                    netgraph.annotate_node_ip(nid, ip)
+                            else:
+                                value = xmldecode(annonode.getAttribute("value"))
+                                tipe = xmldecode(annonode.getAttribute("type"))
+                                value = to_type(tipe, value)
+                                netgraph.annotate_node(nid, name, value)
+
+            ngesnode_list = topology.getElementsByTagName("edges") 
+            if ngesnode_list:
+                ngesnode = ngesnode_list[0].getElementsByTagName("edge") 
+                for ngenode in ngesnode:
+                    nid1 = xmldecode(ngenode.getAttribute("nid1"))
+                    tipe1 = xmldecode(ngenode.getAttribute("nid1-type"))
+                    nid1 = to_type(tipe1, nid1)
+
+                    nid2 = xmldecode(ngenode.getAttribute("nid2"))
+                    tipe2 = xmldecode(ngenode.getAttribute("nid2-type"))
+                    nid2 = to_type(tipe2, nid2)
+
+                    netgraph.add_edge(nid1, nid2)
+
+                    annosnode_list = ngenode.getElementsByTagName("edge-annotations")
+                    if annosnode_list:
+                        annosnode = annosnode_list[0].getElementsByTagName("edge-annotation") 
+                        for annonode in annosnode:
+                            name = xmldecode(annonode.getAttribute("name"))
+
+                            if name == "net":
+                                net = xmldecode(annonode.getAttribute("value"), eval) # dict
+                                netgraph.annotate_edge_net(nid1, nid2, net[nid1], net[nid2], 
+                                        net["mask"], net["network"], net["prefix"])
+                            else:
+                                value = xmldecode(annonode.getAttribute("value"))
+                                tipe = xmldecode(annonode.getAttribute("type"))
+                                value = to_type(tipe, value)
+                                netgraph.annotate_edge(nid1, nid2, name, value)
+        return netgraph
+
+    def _rm_from_xml(self, doc, rmnode, ec, connections):
+        start_time = None
+        stop_time = None
+        discover_time = None
+        provision_time = None
+        ready_time = None
+        release_time = None
+        failed_time = None
+
+        guid = xmldecode(rmnode.getAttribute("guid"), int)
+        rtype = xmldecode(rmnode.getAttribute("rtype"))
+
+        # FOR NOW ONLY STATE NEW IS ALLOWED
+        state = 0
+        """
+        state = xmldecode(rmnode.getAttribute("state"), int)
+
+        if rmnode.hasAttribute("start_time"):
+            start_time = xmldecode(rmnode.getAttribute("start_time"), 
+                    datetime.datetime)
+        if rmnode.hasAttribute("stop_time"):
+            stop_time = xmldecode(rmnode.getAttribute("stop_time"), 
+                    datetime.datetime)
+        if rmnode.hasAttribute("discover_time"):
+            discover_time = xmldecode(rmnode.getAttribute("discover_time"), 
+                    datetime.datetime)
+        if rmnode.hasAttribute("provision_time"):
+            provision_time = xmldecode(rmnode.getAttribute("provision_time"),
+                    datetime.datetime)
+        if rmnode.hasAttribute("ready_time"):
+            ready_time = xmldecode(rmnode.getAttribute("ready_time"),
+                    datetime.datetime)
+        if rmnode.hasAttribute("release_time"):
+            release_time = xmldecode(rmnode.getAttribute("release_time"),
+                    datetime.datetime)
+        if rmnode.hasAttribute("failed_time"):
+            failed_time = xmldecode(rmnode.getAttribute("failed_time"),
+                    datetime.datetime)
+        """
+
+        ec.register_resource(rtype, guid = guid)
+        rm = ec.get_resource(guid)
+        rm.set_state_time(state, "_start_time", start_time)
+        rm.set_state_time(state, "_stop_time", stop_time)
+        rm.set_state_time(state, "_discover_time", discover_time)
+        rm.set_state_time(state, "_provision_time", provision_time)
+        rm.set_state_time(state, "_ready_time", ready_time)
+        rm.set_state_time(state, "_release_time", release_time)
+        rm.set_state_time(state, "_failed_time", failed_time)
+        
+        anode_list = rmnode.getElementsByTagName("attributes")
+        if anode_list:
+            aanode_list = anode_list[0].getElementsByTagName("attribute") 
+            for aanode in aanode_list:
+                name = xmldecode(aanode.getAttribute("name"))
+                value = xmldecode(aanode.getAttribute("value"))
+                tipe = xmldecode(aanode.getAttribute("type"))
+                value = to_type(tipe, value)
+                rm.set(name, value)
+
+        cnode_list = rmnode.getElementsByTagName("connections")
+        if cnode_list:
+            ccnode_list = cnode_list[0].getElementsByTagName("connection") 
+            for ccnode in ccnode_list:
+                guid2 = xmldecode(ccnode.getAttribute("guid"), int)
+                connections.add((guid, guid2))
+
+        tnode_list = rmnode.getElementsByTagName("traces")
+        if tnode_list:
+            ttnode_list = tnode_list[0].getElementsByTagName("trace") 
+            for ttnode in ttnode_list:
+                name = xmldecode(ttnode.getAttribute("name"))
+                ec.enable_trace(guid, name)
+
+        cnnode_list = rmnode.getElementsByTagName("conditions")
+        if cnnode_list:
+            ccnnode_list = cnnode_list[0].getElementsByTagName("condition") 
+            for ccnnode in ccnnode_list:
+                action = xmldecode(ccnnode.getAttribute("action"), int)
+                group = xmldecode(ccnnode.getAttribute("group"), eval) # list
+                state = xmldecode(ccnnode.getAttribute("state"), int)
+                time = xmldecode(ccnnode.getAttribute("time"))
+                time = to_type(STRING, time)
+                ec.register_condition(guid, action, group, state, time = time)
+                 
diff --git a/src/nepi/util/plot.py b/src/nepi/util/plot.py
deleted file mode 100644 (file)
index 97b146b..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-#
-#    NEPI, a framework to manage network experiments
-#    Copyright (C) 2013 INRIA
-#
-#    This program is free software: you can redistribute it and/or modify
-#    it under the terms of the GNU General Public License as published by
-#    the Free Software Foundation, either version 3 of the License, or
-#    (at your option) any later version.
-#
-#    This program is distributed in the hope that it will be useful,
-#    but WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#    GNU General Public License for more details.
-#
-#    You should have received a copy of the GNU General Public License
-#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
-
-import networkx
-import tempfile
-
-class Plotter(object):
-    def __init__(self, box):
-        self._graph = networkx.Graph(graph = dict(overlap = "false"))
-
-        traversed = set()
-        self._traverse_boxes(traversed, box)
-
-    def _traverse_boxes(self, traversed, box):
-        traversed.add(box.guid)
-
-        self._graph.add_node(box.label, 
-                width = 50/72.0, # 1 inch = 72 points
-                height = 50/72.0, 
-                shape = "circle")
-
-        for b in box.connections:
-            self._graph.add_edge(box.label, b.label)
-            if b.guid not in traversed:
-                self._traverse_boxes(traversed, b)
-
-    def plot(self):
-        f = tempfile.NamedTemporaryFile(delete=False)
-        networkx.draw_graphviz(self._graph)
-        networkx.write_dot(self._graph, f.name)
-        f.close()
-        return f.name
-
diff --git a/src/nepi/util/plotter.py b/src/nepi/util/plotter.py
new file mode 100644 (file)
index 0000000..ec57ee9
--- /dev/null
@@ -0,0 +1,107 @@
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+import logging
+import os
+
+try:
+    import networkx
+except ImportError:
+    msg = "Networkx library is not installed, you will not be able to plot."
+    logger = logging.Logger("Plotter")
+    logger.debug(msg)
+
+try:
+    import matplotlib.pyplot as plt
+except ImportError:
+    msg = ("Matplotlib library is not installed, you will not be able "
+        "generate PNG plots.")
+    logger = logging.Logger("Plotter")
+    logger.debug(msg)
+
+class PFormats:
+    DOT = "dot"
+    FIGURE = "figure"
+
+class ECPlotter(object):
+    def plot(self, ec, dirpath = None, format = PFormats.FIGURE, 
+            show = False):
+        graph, labels = self._ec2graph(ec)
+
+        add_extension = False
+
+        if not dirpath:
+            import tempfile
+            dirpath = tempfile.mkdtemp()
+        
+        fpath = os.path.join(dirpath, "%s_%s" % (ec.exp_id, ec.run_id)) 
+
+        if format == PFormats.FIGURE:
+            pos = networkx.graphviz_layout(graph, prog="neato")
+            networkx.draw(graph, pos = pos, node_color="white", 
+                    node_size = 500, with_labels=True)
+           
+            label = "\n".join(map(lambda v: "%s: %s" % (v[0], v[1]), labels.iteritems()))
+            plt.annotate(label, xy=(0.05, 0.95), xycoords='axes fraction')
+           
+            fpath += ".png"
+
+            plt.savefig(fpath, bbox_inches="tight")
+            
+            if show:
+                plt.show()
+
+        elif format == PFormats.DOT:
+            fpath += ".dot"
+
+            networkx.write_dot(graph, fpath)
+            
+            if show:
+                import subprocess
+                subprocess.call(["dot", "-Tps", fpath, "-o", "%s.ps" % fpath])
+                subprocess.call(["evince","%s.ps" % fpath])
+        
+        return fpath
+
+    def _ec2graph(self, ec):
+        graph = networkx.Graph(graph = dict(overlap = "false"))
+
+        labels = dict()
+        connections = set()
+
+        for guid, rm in ec._resources.iteritems():
+            label = rm.get_rtype()
+
+            graph.add_node(guid,
+                label = "%d %s" % (guid, label),
+                width = 50/72.0, # 1 inch = 72 points
+                height = 50/72.0, 
+                shape = "circle")
+
+            labels[guid] = label
+
+            for guid2 in rm.connections:
+                # Avoid adding a same connection twice
+                if (guid2, guid) not in connections:
+                    connections.add((guid, guid2))
+
+        for (guid1, guid2) in connections:
+            graph.add_edge(guid1, guid2)
+
+        return graph, labels
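+
+# Usage sketch (assumes an ExperimentController instance `ec` with registered
+# resources, and that networkx/matplotlib are importable):
+#
+#     plotter = ECPlotter()
+#     figpath = plotter.plot(ec, dirpath = "/tmp", format = PFormats.FIGURE)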
diff --git a/src/nepi/util/serializer.py b/src/nepi/util/serializer.py
new file mode 100644 (file)
index 0000000..7347c5b
--- /dev/null
@@ -0,0 +1,61 @@
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+import datetime
+import os
+
+class SFormats:
+    XML = "xml"
+    
+class ECSerializer(object):
+    def load(self, filepath, format = SFormats.XML):
+        if format == SFormats.XML:
+            from nepi.util.parsers.xml_parser import ECXMLParser
+            
+            parser = ECXMLParser()
+            f = open(filepath, "r")
+            xml = f.read()
+            f.close()
+
+            ec = parser.from_xml(xml)
+
+        return ec
+
+    def serialize(self, ec, format = SFormats.XML):
+        if format == SFormats.XML:
+            from nepi.util.parsers.xml_parser import ECXMLParser
+            
+            parser = ECXMLParser()
+            sec = parser.to_xml(ec)
+
+        return sec
+
+    def save(self, ec, dirpath, format = SFormats.XML):
+        date = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+        filename = "%s_%s" % (ec.exp_id, date)
+
+        if format == SFormats.XML:
+            filepath = os.path.join(dirpath, "%s.xml" % filename)
+            sec = self.serialize(ec, format = format)
+            f = open(filepath, "w")
+            f.write(sec)
+            f.close()
+
+        return filepath
+
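+# Usage sketch (assumes an ExperimentController instance `ec`):
+#
+#     serializer = ECSerializer()
+#     filepath = serializer.save(ec, "/tmp")   # writes <exp_id>_<timestamp>.xml
+#     ec2 = serializer.load(filepath)          # rebuilds the controller from XML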
index fafb99d..9c3b5d3 100644 (file)
@@ -21,6 +21,7 @@ import threading
 import hashlib
 import re
 import os
+import time
 
 from nepi.util.logger import Logger
 
@@ -254,12 +255,18 @@ class SFAAPI(object):
         self._slice_resources_batch.append(resource_hrn)
         resources_hrn_new = list()
         if self._count == len(self._total):
+            # _check_all_inslice returns True when every requested resource is
+            # already in the slice, or a resource count otherwise
+            check_all_inslice = self._check_all_inslice(self._slice_resources_batch, slicename)
+            if check_all_inslice is True:
+                return True
             for resource_hrn in self._slice_resources_batch:
                 resource_parts = resource_hrn.split('.')
                 resource_hrn = '.'.join(resource_parts[:2]) + '.' + '\\.'.join(resource_parts[2:])
                 resources_hrn_new.append(resource_hrn)
             with self.lock_slice:
-                self._sfi_exec_method('delete', slicename)
+                if check_all_inslice != 0:
+                    self._sfi_exec_method('delete', slicename)
+                    time.sleep(480)
+                
                 # Re implementing urn from hrn because the library sfa-common doesn't work for wilabt
                 resources_urn = self._get_urn(resources_hrn_new)
                 rspec = self.rspec_proc.build_sfa_rspec(slicename, resources_urn, properties, leases)
@@ -293,6 +300,24 @@ class SFAAPI(object):
         else:
             self._log.debug(" Waiting for more nodes to add the batch to the slice ")
 
+    def _check_all_inslice(self, resources_hrn, slicename):
+        """ Returns True if every resource in resources_hrn is already in the
+        slice, otherwise the number of resources currently in the slice
+        (0 if the slice has no active resources).
+        """
+        slice_res = self.get_slice_resources(slicename)['resource']
+        if slice_res:
+            if len(slice_res[0]['services']) != 0:
+                slice_res_hrn = self.get_resources_hrn(slice_res).values()
+                if self._compare_lists(slice_res_hrn, resources_hrn):
+                    return True
+                else:
+                    return len(slice_res_hrn)
+        return 0
+
+    def _compare_lists(self, list1, list2):
+        if len(list1) != len(list2):
+            return False
+        for item in list1:
+            if item not in list2:
+                return False
+        return True
+
     def _get_urn(self, resources_hrn):
         """
         Get urn from hrn.
index b06abdb..451a7b4 100644 (file)
@@ -404,7 +404,8 @@ def rspawn(command, pidfile,
         agent = None, 
         identity = None, 
         server_key = None,
-        tty = False):
+        tty = False,
+        strict_host_checking = True):
     """
     Spawn a remote command such that it will continue working asynchronously in 
     background. 
@@ -479,7 +480,8 @@ def rspawn(command, pidfile,
         agent = agent,
         identity = identity,
         server_key = server_key,
-        tty = tty ,
+        tty = tty,
+        strict_host_checking = strict_host_checking ,
         )
     
     if proc.wait():
@@ -496,7 +498,8 @@ def rgetpid(pidfile,
         gw = None,
         agent = None, 
         identity = None,
-        server_key = None):
+        server_key = None,
+        strict_host_checking = True):
     """
     Returns the pid and ppid of a process from a remote file where the 
     information was stored.
@@ -524,7 +527,8 @@ def rgetpid(pidfile,
         gw = gw,
         agent = agent,
         identity = identity,
-        server_key = server_key
+        server_key = server_key,
+        strict_host_checking = strict_host_checking
         )
         
     if proc.wait():
@@ -546,7 +550,8 @@ def rstatus(pid, ppid,
         gw = None,
         agent = None, 
         identity = None,
-        server_key = None):
+        server_key = None,
+        strict_host_checking = True):
     """
     Returns a code representing the the status of a remote process
 
@@ -572,7 +577,8 @@ def rstatus(pid, ppid,
         gw = gw,
         agent = agent,
         identity = identity,
-        server_key = server_key
+        server_key = server_key,
+        strict_host_checking = strict_host_checking
         )
     
     if proc.wait():
@@ -599,7 +605,8 @@ def rkill(pid, ppid,
         sudo = False,
         identity = None, 
         server_key = None, 
-        nowait = False):
+        nowait = False,
+        strict_host_checking = True):
     """
     Sends a kill signal to a remote process.
 
@@ -653,7 +660,8 @@ fi
         gw = gw,
         agent = agent,
         identity = identity,
-        server_key = server_key
+        server_key = server_key,
+        strict_host_checking = strict_host_checking
         )
     
     # wait, don't leave zombies around
diff --git a/src/nepi/util/statfuncs.py b/src/nepi/util/statfuncs.py
new file mode 100644 (file)
index 0000000..5c0b2ca
--- /dev/null
@@ -0,0 +1,46 @@
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+import math
+import numpy
+from scipy import stats
+
+def compute_mean(sample):
+    # TODO: Discard outliers !!!!
+
+    if not sample:
+        print " CANNOT COMPUTE STATS for ", sample
+        return (0, 0, 0, 0)
+
+    x = numpy.array(sample)
+
+    # sample mean and standard deviation
+    n, min_max, mean, var, skew, kurt = stats.describe(x)
+    std = math.sqrt(var)
+
+    # for the population mean and std ...
+    # mean = x.mean()
+    # std = x.std()
+    
+    # Calculate confidence interval t-distribution
+    ## BUG: Should use the quantile of the NORMAL distribution, not the Student's t quantile
+    ci = stats.t.interval(0.95, n-1, loc = mean, scale = std/math.sqrt(n))
+
+    return (mean, std, ci[0], ci[1])
+
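+# Usage sketch (illustrative values; requires numpy and scipy):
+#
+#     mean, std, ci_low, ci_high = compute_mean([9.8, 10.1, 10.0, 9.9])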
index 3d7c366..f7fbc85 100644 (file)
@@ -113,3 +113,11 @@ def stabsformat(sdate, dbase = None):
 
     return None
 
+def compute_delay_ms(timestamp2, timestamp1):
+    d1 = datetime.datetime.fromtimestamp(float(timestamp1))
+    d2 = datetime.datetime.fromtimestamp(float(timestamp2))
+    delay = d2 - d1
+
+    # convert the delay to milliseconds
+    return delay.total_seconds() * 1000
+
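+# Usage sketch (timestamps are Unix epoch seconds, e.g. str(time.time())):
+#
+#     compute_delay_ms("1411387200.500", "1411387200.250")   # -> 250.0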
diff --git a/test/design/box.py b/test/design/box.py
deleted file mode 100755 (executable)
index d1b9c03..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/env python
-#
-#    NEPI, a framework to manage network experiments
-#    Copyright (C) 2013 INRIA
-#
-#    This program is free software: you can redistribute it and/or modify
-#    it under the terms of the GNU General Public License as published by
-#    the Free Software Foundation, either version 3 of the License, or
-#    (at your option) any later version.
-#
-#    This program is distributed in the hope that it will be useful,
-#    but WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#    GNU General Public License for more details.
-#
-#    You should have received a copy of the GNU General Public License
-#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
-
-
-from nepi.design.box import Box 
-
-import unittest
-
-class BoxDesignTestCase(unittest.TestCase):
-    def test_simple_design(self):
-        node1 = Box()
-        node2 = Box()
-
-        node1.label = "uno"
-        node2.label = "dos"
-
-        node1.tadd('nodo')
-        node2.tadd('mynodo')
-
-        self.assertEquals(node1.tags, set(['nodo']))
-        self.assertEquals(node2.tags, set(['mynodo']))
-       
-        node1.a.hola = "chau"
-        node2.a.hello = "bye"
-
-        self.assertEquals(node1.a.hola, "chau")
-        self.assertEquals(node2.a.hello, "bye")
-
-        node1.connect(node2)
-        
-        self.assertEquals(node1.connections, set([node2]))
-        self.assertEquals(node2.connections, set([node1]))
-        self.assertTrue(node1.is_connected(node2))
-        self.assertTrue(node2.is_connected(node1))
-
-        self.assertEquals(node1.c.dos.a.hello, "bye")
-        self.assertEquals(node2.c.uno.a.hola, "chau")
-       
-        node2.disconnect(node1)
-
-        self.assertEquals(node1.connections, set([]))
-        self.assertEquals(node2.connections, set([]))
-        self.assertFalse(node1.is_connected(node2))
-        self.assertFalse(node2.is_connected(node1))
-
-        self.assertRaises(AttributeError, node1.c.dos)
-        self.assertRaises(AttributeError, node2.c.uno)
-
-
-if __name__ == '__main__':
-    unittest.main()
-
diff --git a/test/execution/runner.py b/test/execution/runner.py
new file mode 100755 (executable)
index 0000000..b06d383
--- /dev/null
@@ -0,0 +1,175 @@
+#!/usr/bin/env python
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.resource import ResourceManager, ResourceState, \
+        clsinit_copy, ResourceAction, ResourceFactory
+from nepi.execution.runner import ExperimentRunner
+
+import functools
+import os
+import shutil
+import tempfile
+import time
+import unittest
+
+reschedule_delay = "0.5s"
+deploy_time = 0
+run_time = 0
+
+class Link(ResourceManager):
+    _rtype = "dummy::Link"
+    def do_deploy(self):
+        time.sleep(deploy_time)
+        super(Link, self).do_deploy()
+        self.logger.debug(" -------- DEPLOYED ------- ")
+
+class Interface(ResourceManager):
+    _rtype = "dummy::Interface"
+
+    def do_deploy(self):
+        node = self.get_connected(Node.get_rtype())[0]
+        link = self.get_connected(Link.get_rtype())[0]
+
+        if node.state < ResourceState.READY or \
+                link.state < ResourceState.READY:
+            self.ec.schedule(reschedule_delay, self.deploy)
+            self.logger.debug(" -------- RESCHEDULING ------- ")
+        else:
+            time.sleep(deploy_time)
+            super(Interface, self).do_deploy()
+            self.logger.debug(" -------- DEPLOYED ------- ")
+
+class Node(ResourceManager):
+    _rtype = "dummy::Node"
+
+    def do_deploy(self):
+        self.logger.debug(" -------- DO_DEPLOY ------- ")
+        time.sleep(deploy_time)
+        super(Node, self).do_deploy()
+        self.logger.debug(" -------- DEPLOYED ------- ")
+
+class Application(ResourceManager):
+    _rtype = "dummy::Application"
+
+    def do_deploy(self):
+        node = self.get_connected(Node.get_rtype())[0]
+
+        if node.state < ResourceState.READY: 
+            self.ec.schedule(reschedule_delay, self.deploy)
+            self.logger.debug(" -------- RESCHEDULING ------- ")
+        else:
+            time.sleep(deploy_time)
+            super(Application, self).do_deploy()
+            self.logger.debug(" -------- DEPLOYED ------- ")
+
+    def do_start(self):
+        super(Application, self).do_start()
+        time.sleep(run_time)
+        self.ec.schedule("0s", self.stop)
+
+ResourceFactory.register_type(Application)
+ResourceFactory.register_type(Node)
+ResourceFactory.register_type(Interface)
+ResourceFactory.register_type(Link)
+
+class RunnerTestCase(unittest.TestCase):
+    def test_runner_max_runs(self):
+        node_count = 4
+        app_count = 2
+
+        ec = ExperimentController(exp_id = "max-runs-test")
+       
+        # Add simulated nodes and applications
+        nodes = list()
+        apps = list()
+        ifaces = list()
+
+        for i in xrange(node_count):
+            node = ec.register_resource("dummy::Node")
+            nodes.append(node)
+            
+            iface = ec.register_resource("dummy::Interface")
+            ec.register_connection(node, iface)
+            ifaces.append(iface)
+
+            for i in xrange(app_count):
+                app = ec.register_resource("dummy::Application")
+                ec.register_connection(node, app)
+                apps.append(app)
+
+        link = ec.register_resource("dummy::Link")
+
+        for iface in ifaces:
+            ec.register_connection(link, iface)
+
+        rnr = ExperimentRunner()
+        runs = rnr.run(ec, min_runs = 5, max_runs = 10, wait_guids = apps, 
+                wait_time = 0)
+
+        self.assertEquals(runs, 10)
+
+    def test_runner_convergence(self):
+        node_count = 4
+        app_count = 2
+
+        ec = ExperimentController(exp_id = "convergence-test")
+       
+        # Add simulated nodes and applications
+        nodes = list()
+        apps = list()
+        ifaces = list()
+
+        for i in xrange(node_count):
+            node = ec.register_resource("dummy::Node")
+            nodes.append(node)
+            
+            iface = ec.register_resource("dummy::Interface")
+            ec.register_connection(node, iface)
+            ifaces.append(iface)
+
+            for i in xrange(app_count):
+                app = ec.register_resource("dummy::Application")
+                ec.register_connection(node, app)
+                apps.append(app)
+
+        link = ec.register_resource("dummy::Link")
+
+        for iface in ifaces:
+            ec.register_connection(link, iface)
+
+        samples = [10, 10, 10, 10, 12, 10, 12, 10, 10, 11]
+        
+        def compute_metric_callback(samples, ec, run):
+            return samples[run-1]
+
+        metric_callback = functools.partial(compute_metric_callback, samples)
+
+        rnr = ExperimentRunner()
+        runs = rnr.run(ec, min_runs = 5, 
+                compute_metric_callback = metric_callback,
+                wait_guids = apps, 
+                wait_time = 0)
+
+        self.assertEquals(runs, 10)
+                       
+if __name__ == '__main__':
+    unittest.main()
+
index a1f1397..0a76dcc 100755 (executable)
@@ -250,6 +250,7 @@ main (void)
 
         ec.register_connection(app, node)
 
+
         ec.deploy()
 
         ec.wait_finished([app])
old mode 100644 (file)
new mode 100755 (executable)
old mode 100644 (file)
new mode 100755 (executable)
old mode 100644 (file)
new mode 100755 (executable)
old mode 100644 (file)
new mode 100755 (executable)
diff --git a/test/resources/linux/gretunnel.py b/test/resources/linux/gretunnel.py
new file mode 100755 (executable)
index 0000000..6317ae0
--- /dev/null
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController 
+
+from test_utils import skipIfAnyNotAliveWithIdentity
+
+import os
+import time
+import unittest
+
+## TODO: VALIDATE THIS TEST!
+
+class LinuxGRETunnelTestCase(unittest.TestCase):
+    def setUp(self):
+        self.host1 = "roseval.pl.sophia.inria.fr"
+        self.host2 = "138.96.118.11"
+        self.user1 = "inria_nepi"
+        self.user2 = "omflab"
+        self.identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
+        self.netblock = "192.168.1"
+
+    @skipIfAnyNotAliveWithIdentity
+    def t_tap_gre_tunnel(self, user1, host1, identity1, user2, host2, 
+            identity2):
+
+        ec = ExperimentController(exp_id = "test-tap-gre-tunnel")
+        
+        node1 = ec.register_resource("LinuxNode")
+        ec.set(node1, "hostname", host1)
+        ec.set(node1, "username", user1)
+        ec.set(node1, "identity", identity1)
+        ec.set(node1, "cleanHome", True)
+        ec.set(node1, "cleanProcesses", True)
+
+        tap1 = ec.register_resource("LinuxTap")
+        ec.set(tap1, "endpoint_ip", "%s.1" % self.netblock)
+        ec.set(tap1, "endpoint_prefix", 32)
+        ec.register_connection(tap1, node1)
+
+        node2 = ec.register_resource("LinuxNode")
+        ec.set(node2, "hostname", host2)
+        ec.set(node2, "username", user2)
+        ec.set(node2, "identity", identity2)
+        ec.set(node2, "cleanHome", True)
+        ec.set(node2, "cleanProcesses", True)
+
+        tap2 = ec.register_resource("LinuxTap")
+        ec.set(tap2, "endpoint_ip", "%s.2" % self.netblock)
+        ec.set(tap2, "endpoint_prefix", 32)
+        ec.register_connection(tap2, node2)
+
+        gretun = ec.register_resource("LinuxGRETunnel")
+        ec.register_connection(tap1, gretun)
+        ec.register_connection(tap2, gretun)
+
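+        # Ping node2's tunnel endpoint from node1 to verify traffic crosses the GRE tunnel.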
+        app = ec.register_resource("LinuxApplication")
+        cmd = "ping -c3 %s.2" % self.netblock
+        ec.set(app, "command", cmd)
+        ec.register_connection(app, node1)
+
+        ec.deploy()
+
+        ec.wait_finished(app)
+
+        ping = ec.trace(app, 'stdout')
+        expected = """3 packets transmitted, 3 received, 0% packet loss"""
+        self.assertTrue(ping.find(expected) > -1)
+        
+        if_name = ec.get(tap1, "deviceName")
+        self.assertTrue(if_name.startswith("tap"))
+        
+        if_name = ec.get(tap2, "deviceName")
+        self.assertTrue(if_name.startswith("tap"))
+
+        ec.shutdown()
+
+    @skipIfAnyNotAliveWithIdentity
+    def t_tun_gre_tunnel(self, user1, host1, identity1, user2, host2, 
+            identity2):
+
+        ec = ExperimentController(exp_id = "test-tun-gre-tunnel")
+        
+        node1 = ec.register_resource("LinuxNode")
+        ec.set(node1, "hostname", host1)
+        ec.set(node1, "username", user1)
+        ec.set(node1, "identity", identity1)
+        ec.set(node1, "cleanHome", True)
+        ec.set(node1, "cleanProcesses", True)
+
+        tun1 = ec.register_resource("LinuxTun")
+        ec.set(tun1, "endpoint_ip", "%s.1" % self.netblock)
+        ec.set(tun1, "endpoint_prefix", 32)
+        ec.register_connection(tun1, node1)
+
+        node2 = ec.register_resource("LinuxNode")
+        ec.set(node2, "hostname", host2)
+        ec.set(node2, "username", user2)
+        ec.set(node2, "identity", identity2)
+        ec.set(node2, "cleanHome", True)
+        ec.set(node2, "cleanProcesses", True)
+
+        tun2 = ec.register_resource("LinuxTun")
+        ec.set(tun2, "endpoint_ip", "%s.2" % self.netblock)
+        ec.set(tun2, "endpoint_prefix", 32)
+        ec.register_connection(tun2, node2)
+
+        udptun = ec.register_resource("LinuxGRETunnel")
+        ec.register_connection(tun1, udptun)
+        ec.register_connection(tun2, udptun)
+
+        app = ec.register_resource("LinuxApplication")
+        cmd = "ping -c3 %s.2" % self.netblock
+        ec.set(app, "command", cmd)
+        ec.register_connection(app, node1)
+
+        ec.deploy()
+
+        ec.wait_finished(app)
+
+        ping = ec.trace(app, 'stdout')
+        expected = """3 packets transmitted, 3 received, 0% packet loss"""
+        self.assertTrue(ping.find(expected) > -1)
+        
+        if_name = ec.get(tun1, "deviceName")
+        self.assertTrue(if_name.startswith("tun"))
+        
+        if_name = ec.get(tun2, "deviceName")
+        self.assertTrue(if_name.startswith("tun"))
+
+        ec.shutdown()
+
+    def test_tap_gre_tunnel(self):
+        self.t_tap_gre_tunnel(self.user1, self.host1, self.identity,
+                self.user2, self.host2, self.identity)
+
+    def test_tun_gre_tunnel(self):
+        self.t_tun_gre_tunnel(self.user1, self.host1, self.identity,
+                self.user2, self.host2, self.identity)
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/test/resources/linux/multirun.py b/test/resources/linux/multirun.py
new file mode 100755 (executable)
index 0000000..dbee7df
--- /dev/null
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController 
+from nepi.execution.runner import ExperimentRunner 
+
+from test_utils import skipIfNotAlive, skipInteractive
+
+import functools
+import glob
+import os
+import re
+import shutil
+import time
+import tempfile
+import unittest
+
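+# Extracts the min/avg/max/mdev rtt values from the summary line of ping's output.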
+_ping_re = re.compile("[^/]+rtt min/avg/max/mdev = (?P<min>\d\.\d+)/(?P<avg>\d\.\d+)/(?P<max>\d\.\d+)/(?P<mdev>\d\.\d+)[^/]+", re.MULTILINE)
+
+class LinuxMultiRunTestCase(unittest.TestCase):
+    def setUp(self):
+        self.fedora_host = "nepi2.pl.sophia.inria.fr"
+        self.fedora_user = "inria_nepi"
+
+        self.ubuntu_host = "roseval.pl.sophia.inria.fr"
+        self.ubuntu_user = "inria_nepi"
+        
+        self.target = "nepi5.pl.sophia.inria.fr"
+
+    @skipIfNotAlive
+    def t_simple_multirun(self, host, user, depends):
+
+        dirpath = tempfile.mkdtemp()
+
+        ec = ExperimentController(exp_id = "test-condition-multirun", 
+                local_dir = dirpath)
+        
+        node = ec.register_resource("LinuxNode")
+        ec.set(node, "hostname", host)
+        ec.set(node, "username", user)
+        ec.set(node, "cleanHome", True)
+        ec.set(node, "cleanProcesses", True)
+
+        ping = ec.register_resource("LinuxApplication")
+        ec.set(ping, "command", "ping -c10 nepi.inria.fr")
+        ec.register_connection(ping, node)
+
+        collector = ec.register_resource("Collector")
+        ec.set(collector, "traceName", "stdout")
+        ec.register_connection(ping, collector)
+
+        def compute_metric_callback(ping, ec, run):
+            stdout = ec.trace(ping, "stdout")
+
+            m = _ping_re.match(stdout)
+            if not m:
+                return None
+            
+            return float(m.groupdict()["min"])
+
+        metric_callback = functools.partial(compute_metric_callback, ping)
+
+        rnr = ExperimentRunner()
+        runs = rnr.run(ec, min_runs = 5, 
+                compute_metric_callback = metric_callback,
+                wait_guids = [ping],
+                wait_time = 0)
+
+        self.assertTrue(runs >= 5)
+
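+        # Each run should leave its own results directory containing exactly one
+        # collected stdout trace.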
+        dircount = 0
+
+        for d in os.listdir(ec.exp_dir):
+            path = os.path.join(ec.exp_dir, d)
+            if os.path.isdir(path):
+                dircount += 1
+                logs = glob.glob(os.path.join(path, "*.stdout"))
+                self.assertEquals(len(logs), 1)
+        
+        self.assertEquals(runs, dircount)
+
+        shutil.rmtree(dirpath)
+
+    def test_simple_multirun_fedora(self):
+        self.t_simple_multirun(self.fedora_host, self.fedora_user, "nc")
+
+    def test_simple_multirun_ubuntu(self):
+        self.t_simple_multirun(self.ubuntu_host, self.ubuntu_user, "netcat")
+
+if __name__ == '__main__':
+    unittest.main()
+
old mode 100644 (file)
new mode 100755 (executable)
index f215fa3..78d517e
@@ -81,9 +81,9 @@ class LinuxNS3CCNDceApplicationTest(unittest.TestCase):
         #ec.set(node, "cleanHome", True)
 
         simu = ec.register_resource("LinuxNS3Simulation")
-        ec.set(simu, "verbose", True)
-        ec.set(simu, "buildMode", "debug")
-        ec.set(simu, "nsLog", "DceApplication")
+        #ec.set(simu, "verbose", True)
+        #ec.set(simu, "buildMode", "debug")
+        #ec.set(simu, "nsLog", "DceApplication")
         ec.register_connection(simu, node)
 
         nsnode1 = add_ns3_node(ec, simu)
old mode 100644 (file)
new mode 100755 (executable)
old mode 100644 (file)
new mode 100755 (executable)
old mode 100644 (file)
new mode 100755 (executable)
index 8528f69..3f9443a
@@ -136,6 +136,10 @@ class LinuxNS3DceApplicationTest(unittest.TestCase):
         self.fedora_user = "inria_nepi"
         self.fedora_identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
 
+        self.ubuntu_host = "roseval.pl.sophia.inria.fr"
+        self.ubuntu_user = "inria_nepi"
+        self.ubuntu_identity = "%s/.ssh/id_rsa" % (os.environ['HOME'])
+
     @skipIfNotAlive
     def t_dce_ping(self, host, user = None, identity = None):
         ec = ExperimentController(exp_id = "test-dce-ping")
@@ -248,10 +252,17 @@ class LinuxNS3DceApplicationTest(unittest.TestCase):
         ### create applications
         ccnd1 = ec.register_resource("ns3::LinuxCCNDceApplication")
 
-        # NOTE THAT INSTALLATION MIGHT FAIL IF openjdk-6-jdk is not installed
-        ec.set(ccnd1, "depends", "libpcap0.8-dev openjdk-6-jdk ant1.8 autoconf "
-            "libssl-dev libexpat-dev libpcap-dev libecryptfs0 libxml2-utils auto"
-            "make gawk gcc g++ git-core pkg-config libpcre3-dev openjdk-6-jre-lib")
+        if host == self.fedora_host:
+            depends = ( " autoconf openssl-devel  expat-devel libpcap-devel "
+                " ecryptfs-utils-devel libxml2-devel automake gawk " 
+                " gcc gcc-c++ git pcre-devel make ")
+        else: # UBUNTU
+            # NOTE THAT INSTALLATION MIGHT FAIL IF openjdk-6-jdk is not installed
+            depends = ( "libpcap0.8-dev openjdk-6-jdk ant1.8 autoconf "
+                    "libssl-dev libexpat-dev libpcap-dev libecryptfs0 libxml2-utils auto"
+                    "make gawk gcc g++ git-core pkg-config libpcre3-dev openjdk-6-jre-lib")
+
+        ec.set (ccnd1, "depends", depends)
         ec.set (ccnd1, "sources", "http://www.ccnx.org/releases/ccnx-0.7.2.tar.gz")
         ec.set (ccnd1, "build", "tar zxf ${SRC}/ccnx-0.7.2.tar.gz && "
                 "cd ccnx-0.7.2 && "
@@ -341,7 +352,10 @@ class LinuxNS3DceApplicationTest(unittest.TestCase):
         ec.shutdown()
 
     def test_dce_ping_fedora(self):
-        self.t_dce_ping(self.fedora_host, self.fedora_user, self.fedora_identity)
+        self.t_dce_ping(self.fedora_host, self.fedora_user, self.fedora_identity) 
+
+    def test_dce_ping_ubuntu(self):
+        self.t_dce_ping(self.ubuntu_host, self.ubuntu_user, self.ubuntu_identity)
 
     def test_dce_ping_local(self):
         self.t_dce_ping("localhost")
@@ -349,6 +363,9 @@ class LinuxNS3DceApplicationTest(unittest.TestCase):
     def test_dce_ccn_fedora(self):
         self.t_dce_ccn(self.fedora_host, self.fedora_user, self.fedora_identity)
 
+    def test_dce_ccn_ubuntu(self):
+        self.t_dce_ccn(self.ubuntu_host, self.ubuntu_user, self.ubuntu_identity)
+
     def test_dce_ccn_local(self):
         self.t_dce_ccn("localhost")
 
old mode 100644 (file)
new mode 100755 (executable)
old mode 100644 (file)
new mode 100755 (executable)
diff --git a/test/resources/linux/ns3/serialization.py b/test/resources/linux/ns3/serialization.py
new file mode 100755 (executable)
index 0000000..fbf665f
--- /dev/null
@@ -0,0 +1,483 @@
+#!/usr/bin/env python
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController 
+from nepi.execution.trace import TraceAttr
+
+from test_utils import skipIfNotAlive
+
+import os
+import shutil
+import time
+import tempfile
+import unittest
+
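+# Helpers to assemble ns-3 topologies: a node with its IP/ARP/ICMP/UDP stack,
+# point-to-point/CSMA/wifi devices, mobility models and channels.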
+def add_ns3_node(ec, simu):
+    node = ec.register_resource("ns3::Node")
+    ec.register_connection(node, simu)
+
+    ipv4 = ec.register_resource("ns3::Ipv4L3Protocol")
+    ec.register_connection(node, ipv4)
+
+    arp = ec.register_resource("ns3::ArpL3Protocol")
+    ec.register_connection(node, arp)
+    
+    icmp = ec.register_resource("ns3::Icmpv4L4Protocol")
+    ec.register_connection(node, icmp)
+
+    udp = ec.register_resource("ns3::UdpL4Protocol")
+    ec.register_connection(node, udp)
+
+    return node
+
+def add_point2point_device(ec, node, ip, prefix):
+    dev = ec.register_resource("ns3::PointToPointNetDevice")
+    ec.set(dev, "ip", ip)
+    ec.set(dev, "prefix", prefix)
+    ec.register_connection(node, dev)
+
+    queue = ec.register_resource("ns3::DropTailQueue")
+    ec.register_connection(dev, queue)
+
+    return dev
+
+def add_csma_device(ec, node, ip, prefix):
+    dev = ec.register_resource("ns3::CsmaNetDevice")
+    ec.set(dev, "ip", ip)
+    ec.set(dev, "prefix", prefix)
+    ec.register_connection(node, dev)
+
+    queue = ec.register_resource("ns3::DropTailQueue")
+    ec.register_connection(dev, queue)
+
+    return dev
+
+def add_wifi_device(ec, node, ip, prefix, 
+        access_point = False):
+    dev = ec.register_resource("ns3::WifiNetDevice")
+    ec.set(dev, "ip", ip)
+    ec.set(dev, "prefix", prefix)
+    ec.register_connection(node, dev)
+
+    phy = ec.register_resource("ns3::YansWifiPhy")
+    ec.set(phy, "Standard", "WIFI_PHY_STANDARD_80211a")
+    ec.register_connection(dev, phy)
+
+    error = ec.register_resource("ns3::NistErrorRateModel")
+    ec.register_connection(phy, error)
+
+    manager = ec.register_resource("ns3::ArfWifiManager")
+    ec.register_connection(dev, manager)
+
+    if access_point:
+        mac = ec.register_resource("ns3::ApWifiMac")
+    else:
+        mac = ec.register_resource("ns3::StaWifiMac")
+
+    ec.set(mac, "Standard", "WIFI_PHY_STANDARD_80211a")
+    ec.register_connection(dev, mac)
+
+    return dev, phy
+
+def add_random_mobility(ec, node, x, y, z, speed, bounds_width, 
+        bounds_height):
+    position = "%d:%d:%d" % (x, y, z)
+    bounds = "0|%d|0|%d" % (bounds_width, bounds_height) 
+    speed = "ns3::UniformRandomVariable[Min=%d|Max=%s]" % (speed, speed)
+    pause = "ns3::ConstantRandomVariable[Constant=1.0]"
+    
+    mobility = ec.register_resource("ns3::RandomDirection2dMobilityModel")
+    ec.set(mobility, "Position", position)
+    ec.set(mobility, "Bounds", bounds)
+    ec.set(mobility, "Speed", speed)
+    ec.set(mobility, "Pause",  pause)
+    ec.register_connection(node, mobility)
+    return mobility
+
+def add_constant_mobility(ec, node, x, y, z):
+    mobility = ec.register_resource("ns3::ConstantPositionMobilityModel") 
+    position = "%d:%d:%d" % (x, y, z)
+    ec.set(mobility, "Position", position)
+    ec.register_connection(node, mobility)
+    return mobility
+
+def add_wifi_channel(ec):
+    channel = ec.register_resource("ns3::YansWifiChannel")
+    delay = ec.register_resource("ns3::ConstantSpeedPropagationDelayModel")
+    ec.register_connection(channel, delay)
+
+    loss  = ec.register_resource("ns3::LogDistancePropagationLossModel")
+    ec.register_connection(channel, loss)
+
+    return channel
+
+class LinuxNS3SimulationSerializationTest(unittest.TestCase):
+    def setUp(self):
+        #self.fedora_host = "nepi2.pl.sophia.inria.fr"
+        self.fedora_host = "planetlab1.informatik.uni-erlangen.de"
+        self.fedora_user = "inria_nepi"
+        self.fedora_identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
+
+    @skipIfNotAlive
+    def t_wifi_serialize(self, host, user = None, identity = None):
+        bounds_width = bounds_height = 200
+        x = y = 100
+        speed = 1
+
+        dirpath = tempfile.mkdtemp()
+        
+        ec = ExperimentController(exp_id = "test-ns3-wifi-ping")
+        
+        node = ec.register_resource("LinuxNode")
+        if host == "localhost":
+            ec.set(node, "hostname", "localhost")
+        else:
+            ec.set(node, "hostname", host)
+            ec.set(node, "username", user)
+            ec.set(node, "identity", identity)
+
+        ec.set(node, "cleanProcesses", True)
+        #ec.set(node, "cleanHome", True)
+
+        simu = ec.register_resource("LinuxNS3Simulation")
+        ec.set(simu, "verbose", True)
+        ec.register_connection(simu, node)
+
+        nsnode1 = add_ns3_node(ec, simu)
+        dev1, phy1 = add_wifi_device(ec, nsnode1, "10.0.0.1", "24", access_point = True)
+        mobility1 = add_constant_mobility(ec, nsnode1, x, y, 0)
+
+        nsnode2 = add_ns3_node(ec, simu)
+        dev2, phy2 = add_wifi_device(ec, nsnode2, "10.0.0.2", "24", access_point = False)
+        mobility2 = add_constant_mobility(ec, nsnode2, x, y, 0)
+        #mobility2 = add_random_mobility(ec, nsnode2, x, y, 0, speed, bounds_width, bounds_height)
+
+        # Create channel
+        chan = add_wifi_channel(ec)
+        ec.register_connection(chan, phy1)
+        ec.register_connection(chan, phy2)
+
+        ### create pinger
+        ping = ec.register_resource("ns3::V4Ping")
+        ec.set (ping, "Remote", "10.0.0.1")
+        ec.set (ping, "Interval", "1s")
+        ec.set (ping, "Verbose", True)
+        ec.set (ping, "StartTime", "1s")
+        ec.set (ping, "StopTime", "21s")
+        ec.register_connection(ping, nsnode2)
+
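+        # Serialize the experiment description before deploying, so it can be
+        # reloaded and re-run below.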
+        filepath = ec.save(dirpath)
+        print filepath
+        
+        ec.deploy()
+
+        ec.wait_finished([ping])
+        
+        stdout = ec.trace(simu, "stdout")
+
+        expected = "20 packets transmitted, 20 received, 0% packet loss"
+        self.assertTrue(stdout.find(expected) > -1)
+
+        ec.shutdown()
+
+        # Load serialized experiment
+        ec2 = ExperimentController.load(filepath)
+        
+        ec2.deploy()
+
+        ec2.wait_finished([ping])
+        
+        self.assertEquals(len(ec.resources), len(ec2.resources))
+        
+        stdout = ec2.trace(simu, "stdout")
+        expected = "20 packets transmitted, 20 received, 0% packet loss"
+        self.assertTrue(stdout.find(expected) > -1)
+        
+        ec2.shutdown()
+
+        shutil.rmtree(dirpath)
+
+    @skipIfNotAlive
+    def t_routing_serialize(self, host, user = None, identity = None):
+        """ 
+        network topology:
+                                n4
+                                |
+           n1 -- p2p -- n2 -- csma -- n5 -- p2p -- n6
+           |                    | 
+           ping n6              n3
+           
+
+        """
+        dirpath = tempfile.mkdtemp()
+        
+        ec = ExperimentController(exp_id = "test-ns3-routes")
+        
+        node = ec.register_resource("LinuxNode")
+        if host == "localhost":
+            ec.set(node, "hostname", host)
+        else:
+            ec.set(node, "hostname", host)
+            ec.set(node, "username", user)
+            ec.set(node, "identity", identity)
+
+        ec.set(node, "cleanProcesses", True)
+        #ec.set(node, "cleanHome", True)
+
+        simu = ec.register_resource("LinuxNS3Simulation")
+        ec.set(simu, "verbose", True)
+        ec.register_connection(simu, node)
+
+        nsnode1 = add_ns3_node(ec, simu)
+        p2p12 = add_point2point_device(ec, nsnode1, "10.0.0.1", "30")
+
+        nsnode2 = add_ns3_node(ec, simu)
+        p2p21 = add_point2point_device(ec, nsnode2, "10.0.0.2", "30")
+        csma2 = add_csma_device(ec, nsnode2, "10.0.1.1", "24")
+
+        nsnode3 = add_ns3_node(ec, simu)
+        csma3 = add_csma_device(ec, nsnode3, "10.0.1.2", "24")
+
+        nsnode4 = add_ns3_node(ec, simu)
+        csma4 = add_csma_device(ec, nsnode4, "10.0.1.3", "24")
+
+        nsnode5 = add_ns3_node(ec, simu)
+        p2p56 = add_point2point_device(ec, nsnode5, "10.0.2.1", "30")
+        csma5 = add_csma_device(ec, nsnode5, "10.0.1.4", "24")
+
+        nsnode6 = add_ns3_node(ec, simu)
+        p2p65 = add_point2point_device(ec, nsnode6, "10.0.2.2", "30")
+
+        # P2P chan1
+        p2p_chan1 = ec.register_resource("ns3::PointToPointChannel")
+        ec.set(p2p_chan1, "Delay", "0s")
+        ec.register_connection(p2p_chan1, p2p12)
+        ec.register_connection(p2p_chan1, p2p21)
+
+        # CSMA chan
+        csma_chan = ec.register_resource("ns3::CsmaChannel")
+        ec.set(csma_chan, "Delay", "0s")
+        ec.register_connection(csma_chan, csma2)
+        ec.register_connection(csma_chan, csma3)
+        ec.register_connection(csma_chan, csma4)
+        ec.register_connection(csma_chan, csma5)
+
+        # P2P chan2
+        p2p_chan2 = ec.register_resource("ns3::PointToPointChannel")
+        ec.set(p2p_chan2, "Delay", "0s")
+        ec.register_connection(p2p_chan2, p2p56)
+        ec.register_connection(p2p_chan2, p2p65)
+
+        # Add routes - n1 - n6
+        r1 = ec.register_resource("ns3::Route")
+        ec.set(r1, "network", "10.0.2.0")
+        ec.set(r1, "prefix", "30")
+        ec.set(r1, "nexthop", "10.0.0.2")
+        ec.register_connection(r1, nsnode1)
+
+        # Add routes - n2 - n6
+        r2 = ec.register_resource("ns3::Route")
+        ec.set(r2, "network", "10.0.2.0")
+        ec.set(r2, "prefix", "30")
+        ec.set(r2, "nexthop", "10.0.1.4")
+        ec.register_connection(r2, nsnode2)
+
+        # Add routes - n5 - n1
+        r5 = ec.register_resource("ns3::Route")
+        ec.set(r5, "network", "10.0.0.0")
+        ec.set(r5, "prefix", "30")
+        ec.set(r5, "nexthop", "10.0.1.1")
+        ec.register_connection(r5, nsnode5)
+
+        # Add routes - n6 - n1
+        r6 = ec.register_resource("ns3::Route")
+        ec.set(r6, "network", "10.0.0.0")
+        ec.set(r6, "prefix", "30")
+        ec.set(r6, "nexthop", "10.0.2.1")
+        ec.register_connection(r6, nsnode6)
+
+        ### create pinger
+        ping = ec.register_resource("ns3::V4Ping")
+        ec.set (ping, "Remote", "10.0.2.2")
+        ec.set (ping, "Interval", "1s")
+        ec.set (ping, "Verbose", True)
+        ec.set (ping, "StartTime", "1s")
+        ec.set (ping, "StopTime", "21s")
+        ec.register_connection(ping, nsnode1)
+
+        filepath = ec.save(dirpath)
+        print filepath
+        
+        ec.deploy()
+
+        ec.wait_finished([ping])
+        
+        stdout = ec.trace(simu, "stdout")
+
+        expected = "20 packets transmitted, 20 received, 0% packet loss"
+        self.assertTrue(stdout.find(expected) > -1)
+
+        ec.shutdown()
+
+        # Load serialized experiment
+        ec2 = ExperimentController.load(filepath)
+        
+        ec2.deploy()
+
+        ec2.wait_finished([ping])
+        
+        self.assertEquals(len(ec.resources), len(ec2.resources))
+        
+        stdout = ec2.trace(simu, "stdout")
+        expected = "20 packets transmitted, 20 received, 0% packet loss"
+        self.assertTrue(stdout.find(expected) > -1)
+        
+        ec2.shutdown()
+
+        shutil.rmtree(dirpath)
+
+    @skipIfNotAlive
+    def t_dce_serialize(self, host, user = None, identity = None):
+        dirpath = tempfile.mkdtemp()
+        
+        ec = ExperimentController(exp_id = "test-ns3-dce")
+        
+        node = ec.register_resource("LinuxNode")
+        if host == "localhost":
+            ec.set(node, "hostname", host)
+        else:
+            ec.set(node, "hostname", host)
+            ec.set(node, "username", user)
+            ec.set(node, "identity", identity)
+
+        ec.set(node, "cleanProcesses", True)
+        #ec.set(node, "cleanHome", True)
+
+        simu = ec.register_resource("LinuxNS3Simulation")
+        ec.set(simu, "verbose", True)
+        ec.register_connection(simu, node)
+
+        nsnode1 = add_ns3_node(ec, simu)
+        p2p1 = add_point2point_device(ec, nsnode1, "10.0.0.1", "30")
+        ec.set(p2p1, "DataRate", "5Mbps")
+
+        nsnode2 = add_ns3_node(ec, simu)
+        p2p2 = add_point2point_device(ec, nsnode2, "10.0.0.2", "30")
+        ec.set(p2p2, "DataRate", "5Mbps")
+
+        # Create channel
+        chan = ec.register_resource("ns3::PointToPointChannel")
+        ec.set(chan, "Delay", "2ms")
+
+        ec.register_connection(chan, p2p1)
+        ec.register_connection(chan, p2p2)
+
+        ### create applications
+        udp_perf = ec.register_resource("ns3::DceApplication")
+        ec.set (udp_perf, "binary", "udp-perf")
+        ec.set (udp_perf, "stackSize", 1<<20)
+        ec.set (udp_perf, "arguments", "--duration=10;--nodes=2")
+        ec.set (udp_perf, "StartTime", "1s")
+        ec.set (udp_perf, "StopTime", "20s")
+        ec.register_connection(udp_perf, nsnode1)
+
+        udp_perf_client = ec.register_resource("ns3::DceApplication")
+        ec.set (udp_perf_client, "binary", "udp-perf")
+        ec.set (udp_perf_client, "stackSize", 1<<20)
+        ec.set (udp_perf_client, "arguments", "--client;--nodes=2;--host=10.0.0.1;--duration=10")
+        ec.set (udp_perf_client, "StartTime", "2s")
+        ec.set (udp_perf_client, "StopTime", "20s")
+        ec.register_connection(udp_perf_client, nsnode2)
+
+        filepath = ec.save(dirpath)
+        
+        ec.deploy()
+
+        ec.wait_finished([udp_perf_client])
+
+        # Give time to flush the streams
+        import time
+        time.sleep(5) 
+
+        expected = "udp-perf --duration=10 --nodes=2"
+        cmdline = ec.trace(udp_perf, "cmdline")
+        self.assertTrue(cmdline.find(expected) > -1, cmdline)
+
+        expected = "Start Time: NS3 Time:          1s ("
+        status = ec.trace(udp_perf, "status")
+        self.assertTrue(status.find(expected) > -1, status)
+
+        expected = "received=1500 bytes, 1 reads (@1500 bytes) 1500"
+        stdout = ec.trace(udp_perf, "stdout")
+        self.assertTrue(stdout.find(expected) > -1, stdout)
+
+        ec.shutdown()
+
+        # Load serialized experiment
+        ec2 = ExperimentController.load(filepath)
+        
+        ec2.deploy()
+        ec2.wait_finished([udp_perf_client])
+
+        # Give time to flush the streams
+        time.sleep(5) 
+       
+        self.assertEquals(len(ec.resources), len(ec2.resources))
+        expected = "udp-perf --duration=10 --nodes=2"
+        cmdline = ec2.trace(udp_perf, "cmdline")
+        self.assertTrue(cmdline.find(expected) > -1, cmdline)
+
+        expected = "Start Time: NS3 Time:          1s ("
+        status = ec2.trace(udp_perf, "status")
+        self.assertTrue(status.find(expected) > -1, status)
+
+        expected = "received=1500 bytes, 1 reads (@1500 bytes) 1500"
+        stdout = ec2.trace(udp_perf, "stdout")
+        self.assertTrue(stdout.find(expected) > -1, stdout)
+
+        ec2.shutdown()
+
+        shutil.rmtree(dirpath)
+    
+    def test_wifi_serialize_fedora(self):
+        self.t_wifi_serialize(self.fedora_host, self.fedora_user, self.fedora_identity)
+
+    def test_wifi_serialize_local(self):
+        self.t_wifi_serialize("localhost")
+
+    def test_routing_serialize_fedora(self):
+        self.t_routing_serialize(self.fedora_host, self.fedora_user, self.fedora_identity)
+
+    def test_routing_serialize_local(self):
+        self.t_routing_serialize("localhost")
+
+    def test_dce_serialize_fedora(self):
+        self.t_dce_serialize(self.fedora_host, self.fedora_user, self.fedora_identity)
+
+    def test_dce_serialize_local(self):
+        self.t_dce_serialize("localhost")
+
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/test/resources/linux/serialization.py b/test/resources/linux/serialization.py
new file mode 100755 (executable)
index 0000000..c133ab3
--- /dev/null
@@ -0,0 +1,114 @@
+#!/usr/bin/env python
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController 
+from nepi.execution.resource import ResourceState, ResourceAction
+from nepi.execution.trace import TraceAttr
+
+from test_utils import skipIfNotAlive, skipInteractive
+
+import os
+import shutil
+import time
+import tempfile
+import unittest
+
+class LinuxSerializationTestCase(unittest.TestCase):
+    def setUp(self):
+        self.fedora_host = "nepi2.pl.sophia.inria.fr"
+        self.fedora_user = "inria_nepi"
+
+        self.ubuntu_host = "roseval.pl.sophia.inria.fr"
+        self.ubuntu_user = "inria_nepi"
+        
+        self.target = "nepi5.pl.sophia.inria.fr"
+
+    @skipIfNotAlive
+    def t_condition_serialize(self, host, user, depends):
+
+        dirpath = tempfile.mkdtemp()
+
+        ec = ExperimentController(exp_id="test-condition-serial")
+        
+        node = ec.register_resource("LinuxNode")
+        ec.set(node, "hostname", host)
+        ec.set(node, "username", user)
+        ec.set(node, "cleanHome", True)
+        ec.set(node, "cleanProcesses", True)
+
+        server = ec.register_resource("LinuxApplication")
+        cmd = "echo 'HOLA' | nc -l 3333"
+        ec.set(server, "command", cmd)
+        ec.set(server, "depends", depends)
+        ec.register_connection(server, node)
+
+        client = ec.register_resource("LinuxApplication")
+        cmd = "nc 127.0.0.1 3333"
+        ec.set(client, "command", cmd)
+        ec.register_connection(client, node)
+
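+        # The client may only start once the server application has reached the STARTED state.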
+        ec.register_condition(client, ResourceAction.START, server, ResourceState.STARTED)
+
+        apps = [client, server]
+        
+        filepath = ec.save(dirpath)
+        
+        ec.deploy()
+
+        ec.wait_finished(apps)
+
+        self.assertTrue(ec.state(node) == ResourceState.STARTED)
+        self.assertTrue(ec.state(server) == ResourceState.STOPPED)
+        self.assertTrue(ec.state(client) == ResourceState.STOPPED)
+
+        stdout = ec.trace(client, "stdout")
+        self.assertTrue(stdout.strip() == "HOLA")
+
+        ec.shutdown()
+
+        # Load serialized experiment
+        ec2 = ExperimentController.load(filepath)
+        
+        ec2.deploy()
+        ec2.wait_finished(apps)
+        
+        self.assertEquals(len(ec.resources), len(ec2.resources))
+        
+        self.assertTrue(ec2.state(node) == ResourceState.STARTED)
+        self.assertTrue(ec2.state(server) == ResourceState.STOPPED)
+        self.assertTrue(ec2.state(client) == ResourceState.STOPPED)
+
+        stdout = ec2.trace(client, "stdout")
+
+        self.assertTrue(stdout.strip() == "HOLA")
+        
+        ec2.shutdown()
+
+        shutil.rmtree(dirpath)
+
+    def test_condition_serialize_fedora(self):
+        self.t_condition_serialize(self.fedora_host, self.fedora_user, "nc")
+
+    def test_condition_serialize_ubuntu(self):
+        self.t_condition_serialize(self.ubuntu_host, self.ubuntu_user, "netcat")
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/test/resources/linux/tap.py b/test/resources/linux/tap.py
new file mode 100755 (executable)
index 0000000..8536a32
--- /dev/null
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController 
+
+from test_utils import skipIfNotAlive
+
+import os
+import time
+import unittest
+
+class LinuxTapTestCase(unittest.TestCase):
+    def setUp(self):
+        self.host = "roseval.pl.sophia.inria.fr"
+        self.user = "inria_nepi"
+        self.identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
+        self.netblock = "192.168.1"
+
+    @skipIfNotAlive
+    def t_tap_create(self, host, user, identity):
+
+        ec = ExperimentController(exp_id = "test-tap-create")
+        
+        node = ec.register_resource("LinuxNode")
+        ec.set(node, "hostname", host)
+        ec.set(node, "username", user)
+        ec.set(node, "identity", identity)
+        ec.set(node, "cleanHome", True)
+        ec.set(node, "cleanProcesses", True)
+
+        tap = ec.register_resource("LinuxTap")
+        ec.set(tap, "endpoint_ip", "%s.1" % self.netblock)
+        ec.set(tap, "endpoint_prefix", 24)
+        ec.register_connection(tap, node)
+
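+        # Ping the TAP's own endpoint address to check that the virtual device came up.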
+        app = ec.register_resource("LinuxApplication")
+        cmd = "ping -c3 %s.1" % self.netblock
+        ec.set(app, "command", cmd)
+        ec.register_connection(app, node)
+
+        ec.deploy()
+
+        ec.wait_finished(app)
+
+        ping = ec.trace(app, 'stdout')
+        expected = """3 packets transmitted, 3 received, 0% packet loss"""
+        self.assertTrue(ping.find(expected) > -1)
+        
+        if_name = ec.get(tap, "deviceName")
+        self.assertTrue(if_name.startswith("tap"))
+
+        ec.shutdown()
+
+    def test_tap_create(self):
+        self.t_tap_create(self.host, self.user, self.identity)
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/test/resources/linux/tun.py b/test/resources/linux/tun.py
new file mode 100755 (executable)
index 0000000..1f50158
--- /dev/null
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController 
+
+from test_utils import skipIfNotAlive
+
+import os
+import time
+import unittest
+
+class LinuxTunTestCase(unittest.TestCase):
+    def setUp(self):
+        self.host = "roseval.pl.sophia.inria.fr"
+        self.user = "inria_nepi"
+        self.identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
+        self.netblock = "192.168.1"
+
+    @skipIfNotAlive
+    def t_tun_create(self, host, user, identity):
+
+        ec = ExperimentController(exp_id = "test-tun-create")
+        
+        node = ec.register_resource("LinuxNode")
+        ec.set(node, "hostname", host)
+        ec.set(node, "username", user)
+        ec.set(node, "identity", identity)
+        ec.set(node, "cleanHome", True)
+        ec.set(node, "cleanProcesses", True)
+
+        tun = ec.register_resource("LinuxTun")
+        ec.set(tun, "endpoint_ip", "%s.1" % self.netblock)
+        ec.set(tun, "endpoint_prefix", 24)
+        ec.register_connection(tun, node)
+
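+        # Ping the TUN's own endpoint address to check that the virtual device came up.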
+        app = ec.register_resource("LinuxApplication")
+        cmd = "ping -c3 %s.1" % self.netblock
+        ec.set(app, "command", cmd)
+        ec.register_connection(app, node)
+
+        ec.deploy()
+
+        ec.wait_finished(app)
+
+        ping = ec.trace(app, 'stdout')
+        expected = """3 packets transmitted, 3 received, 0% packet loss"""
+        self.assertTrue(ping.find(expected) > -1)
+        
+        if_name = ec.get(tun, "deviceName")
+        self.assertTrue(if_name.startswith("tun"))
+
+        ec.shutdown()
+
+    def test_tun_create(self):
+        self.t_tun_create(self.host, self.user, self.identity)
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/test/resources/linux/udptunnel.py b/test/resources/linux/udptunnel.py
new file mode 100755 (executable)
index 0000000..24ddf14
--- /dev/null
@@ -0,0 +1,157 @@
+#!/usr/bin/env python
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController 
+
+from test_utils import skipIfAnyNotAliveWithIdentity
+
+import os
+import time
+import unittest
+
+class LinuxUdpTunnelTestCase(unittest.TestCase):
+    def setUp(self):
+        self.host1 = "roseval.pl.sophia.inria.fr"
+        self.host2 = "138.96.118.11"
+        self.user1 = "inria_nepi"
+        self.user2 = "omflab"
+        self.identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
+        self.netblock = "192.168.1"
+
+    @skipIfAnyNotAliveWithIdentity
+    def t_tap_udp_tunnel(self, user1, host1, identity1, user2, host2, 
+            identity2):
+
+        ec = ExperimentController(exp_id = "test-tap-udp-tunnel")
+        
+        node1 = ec.register_resource("LinuxNode")
+        ec.set(node1, "hostname", host1)
+        ec.set(node1, "username", user1)
+        ec.set(node1, "identity", identity1)
+        ec.set(node1, "cleanHome", True)
+        ec.set(node1, "cleanProcesses", True)
+
+        tap1 = ec.register_resource("LinuxTap")
+        ec.set(tap1, "endpoint_ip", "%s.1" % self.netblock)
+        ec.set(tap1, "endpoint_prefix", 32)
+        ec.register_connection(tap1, node1)
+
+        node2 = ec.register_resource("LinuxNode")
+        ec.set(node2, "hostname", host2)
+        ec.set(node2, "username", user2)
+        ec.set(node2, "identity", identity2)
+        ec.set(node2, "cleanHome", True)
+        ec.set(node2, "cleanProcesses", True)
+
+        tap2 = ec.register_resource("LinuxTap")
+        ec.set(tap2, "endpoint_ip", "%s.2" % self.netblock)
+        ec.set(tap2, "endpoint_prefix", 32)
+        ec.register_connection(tap2, node2)
+
+        udptun = ec.register_resource("LinuxUdpTunnel")
+        ec.register_connection(tap1, udptun)
+        ec.register_connection(tap2, udptun)
+
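+        # Ping node2's tunnel endpoint from node1 to verify traffic crosses the UDP tunnel.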
+        app = ec.register_resource("LinuxApplication")
+        cmd = "ping -c3 %s.2" % self.netblock
+        ec.set(app, "command", cmd)
+        ec.register_connection(app, node1)
+
+        ec.deploy()
+
+        ec.wait_finished(app)
+
+        ping = ec.trace(app, 'stdout')
+        expected = """3 packets transmitted, 3 received, 0% packet loss"""
+        self.assertTrue(ping.find(expected) > -1)
+        
+        vif_name = ec.get(tap1, "deviceName")
+        self.assertTrue(vif_name.startswith("tap"))
+        
+        vif_name = ec.get(tap2, "deviceName")
+        self.assertTrue(vif_name.startswith("tap"))
+
+        ec.shutdown()
+
+    @skipIfAnyNotAliveWithIdentity
+    def t_tun_udp_tunnel(self, user1, host1, identity1, user2, host2, identity2):
+
+        ec = ExperimentController(exp_id = "test-tun-udp-tunnel")
+        
+        node1 = ec.register_resource("LinuxNode")
+        ec.set(node1, "hostname", host1)
+        ec.set(node1, "username", user1)
+        ec.set(node1, "identity", identity1)
+        ec.set(node1, "cleanHome", True)
+        ec.set(node1, "cleanProcesses", True)
+
+        tun1 = ec.register_resource("LinuxTun")
+        ec.set(tun1, "endpoint_ip", "%s.1" % self.netblock)
+        ec.set(tun1, "endpoint_prefix", 32)
+        ec.register_connection(tun1, node1)
+
+        node2 = ec.register_resource("LinuxNode")
+        ec.set(node2, "hostname", host2)
+        ec.set(node2, "username", user2)
+        ec.set(node2, "identity", identity2)
+        ec.set(node2, "cleanHome", True)
+        ec.set(node2, "cleanProcesses", True)
+
+        tun2 = ec.register_resource("LinuxTun")
+        ec.set(tun2, "endpoint_ip", "%s.2" % self.netblock)
+        ec.set(tun2, "endpoint_prefix", 32)
+        ec.register_connection(tun2, node2)
+
+        udptun = ec.register_resource("LinuxUdpTunnel")
+        ec.register_connection(tun1, udptun)
+        ec.register_connection(tun2, udptun)
+
+        app = ec.register_resource("LinuxApplication")
+        cmd = "ping -c3 %s.2" % self.netblock
+        ec.set(app, "command", cmd)
+        ec.register_connection(app, node1)
+
+        ec.deploy()
+
+        ec.wait_finished(app)
+
+        ping = ec.trace(app, 'stdout')
+        expected = """3 packets transmitted, 3 received, 0% packet loss"""
+        self.assertTrue(ping.find(expected) > -1)
+        
+        vif_name = ec.get(tun1, "deviceName")
+        self.assertTrue(vif_name.startswith("tun"))
+        
+        vif_name = ec.get(tun2, "deviceName")
+        self.assertTrue(vif_name.startswith("tun"))
+
+        ec.shutdown()
+
+    def test_tap_udp_tunnel(self):
+        self.t_tap_udp_tunnel(self.user1, self.host1, self.identity,
+                self.user2, self.host2, self.identity)
+
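+    # The "z" prefix keeps unittest from discovering this test automatically.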
+    def ztest_tun_udp_tunnel(self):
+        self.t_tun_udp_tunnel(self.user1, self.host1, self.identity,
+                self.user2, self.host2, self.identity)
+
+if __name__ == '__main__':
+    unittest.main()
+
old mode 100644 (file)
new mode 100755 (executable)
old mode 100644 (file)
new mode 100755 (executable)
index 1d69a69..98ad803 100755 (executable)
@@ -26,12 +26,14 @@ import os
 import time
 import unittest
 
-class GRETunnelTestCase(unittest.TestCase):
+class PlanetLabGRETunnelTestCase(unittest.TestCase):
     def setUp(self):
         #self.host1 = "nepi2.pl.sophia.inria.fr"
         #self.host2 = "nepi5.pl.sophia.inria.fr"
+        #self.host2 = "planetlab1.informatik.uni-goettingen.de" 
         self.host1 = "planetlab1.informatik.uni-erlangen.de"
-        self.host2 = "planetlab1.informatik.uni-goettingen.de"
+        self.host2 = "planck227ple.test.ibbt.be"
+        self.host3 = "roseval.pl.sophia.inria.fr"
         self.user = "inria_nepi"
         self.identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
         #self.netblock = "192.168.1"
@@ -47,24 +49,26 @@ class GRETunnelTestCase(unittest.TestCase):
         ec.set(node1, "hostname", host1)
         ec.set(node1, "username", user1)
         ec.set(node1, "identity", identity1)
-        ec.set(node1, "cleanHome", True)
+        #ec.set(node1, "cleanHome", True)
+        ec.set(node1, "cleanExperiment", True)
         ec.set(node1, "cleanProcesses", True)
 
         tap1 = ec.register_resource("PlanetlabTap")
-        ec.set(tap1, "ip4", "%s.1" % self.netblock)
-        ec.set(tap1, "prefix4", 24)
+        ec.set(tap1, "endpoint_ip", "%s.1" % self.netblock)
+        ec.set(tap1, "endpoint_prefix", 24)
         ec.register_connection(tap1, node1)
 
         node2 = ec.register_resource("PlanetlabNode")
         ec.set(node2, "hostname", host2)
         ec.set(node2, "username", user2)
         ec.set(node2, "identity", identity2)
-        ec.set(node2, "cleanHome", True)
+        #ec.set(node2, "cleanHome", True)
+        ec.set(node2, "cleanExperiment", True)
         ec.set(node2, "cleanProcesses", True)
 
         tap2 = ec.register_resource("PlanetlabTap")
-        ec.set(tap2, "ip4", "%s.2" % self.netblock)
-        ec.set(tap2, "prefix4", 24)
+        ec.set(tap2, "endpoint_ip", "%s.2" % self.netblock)
+        ec.set(tap2, "endpoint_prefix", 24)
         ec.register_connection(tap2, node2)
 
         gretun = ec.register_resource("LinuxGRETunnel")
@@ -102,24 +106,26 @@ class GRETunnelTestCase(unittest.TestCase):
         ec.set(node1, "hostname", host1)
         ec.set(node1, "username", user1)
         ec.set(node1, "identity", identity1)
-        ec.set(node1, "cleanHome", True)
+        #ec.set(node1, "cleanHome", True)
+        ec.set(node1, "cleanExperiment", True)
         ec.set(node1, "cleanProcesses", True)
 
         tun1 = ec.register_resource("PlanetlabTun")
-        ec.set(tun1, "ip4", "%s.1" % self.netblock)
-        ec.set(tun1, "prefix4", 24)
+        ec.set(tun1, "endpoint_ip", "%s.1" % self.netblock)
+        ec.set(tun1, "endpoint_prefix", 24)
         ec.register_connection(tun1, node1)
 
         node2 = ec.register_resource("PlanetlabNode")
         ec.set(node2, "hostname", host2)
         ec.set(node2, "username", user2)
         ec.set(node2, "identity", identity2)
-        ec.set(node2, "cleanHome", True)
+        #ec.set(node2, "cleanHome", True)
+        ec.set(node2, "cleanExperiment", True)
         ec.set(node2, "cleanProcesses", True)
 
         tun2 = ec.register_resource("PlanetlabTun")
-        ec.set(tun2, "ip4", "%s.2" % self.netblock)
-        ec.set(tun2, "prefix4", 24)
+        ec.set(tun2, "endpoint_ip", "%s.2" % self.netblock)
+        ec.set(tun2, "endpoint_prefix", 24)
         ec.register_connection(tun2, node2)
 
         udptun = ec.register_resource("LinuxGRETunnel")
@@ -147,6 +153,65 @@ class GRETunnelTestCase(unittest.TestCase):
 
         ec.shutdown()
 
+    @skipIfAnyNotAliveWithIdentity
+    def t_tun_hybrid_gre_tunnel(self, user1, host1, identity1, 
+            user2, host2, identity2):
+
+        ec = ExperimentController(exp_id = "test-tap-hybrid-gre-tunnel")
+        
+        node1 = ec.register_resource("PlanetlabNode")
+        ec.set(node1, "hostname", host1)
+        ec.set(node1, "username", user1)
+        ec.set(node1, "identity", identity1)
+        #ec.set(node1, "cleanHome", True)
+        ec.set(node1, "cleanExperiment", True)
+        ec.set(node1, "cleanProcesses", True)
+
+        tun1 = ec.register_resource("PlanetlabTun")
+        ec.set(tun1, "endpoint_ip", "%s.1" % self.netblock)
+        ec.set(tun1, "endpoint_prefix", 24)
+        ec.register_connection(tun1, node1)
+
+        node2 = ec.register_resource("LinuxNode")
+        ec.set(node2, "hostname", host2)
+        ec.set(node2, "username", user2)
+        ec.set(node2, "identity", identity2)
+        #ec.set(node2, "cleanHome", True)
+        ec.set(node2, "cleanExperiment", True)
+        ec.set(node2, "cleanProcesses", True)
+
+        tun2 = ec.register_resource("LinuxTun")
+        ec.set(tun2, "endpoint_ip", "%s.2" % self.netblock)
+        ec.set(tun2, "endpoint_prefix", 24)
+        ec.register_connection(tun2, node2)
+
+        gretun = ec.register_resource("LinuxGRETunnel")
+        ec.register_connection(tun1, gretun)
+        ec.register_connection(tun2, gretun)
+
+        app = ec.register_resource("LinuxApplication")
+        # The hybrid tunnel seems to take some time to set up, so we sleep 5 seconds first
+        # XXX: Debug this to see if it can be fixed on the RMs
+        cmd = "sleep 5; ping -c3 %s.2" % self.netblock
+        ec.set(app, "command", cmd)
+        ec.register_connection(app, node1)
+
+        ec.deploy()
+
+        ec.wait_finished(app)
+
+        ping = ec.trace(app, 'stdout')
+        expected = """3 packets transmitted, 3 received, 0% packet loss"""
+        self.assertTrue(ping.find(expected) > -1)
+        
+        if_name = ec.get(tun1, "deviceName")
+        self.assertTrue(if_name.startswith("tun"))
+        
+        if_name = ec.get(tun2, "deviceName")
+        self.assertTrue(if_name.startswith("tun"))
+
+        ec.shutdown()
+
     def test_tap_gre_tunnel(self):
         self.t_tap_gre_tunnel(self.user, self.host1, self.identity,
                 self.user, self.host2, self.identity)
@@ -155,6 +220,11 @@ class GRETunnelTestCase(unittest.TestCase):
         self.t_tun_gre_tunnel(self.user, self.host1, self.identity,
                 self.user, self.host2, self.identity)
 
+    def test_tun_hybrid_gre_tunnel(self):
+        self.t_tun_hybrid_gre_tunnel(self.user, self.host1, self.identity, 
+                self.user, self.host3, self.identity)
+
+
 if __name__ == '__main__':
     unittest.main()
 
index 2d90707..3c46513 100755 (executable)
@@ -50,8 +50,8 @@ class PlanetlabTapTestCase(unittest.TestCase):
         ec.set(node, "cleanProcesses", True)
 
         tap = ec.register_resource("PlanetlabTap")
-        ec.set(tap, "ip4", "%s.1" % self.netblock)
-        ec.set(tap, "prefix4", 24)
+        ec.set(tap, "endpoint_ip", "%s.1" % self.netblock)
+        ec.set(tap, "endpoint_prefix", 24)
         ec.register_connection(tap, node)
 
         app = ec.register_resource("LinuxApplication")
index 8dea839..aa0def2 100755 (executable)
@@ -50,8 +50,8 @@ class PlanetlabTunTestCase(unittest.TestCase):
         ec.set(node, "cleanProcesses", True)
 
         tun = ec.register_resource("PlanetlabTun")
-        ec.set(tun, "ip4", "%s.1" % self.netblock)
-        ec.set(tun, "prefix4", 24)
+        ec.set(tun, "endpoint_ip", "%s.1" % self.netblock)
+        ec.set(tun, "endpoint_prefix", 24)
         ec.register_connection(tun, node)
 
         app = ec.register_resource("LinuxApplication")
index 36915ac..e54c3ef 100755 (executable)
@@ -26,14 +26,16 @@ import os
 import time
 import unittest
 
-class UdpTunnelTestCase(unittest.TestCase):
+class PlanetlabUdpTunnelTestCase(unittest.TestCase):
     def setUp(self):
         #self.host1 = "nepi2.pl.sophia.inria.fr"
         #self.host2 = "nepi5.pl.sophia.inria.fr"
+        #self.host2 = "planetlab1.informatik.uni-goettingen.de" 
         self.host1 = "planetlab1.informatik.uni-erlangen.de"
-        self.host2 = "planetlab1.informatik.uni-goettingen.de"
+        self.host2 = "planck227ple.test.ibbt.be"
         self.user = "inria_nepi"
-        self.identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
+        #self.identity = "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'])
+        self.identity = "%s/.ssh/id_rsa" % (os.environ['HOME'])
         #self.netblock = "192.168.1"
         self.netblock = "192.168.3"
 
@@ -47,24 +49,26 @@ class UdpTunnelTestCase(unittest.TestCase):
         ec.set(node1, "hostname", host1)
         ec.set(node1, "username", user1)
         ec.set(node1, "identity", identity1)
-        ec.set(node1, "cleanHome", True)
+        #ec.set(node1, "cleanHome", True)
+        ec.set(node1, "cleanExperiment", True)
         ec.set(node1, "cleanProcesses", True)
 
         tap1 = ec.register_resource("PlanetlabTap")
-        ec.set(tap1, "ip4", "%s.1" % self.netblock)
-        ec.set(tap1, "prefix4", 24)
+        ec.set(tap1, "endpoint_ip", "%s.1" % self.netblock)
+        ec.set(tap1, "endpoint_prefix", 24)
         ec.register_connection(tap1, node1)
 
         node2 = ec.register_resource("PlanetlabNode")
         ec.set(node2, "hostname", host2)
         ec.set(node2, "username", user2)
         ec.set(node2, "identity", identity2)
-        ec.set(node2, "cleanHome", True)
+        #ec.set(node2, "cleanHome", True)
+        ec.set(node2, "cleanExperiment", True)
         ec.set(node2, "cleanProcesses", True)
 
         tap2 = ec.register_resource("PlanetlabTap")
-        ec.set(tap2, "ip4", "%s.2" % self.netblock)
-        ec.set(tap2, "prefix4", 24)
+        ec.set(tap2, "endpoint_ip", "%s.2" % self.netblock)
+        ec.set(tap2, "endpoint_prefix", 24)
         ec.register_connection(tap2, node2)
 
         udptun = ec.register_resource("LinuxUdpTunnel")
@@ -101,24 +105,26 @@ class UdpTunnelTestCase(unittest.TestCase):
         ec.set(node1, "hostname", host1)
         ec.set(node1, "username", user1)
         ec.set(node1, "identity", identity1)
-        ec.set(node1, "cleanHome", True)
+        #ec.set(node1, "cleanHome", True)
+        ec.set(node1, "cleanExperiment", True)
         ec.set(node1, "cleanProcesses", True)
 
         tun1 = ec.register_resource("PlanetlabTun")
-        ec.set(tun1, "ip4", "%s.1" % self.netblock)
-        ec.set(tun1, "prefix4", 24)
+        ec.set(tun1, "endpoint_ip", "%s.1" % self.netblock)
+        ec.set(tun1, "endpoint_prefix", 24)
         ec.register_connection(tun1, node1)
 
         node2 = ec.register_resource("PlanetlabNode")
         ec.set(node2, "hostname", host2)
         ec.set(node2, "username", user2)
         ec.set(node2, "identity", identity2)
-        ec.set(node2, "cleanHome", True)
+        #ec.set(node2, "cleanHome", True)
+        ec.set(node2, "cleanExperiment", True)
         ec.set(node2, "cleanProcesses", True)
 
         tun2 = ec.register_resource("PlanetlabTun")
-        ec.set(tun2, "ip4", "%s.2" % self.netblock)
-        ec.set(tun2, "prefix4", 24)
+        ec.set(tun2, "endpoint_ip", "%s.2" % self.netblock)
+        ec.set(tun2, "endpoint_prefix", 24)
         ec.register_connection(tun2, node2)
 
         udptun = ec.register_resource("LinuxUdpTunnel")
old mode 100644 (file)
new mode 100755 (executable)
diff --git a/test/util/plot.py b/test/util/plot.py
deleted file mode 100755 (executable)
index 38001de..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-#
-#    NEPI, a framework to manage network experiments
-#    Copyright (C) 2013 INRIA
-#
-#    This program is free software: you can redistribute it and/or modify
-#    it under the terms of the GNU General Public License as published by
-#    the Free Software Foundation, either version 3 of the License, or
-#    (at your option) any later version.
-#
-#    This program is distributed in the hope that it will be useful,
-#    but WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#    GNU General Public License for more details.
-#
-#    You should have received a copy of the GNU General Public License
-#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
-
-
-from nepi.design.box import Box 
-from nepi.util.plot import Plotter
-
-import subprocess
-import unittest
-
-class BoxPlotTestCase(unittest.TestCase):
-    def xtest_plot(self):
-        """ XXX: This test is interactive, it will open an evince instance,
-        so it should not run automatically """
-        node1 = Box(label="node1")
-        ping1 = Box(label="ping")
-        mobility1 = Box(label="mob1")
-        node2 = Box(label="node2")
-        mobility2 = Box(label="mob2")
-        iface1 = Box(label="iface1")
-        iface2 = Box(label="iface2")
-        channel = Box(label="chan")
-
-        node1.connect(ping1)
-        node1.connect(mobility1)
-        node1.connect(iface1)
-        channel.connect(iface1)
-        channel.connect(iface2)
-        node2.connect(iface2)
-        node2.connect(mobility2)
-
-        plotter = Plotter(node1)
-        fname = plotter.plot()
-        subprocess.call(["dot", "-Tps", fname, "-o", "%s.ps"%fname])
-        subprocess.call(["evince","%s.ps"%fname])
-       
-if __name__ == '__main__':
-    unittest.main()
-
diff --git a/test/util/plotter.py b/test/util/plotter.py
new file mode 100755 (executable)
index 0000000..9ad5ce6
--- /dev/null
+++ b/test/util/plotter.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.resource import ResourceManager, ResourceState, \
+        clsinit_copy, ResourceAction, ResourceFactory
+from nepi.util.plotter import PFormats
+
+import os
+import tempfile
+import time
+import unittest
+
+reschedule_delay = "0.5s"
+deploy_time = 0
+run_time = 0
+
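+# Dummy ResourceManager subclasses: they emulate nodes, interfaces,
+# applications and a link, so the test can build a small topology
+# without deploying anything real.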
+class Link(ResourceManager):
+    _rtype = "dummy::Link"
+    def do_deploy(self):
+        time.sleep(deploy_time)
+        super(Link, self).do_deploy()
+        self.logger.debug(" -------- DEPLOYED ------- ")
+
+class Interface(ResourceManager):
+    _rtype = "dummy::Interface"
+
+    def do_deploy(self):
+        node = self.get_connected(Node.get_rtype())[0]
+        link = self.get_connected(Link.get_rtype())[0]
+
+        if node.state < ResourceState.READY or \
+                link.state < ResourceState.READY:
+            self.ec.schedule(reschedule_delay, self.deploy)
+            self.logger.debug(" -------- RESCHEDULING ------- ")
+        else:
+            time.sleep(deploy_time)
+            super(Interface, self).do_deploy()
+            self.logger.debug(" -------- DEPLOYED ------- ")
+
+class Node(ResourceManager):
+    _rtype = "dummy::Node"
+
+    def do_deploy(self):
+        self.logger.debug(" -------- DO_DEPLOY ------- ")
+        time.sleep(deploy_time)
+        super(Node, self).do_deploy()
+        self.logger.debug(" -------- DEPLOYED ------- ")
+
+class Application(ResourceManager):
+    _rtype = "dummy::Application"
+
+    def do_deploy(self):
+        node = self.get_connected(Node.get_rtype())[0]
+
+        if node.state < ResourceState.READY: 
+            self.ec.schedule(reschedule_delay, self.deploy)
+            self.logger.debug(" -------- RESCHEDULING ------- ")
+        else:
+            time.sleep(deploy_time)
+            super(Application, self).do_deploy()
+            self.logger.debug(" -------- DEPLOYED ------- ")
+
+    def do_start(self):
+        super(Application, self).do_start()
+        time.sleep(run_time)
+        self.ec.schedule("0s", self.stop)
+
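+# Register the dummy types so the ExperimentController can instantiate
+# them by their rtype strings (e.g. "dummy::Node").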
+ResourceFactory.register_type(Application)
+ResourceFactory.register_type(Node)
+ResourceFactory.register_type(Interface)
+ResourceFactory.register_type(Link)
+
+class PlotterTestCase(unittest.TestCase):
+    def test_plot(self):
+        node_count = 4
+        app_count = 2
+
+        ec = ExperimentController(exp_id = "plotter-test")
+       
+        # Add simulated nodes and applications
+        nodes = list()
+        apps = list()
+        ifaces = list()
+
+        for i in xrange(node_count):
+            node = ec.register_resource("dummy::Node")
+            nodes.append(node)
+            
+            iface = ec.register_resource("dummy::Interface")
+            ec.register_connection(node, iface)
+            ifaces.append(iface)
+
+            for _ in xrange(app_count):
+                app = ec.register_resource("dummy::Application")
+                ec.register_connection(node, app)
+                apps.append(app)
+
+        link = ec.register_resource("dummy::Link")
+
+        for iface in ifaces:
+            ec.register_connection(link, iface)
+       
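+        # Plot the registered topology: the test expects the default format
+        # to yield a non-empty .png file and PFormats.DOT a non-empty .dot file.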
+        fpath = ec.plot()
+        statinfo = os.stat(fpath)
+        size = statinfo.st_size
+        self.assertTrue(size > 0)
+        self.assertTrue(fpath.endswith(".png"))
+
+        os.remove(fpath)
+
+        fpath = ec.plot(format = PFormats.DOT)
+        statinfo = os.stat(fpath)
+        size = statinfo.st_size
+        self.assertTrue(size > 0)
+        self.assertTrue(fpath.endswith(".dot"))
+
+        os.remove(fpath)
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/test/util/serializer.py b/test/util/serializer.py
new file mode 100755 (executable)
index 0000000..993a0f4
--- /dev/null
+++ b/test/util/serializer.py
@@ -0,0 +1,146 @@
+#!/usr/bin/env python
+#
+#    NEPI, a framework to manage network experiments
+#    Copyright (C) 2013 INRIA
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.resource import ResourceManager, ResourceState, \
+        clsinit_copy, ResourceAction, ResourceFactory
+
+import os
+import tempfile
+import time
+import shutil
+import unittest
+
+reschedule_delay = "0.5s"
+deploy_time = 0
+run_time = 0
+
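+# Same dummy ResourceManager subclasses as in plotter.py; they provide a
+# minimal Node/Interface/Link/Application topology for the serializer test.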
+class Link(ResourceManager):
+    _rtype = "dummy::Link"
+    def do_deploy(self):
+        time.sleep(deploy_time)
+        super(Link, self).do_deploy()
+        self.logger.debug(" -------- DEPLOYED ------- ")
+
+class Interface(ResourceManager):
+    _rtype = "dummy::Interface"
+
+    def do_deploy(self):
+        node = self.get_connected(Node.get_rtype())[0]
+        link = self.get_connected(Link.get_rtype())[0]
+
+        if node.state < ResourceState.READY or \
+                link.state < ResourceState.READY:
+            self.ec.schedule(reschedule_delay, self.deploy)
+            self.logger.debug(" -------- RESCHEDULING ------- ")
+        else:
+            time.sleep(deploy_time)
+            super(Interface, self).do_deploy()
+            self.logger.debug(" -------- DEPLOYED ------- ")
+
+class Node(ResourceManager):
+    _rtype = "dummy::Node"
+
+    def do_deploy(self):
+        self.logger.debug(" -------- DO_DEPLOY ------- ")
+        time.sleep(deploy_time)
+        super(Node, self).do_deploy()
+        self.logger.debug(" -------- DEPLOYED ------- ")
+
+class Application(ResourceManager):
+    _rtype = "dummy::Application"
+
+    def do_deploy(self):
+        node = self.get_connected(Node.get_rtype())[0]
+
+        if node.state < ResourceState.READY: 
+            self.ec.schedule(reschedule_delay, self.deploy)
+            self.logger.debug(" -------- RESCHEDULING ------- ")
+        else:
+            time.sleep(deploy_time)
+            super(Application, self).do_deploy()
+            self.logger.debug(" -------- DEPLOYED ------- ")
+
+    def do_start(self):
+        super(Application, self).do_start()
+        time.sleep(run_time)
+        self.ec.schedule("0s", self.stop)
+
+ResourceFactory.register_type(Application)
+ResourceFactory.register_type(Node)
+ResourceFactory.register_type(Interface)
+ResourceFactory.register_type(Link)
+
+class SerializerTestCase(unittest.TestCase):
+    def test_serialize(self):
+        node_count = 4
+        app_count = 2
+
+        dirpath = tempfile.mkdtemp()
+
+        ec = ExperimentController(exp_id = "serialize-test")
+       
+        # Add simulated nodes and applications
+        nodes = list()
+        apps = list()
+        ifaces = list()
+
+        for i in xrange(node_count):
+            node = ec.register_resource("dummy::Node")
+            nodes.append(node)
+            
+            iface = ec.register_resource("dummy::Interface")
+            ec.register_connection(node, iface)
+            ifaces.append(iface)
+
+            for _ in xrange(app_count):
+                app = ec.register_resource("dummy::Application")
+                ec.register_connection(node, app)
+                apps.append(app)
+
+        link = ec.register_resource("dummy::Link")
+
+        for iface in ifaces:
+            ec.register_connection(link, iface)
+
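+        # Serialize the experiment description to the temporary directory
+        # before deploying it.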
+        filepath = ec.save(dirpath)
+
+        ec.deploy()
+
+        # Wait until nodes and apps are deployed
+        ec.wait_finished(apps)
+
+        # Do the experiment controller shutdown
+        ec.shutdown()
+
+        # Load serialized experiment
+        ec2 = ExperimentController.load(filepath)
+        apps = ec2.filter_resources("dummy::Application")
+        ec2.deploy()
+        ec2.wait_finished(apps)
+        ec2.shutdown()
+        
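+        # The reloaded controller should describe the same number of
+        # resources as the original one.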
+        self.assertEqual(len(ec.resources), len(ec2.resources))
+
+        shutil.rmtree(dirpath)
+                       
+if __name__ == '__main__':
+    unittest.main()
+