Merge neco to nepi-3.0
authorAlina Quereilhac <alina.quereilhac@inria.fr>
Sun, 19 May 2013 22:10:01 +0000 (00:10 +0200)
committerAlina Quereilhac <alina.quereilhac@inria.fr>
Sun, 19 May 2013 22:10:01 +0000 (00:10 +0200)
193 files changed:
.hgignore [deleted file]
DEPENDENCIES [deleted file]
LICENSE
Makefile
README [deleted file]
examples/Multicast/multicast_experiment.py [deleted file]
examples/POPI/popi-0.9-rpy2.patch [deleted file]
examples/POPI/popi-0.9.tar.gz [deleted file]
examples/POPI/popi-tun-classfilter-2MB-q500-pl-hibw.xml [deleted file]
examples/POPI/popi-tun-classfilter-2MB-q500-pl.xml [deleted file]
examples/POPI/run_one_experiment.py [deleted file]
examples/POPI/run_popi_experiments.py [deleted file]
examples/POPI/run_popi_experiments.sh [deleted file]
examples/automated_vlc_experiment_plexus.py [new file with mode: 0644]
examples/big_buck_bunny_240p_mpeg4.ts [deleted file]
examples/big_buck_bunny_240p_mpeg4_lq.ts [deleted file]
examples/big_buck_bunny_license_information [deleted file]
examples/ccnx/ccnx-0.6.0rc3.tar.gz [deleted file]
examples/ccnx/planetlab_ccnx_multicast.py [deleted file]
examples/ccnx/planetlab_ccnx_unicast.py [deleted file]
examples/daemonized_testbed_controller.py [deleted file]
examples/fd_cross_testbed_experiment.py [deleted file]
examples/linux/ccnx/simple_topo.py [new file with mode: 0644]
examples/linux/scalability.py [new file with mode: 0644]
examples/manual_vlc_experiment_plexus.py [new file with mode: 0644]
examples/netns_routing_experiment.py [deleted file]
examples/netns_xterm_experiment.py [deleted file]
examples/ns3_runtime_attribute_change.py [deleted file]
examples/ns3_wifi_hidden_terminal.py [deleted file]
examples/omf-iminds-vlc.py [deleted file]
examples/omf-nitos-vlc.py [deleted file]
examples/omf-plexuslab-vlc.py [deleted file]
examples/omf-plexuslab-xeyes.py [deleted file]
examples/planetlab_fix_gpgkeys.py [deleted file]
examples/planetlab_package_install.py [deleted file]
examples/planetlab_simple_experiment.py [deleted file]
examples/streaming/ccn_broadcast.py [deleted file]
examples/streaming/vlc_broadcast.py [deleted file]
examples/tunnel_cross_testbed_experiment.py [deleted file]
examples/vlc_cross_testbed_experiment.py [deleted file]
setup.py
src/nepi/__init__.py
src/nepi/core/attributes.py [deleted file]
src/nepi/core/connector.py [deleted file]
src/nepi/core/design.py [deleted file]
src/nepi/core/execute.py [deleted file]
src/nepi/core/factory.py [deleted file]
src/nepi/core/metadata.py [deleted file]
src/nepi/core/testbed_impl.py [deleted file]
src/nepi/design/__init__.py [moved from src/nepi/core/__init__.py with 100% similarity]
src/nepi/design/box.py [new file with mode: 0644]
src/nepi/execution/__init__.py [moved from src/nepi/testbeds/__init__.py with 100% similarity]
src/nepi/execution/attribute.py [new file with mode: 0644]
src/nepi/execution/ec.py [new file with mode: 0644]
src/nepi/execution/resource.py [new file with mode: 0644]
src/nepi/execution/scheduler.py [new file with mode: 0644]
src/nepi/execution/tags.py [new file with mode: 0644]
src/nepi/execution/trace.py [new file with mode: 0644]
src/nepi/resources/__init__.py [moved from src/nepi/util/parser/__init__.py with 100% similarity]
src/nepi/resources/linux/__init__.py [new file with mode: 0644]
src/nepi/resources/linux/application.py [new file with mode: 0644]
src/nepi/resources/linux/channel.py [new file with mode: 0644]
src/nepi/resources/linux/debfuncs.py [new file with mode: 0644]
src/nepi/resources/linux/interface.py [new file with mode: 0644]
src/nepi/resources/linux/node.py [new file with mode: 0644]
src/nepi/resources/linux/rpmfuncs.py [new file with mode: 0644]
src/nepi/resources/netns/__init__.py [new file with mode: 0644]
src/nepi/resources/ns3/__init__.py [new file with mode: 0644]
src/nepi/resources/ns3/ns3wrapper.py [new file with mode: 0644]
src/nepi/resources/ns3/ns3wrapper_server.py [new file with mode: 0644]
src/nepi/resources/omf/__init__.py [new file with mode: 0644]
src/nepi/resources/omf/omf_api.py [new file with mode: 0644]
src/nepi/resources/omf/omf_application.py [new file with mode: 0644]
src/nepi/resources/omf/omf_channel.py [new file with mode: 0644]
src/nepi/resources/omf/omf_client.py [new file with mode: 0644]
src/nepi/resources/omf/omf_interface.py [new file with mode: 0644]
src/nepi/resources/omf/omf_messages_5_4.py [new file with mode: 0644]
src/nepi/resources/omf/omf_node.py [new file with mode: 0644]
src/nepi/resources/omf/xx_omf_resource.py [new file with mode: 0644]
src/nepi/resources/planetlab/__init__.py [new file with mode: 0644]
src/nepi/testbeds/netns/__init__.py [deleted file]
src/nepi/testbeds/netns/constants.py [deleted file]
src/nepi/testbeds/netns/execute.py [deleted file]
src/nepi/testbeds/netns/metadata.py [deleted file]
src/nepi/testbeds/ns3/__init__.py [deleted file]
src/nepi/testbeds/ns3/attributes_metadata.py [deleted file]
src/nepi/testbeds/ns3/connection_metadata.py [deleted file]
src/nepi/testbeds/ns3/constants.py [deleted file]
src/nepi/testbeds/ns3/execute.py [deleted file]
src/nepi/testbeds/ns3/factories_metadata.py [deleted file]
src/nepi/testbeds/ns3/metadata.py [deleted file]
src/nepi/testbeds/ns3/ns3_bindings_import.py [deleted file]
src/nepi/testbeds/ns3/traces_metadata.py [deleted file]
src/nepi/testbeds/ns3/util.py [deleted file]
src/nepi/testbeds/ns3/validation.py [deleted file]
src/nepi/testbeds/omf/__init__.py [deleted file]
src/nepi/testbeds/omf/constants.py [deleted file]
src/nepi/testbeds/omf/execute.py [deleted file]
src/nepi/testbeds/omf/metadata.py [deleted file]
src/nepi/testbeds/omf/omf_api.py [deleted file]
src/nepi/testbeds/omf/omf_client.py [deleted file]
src/nepi/testbeds/omf/omf_messages.py [deleted file]
src/nepi/testbeds/planetlab/__init__.py [deleted file]
src/nepi/testbeds/planetlab/application.py [deleted file]
src/nepi/testbeds/planetlab/constants.py [deleted file]
src/nepi/testbeds/planetlab/execute.py [deleted file]
src/nepi/testbeds/planetlab/interfaces.py [deleted file]
src/nepi/testbeds/planetlab/metadata.py [deleted file]
src/nepi/testbeds/planetlab/multicast.py [deleted file]
src/nepi/testbeds/planetlab/node.py [deleted file]
src/nepi/testbeds/planetlab/plcapi.py [deleted file]
src/nepi/testbeds/planetlab/resourcealloc.py [deleted file]
src/nepi/testbeds/planetlab/scripts/classqueue.py [deleted file]
src/nepi/testbeds/planetlab/scripts/consts.c [deleted file]
src/nepi/testbeds/planetlab/scripts/loggingclassqueue.py [deleted file]
src/nepi/testbeds/planetlab/scripts/mcastfwd.py [deleted file]
src/nepi/testbeds/planetlab/scripts/mrouted-3.9.5-pl.patch [deleted file]
src/nepi/testbeds/planetlab/scripts/plr50.c [deleted file]
src/nepi/testbeds/planetlab/scripts/plr50.py [deleted file]
src/nepi/testbeds/planetlab/scripts/tosqueue.py [deleted file]
src/nepi/testbeds/planetlab/scripts/tun_connect.py [deleted file]
src/nepi/testbeds/planetlab/scripts/tunalloc.c [deleted file]
src/nepi/testbeds/planetlab/tunproto.py [deleted file]
src/nepi/testbeds/planetlab/util.py [deleted file]
src/nepi/util/constants.py [deleted file]
src/nepi/util/defer.py [deleted file]
src/nepi/util/environ.py
src/nepi/util/execfuncs.py [moved from src/nepi/testbeds/planetlab/rspawn.py with 56% similarity]
src/nepi/util/graphical_info.py [deleted file]
src/nepi/util/graphtools/__init__.py [deleted file]
src/nepi/util/graphtools/mst.py [deleted file]
src/nepi/util/guid.py
src/nepi/util/ipaddr2.py [deleted file]
src/nepi/util/parallel.py
src/nepi/util/parser.py [new file with mode: 0644]
src/nepi/util/parser/_xml.py [deleted file]
src/nepi/util/parser/base.py [deleted file]
src/nepi/util/parser/sfa.py [deleted file]
src/nepi/util/plot.py [new file with mode: 0644]
src/nepi/util/proxy.py [deleted file]
src/nepi/util/proxy_stub.tpl [deleted file]
src/nepi/util/rmatcher.py [new file with mode: 0644]
src/nepi/util/server.py [deleted file]
src/nepi/util/settools/__init__.py [deleted file]
src/nepi/util/settools/classify.py [deleted file]
src/nepi/util/settools/setclusters.py [deleted file]
src/nepi/util/sfa_api.py [new file with mode: 0644]
src/nepi/util/sfa_sfav1.py [new file with mode: 0644]
src/nepi/util/sfiapi.py [deleted file]
src/nepi/util/sshfuncs.py [new file with mode: 0644]
src/nepi/util/tags.py [deleted file]
src/nepi/util/timefuncs.py [new file with mode: 0644]
src/nepi/util/tunchannel.py [deleted file]
src/nepi/util/tunchannel_impl.py [deleted file]
src/nepi/util/validation.py [deleted file]
test/core/design.py [deleted file]
test/core/execute.py [deleted file]
test/core/integration.py [deleted file]
test/design/box.py [new file with mode: 0755]
test/execution/ec.py [new file with mode: 0755]
test/execution/resource.py [new file with mode: 0755]
test/lib/mock/__init__.py [deleted file]
test/lib/mock/constants.py [deleted file]
test/lib/mock/execute.py [deleted file]
test/lib/mock/metadata.py [deleted file]
test/lib/mock2/__init__.py [deleted file]
test/lib/mock2/constants.py [deleted file]
test/lib/mock2/execute.py [deleted file]
test/lib/mock2/metadata.py [deleted file]
test/resources/linux/application.py [new file with mode: 0644]
test/resources/linux/interface.py [new file with mode: 0644]
test/resources/linux/node.py [new file with mode: 0644]
test/resources/linux/test_utils.py [new file with mode: 0644]
test/resources/ns3/ns3wrapper.py [new file with mode: 0644]
test/resources/omf/omf_vlc_exp.py [new file with mode: 0755]
test/testbeds/netns/design.py [deleted file]
test/testbeds/netns/execute.py [deleted file]
test/testbeds/netns/integration.py [deleted file]
test/testbeds/ns3/design.py [deleted file]
test/testbeds/ns3/execute.py [deleted file]
test/testbeds/ns3/execute2.py [deleted file]
test/testbeds/ns3/integration.py [deleted file]
test/testbeds/planetlab/design.py [deleted file]
test/testbeds/planetlab/execute.py [deleted file]
test/testbeds/planetlab/integration.py [deleted file]
test/testbeds/planetlab/integration_cross.py [deleted file]
test/testbeds/planetlab/integration_multi.py [deleted file]
test/testbeds/planetlab/integration_ns3.py [deleted file]
test/util/parser.py [new file with mode: 0755]
test/util/plot.py [new file with mode: 0755]
test/util/server.py [deleted file]
test/util/sshfuncs.py [moved from test/lib/test_util.py with 55% similarity]
test/util/tunchannel.py [deleted file]

diff --git a/.hgignore b/.hgignore
deleted file mode 100644 (file)
index 163d7b0..0000000
--- a/.hgignore
+++ /dev/null
@@ -1,7 +0,0 @@
-#use glob syntax.
-syntax: glob
-
-*.pyc
-*~
-build
-
diff --git a/DEPENDENCIES b/DEPENDENCIES
deleted file mode 100644 (file)
index c66fd9c..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-* ipaddr-2.1.7 : http://ipaddr-py.googlecode.com/files/ipaddr-2.1.7.tar.gz
-* sleekxmpp-1.0.1dev:
-* python-crypto
diff --git a/LICENSE b/LICENSE
index b73d893..9a938a6 100644 (file)
--- a/LICENSE
+++ b/LICENSE
@@ -1,5 +1,6 @@
 NEPI, a network experiment management framework
-Copyright (C) 2009-2013 INRIA
+
+Copyright (c) 2009-2013 INRIA
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -14,3 +15,5 @@ GNU General Public License for more details.
 You should have received a copy of the GNU General Public License along
 with this program; if not, write to the Free Software Foundation, Inc.,
 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Author: Alina Quereilhac <alina.quereilhac@inria.fr>
index 340f097..cc56b5e 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,5 @@
 SRCDIR      = $(CURDIR)/src
 TESTDIR     = $(CURDIR)/test
-TESTLIB     = $(TESTDIR)/lib
 BUILDDIR    = $(CURDIR)/build
 DISTDIR     = $(CURDIR)/dist
 
@@ -17,7 +16,7 @@ else
 BUILDDIR := $(BUILDDIR)/lib
 endif
 
-PYPATH = $(BUILDDIR):$(TESTLIB):$(PYTHONPATH)
+PYPATH = $(BUILDDIR):$(PYTHONPATH)
 COVERAGE = $(or $(shell which coverage), $(shell which python-coverage), \
           coverage)
 
@@ -31,14 +30,14 @@ test: all
        retval=0; \
               for i in `find "$(TESTDIR)" -iname '*.py' -perm -u+x -type f`; do \
               echo $$i; \
-              TESTLIBPATH="$(TESTLIB)" PYTHONPATH="$(PYPATH)" $$i -v || retval=$$?; \
+              PYTHONPATH="$(PYPATH)" $$i -v || retval=$$?; \
               done; exit $$retval
 
 coverage: all
        rm -f .coverage
        for i in `find "$(TESTDIR)" -perm -u+x -type f`; do \
                set -e; \
-               TESTLIBPATH="$(TESTLIB)" PYTHONPATH="$(PYPATH)" $(COVERAGE) -x $$i -v; \
+               PYTHONPATH="$(PYPATH)" $(COVERAGE) -x $$i -v; \
                done
        $(COVERAGE) -c
        $(COVERAGE) -r -m `find "$(BUILDDIR)" -name \\*.py -type f`
diff --git a/README b/README
deleted file mode 100644 (file)
index 9696765..0000000
--- a/README
+++ /dev/null
@@ -1,6 +0,0 @@
-examples/big_buck_bunny_480p_h264.ts:
-       Original author: Blender Foundation | www.blender.org
-                        Peach Open movie team
-                        www.bigbuckbunny.org
-        Recoded for the example and truncated for shorter duration
-
diff --git a/examples/Multicast/multicast_experiment.py b/examples/Multicast/multicast_experiment.py
deleted file mode 100755 (executable)
index 0f93259..0000000
+++ /dev/null
@@ -1,802 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import getpass
-import os
-import os.path
-import re
-import sys
-import shutil
-import signal
-import tempfile
-import time
-import struct
-import socket
-import operator
-import ipaddr
-import gzip
-import random
-import traceback
-import math
-import subprocess
-
-sys.path.append(os.path.abspath("../../src"))
-
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-from nepi.util import proxy
-from nepi.util.constants import DeploymentConfiguration as DC, ATTR_NEPI_TESTBED_ENVIRONMENT_SETUP
-from nepi.testbeds.planetlab import util as plutil
-from optparse import OptionParser
-
-
-class PlanetLabMulticastOverlay:
-    testbed_id = "planetlab"
-    slicename = "inria_nepi"
-    plchost = "www.planet-lab.eu"
-    plkey = os.environ.get(
-            "PL_SSH_KEY",
-            "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'],) )
-    pluser = os.environ.get("PL_USER")
-    plpass = os.environ.get("PL_PASS")
-    vnet = "192.168.3.0"
-    user = os.getlogin()
-    
-    port_base = 2000 + (os.getpid() % 1000) * 13
-    
-    def setUp(self):
-        self.root_dir = tempfile.mkdtemp()
-        self.__class__.port_base = self.__class__.port_base + 100
-        
-        print "Using:"
-        print "\tDISPLAY:", os.environ['DISPLAY']
-        print "\tPLC:", self.plchost
-        print "\tUsername:", self.pluser
-        print "\tslice:", self.slicename
-
-        api = plutil.getAPI(self.pluser, self.plpass, hostname=self.plchost)
-        self.vnet = plutil.getVnet(api, self.slicename).split('/')[0].strip()
-
-        print "\tvnet:", self.vnet
-
-    def tearDown(self):
-        try:
-            shutil.rmtree(self.root_dir)
-        except:
-            # retry
-            time.sleep(0.1)
-            shutil.rmtree(self.root_dir)
-
-    def make_experiment_desc(self):
-        testbed_id = self.testbed_id
-        slicename = self.slicename
-        plchost = self.plchost
-        pl_ssh_key = self.plkey
-        pl_user = self.pluser
-        pl_pwd = self.plpass
-        
-        plroot_dir = os.path.join(self.root_dir,"pl")
-        if not os.path.exists(plroot_dir):
-            os.makedirs(plroot_dir)
-
-        exp_desc = ExperimentDescription()
-        pl_provider = FactoriesProvider(testbed_id)
-        pl_desc = exp_desc.add_testbed_description(pl_provider)
-        pl_desc.set_attribute_value(DC.ROOT_DIRECTORY, plroot_dir )
-        pl_desc.set_attribute_value("homeDirectory", self.root_dir)
-        pl_desc.set_attribute_value("slice", slicename)
-        pl_desc.set_attribute_value("sliceSSHKey", pl_ssh_key)
-        pl_desc.set_attribute_value("authUser", pl_user)
-        pl_desc.set_attribute_value("authPass", pl_pwd)
-        pl_desc.set_attribute_value("plcHost", plchost)
-        pl_desc.set_attribute_value("tapPortBase", self.port_base)
-        pl_desc.set_attribute_value("p2pDeployment", not self.no_p2p_deploy)
-        pl_desc.set_attribute_value("cleanProc", True)
-        pl_desc.set_attribute_value("plLogLevel", "INFO")
-        
-        return pl_desc, exp_desc
-    
-    def make_pl_tapnode(self, pl, ip, inet = None, label = None, hostname = None, routes = None, mcast = False, mcastrouter = False, types = None):
-        if not isinstance(ip, list):
-            ips = [ip]
-        else:
-            ips = ip
-        node1 = pl.create("Node")
-        if label: 
-            node1.set_attribute_value("label", label)
-        if hostname: 
-            node1.set_attribute_value("hostname", hostname)
-        iface1 = pl.create("NodeInterface")
-        if label:
-            iface1.set_attribute_value("label", label+"iface")
-        if types is None:
-            types = ["TapInterface"] * len(ips)
-        tap1 = []
-        tap1ip = []
-        for i,(ip,devtype) in enumerate(zip(ips,types)):
-            _tap1 = pl.create(devtype)
-            _tap1.set_attribute_value("multicast", True)
-            _tap1.enable_trace("pcap") # for error output
-            if label:
-                _tap1.set_attribute_value("label", label+"tap"+(str(i+1) if i else ""))
-        
-            _tap1ip = _tap1.add_address()
-            _tap1ip.set_attribute_value("Address", ip)
-            _tap1ip.set_attribute_value("NetPrefix", 32)
-            _tap1ip.set_attribute_value("Broadcast", False)
-        
-            node1.connector("devs").connect(_tap1.connector("node"))
-            
-            tap1.append(_tap1)
-            tap1ip.append(_tap1ip)
-            
-        inet = inet or pl.create("Internet")
-        node1.connector("devs").connect(iface1.connector("node"))
-        iface1.connector("inet").connect(inet.connector("devs"))
-        
-        for destip, destprefix, nexthop in routes:
-            r1 = node1.add_route()
-            r1.set_attribute_value("Destination", destip)
-            r1.set_attribute_value("NetPrefix", destprefix)
-            r1.set_attribute_value("NextHop", nexthop)
-        
-        if mcast:
-            if mcastrouter:
-                fwd = pl.create("MulticastForwarder")
-                fwd.enable_trace("stderr")
-                fwd.connector("node").connect(node1.connector("apps"))
-                mrt = pl.create("MulticastRouter")
-                mrt.connector("fwd").connect(fwd.connector("router"))
-                mrt.enable_trace("stderr")
-            else:
-                ann = pl.create("MulticastAnnouncer")
-                ann.enable_trace("stderr")
-                ann.connector("node").connect(node1.connector("apps"))
-                
-        return node1, iface1, tap1, tap1ip, inet
-    
-    def add_vlc_base(self, pl, node):
-        app = pl.create("Application")
-        app.set_attribute_value("rpmFusion", True)
-        app.set_attribute_value("depends", "vlc")
-        app.set_attribute_value("command", "sudo -S dbus-uuidgen --ensure ; vlc --version")
-        app.enable_trace("stdout")
-        app.enable_trace("stderr")
-        node.connector("apps").connect(app.connector("node"))
-        return app
-    
-    def add_vlc_restreamer(self, pl, node):
-        hostname = node.get_attribute_value("hostname")
-        app = self.add_vlc_base(pl, node)
-        app.set_attribute_value("label","vlc_restreamer_%d" % (node.guid,))
-        app.set_attribute_value("command",
-            "sudo -S dbus-uuidgen --ensure ; "
-            "while true ; do "
-            "vlc -vvv -I dummy"
-            " udp/ts://@239.255.12.42"
-            " --sout '#std{access=http,mux=ts,dst=:8080}'"
-            " ; sleep 5 ; done ")
-        return app
-    
-    def add_vlc_dumper(self, pl, node, hostname=None, labelprefix = "vlc_dumper", precmd = "sleep 5 ; "):
-        app = self.add_vlc_base(pl, node)
-        mylabel = "%s_%d" % (labelprefix, node.guid,)
-        if hostname is None:
-            hostname = node.get_attribute_value("hostname")
-        app.set_attribute_value("label",mylabel)
-        app.set_attribute_value("command",
-            precmd+
-            "sudo -S dbus-uuidgen --ensure ; "
-            "cat /dev/null > {#["+mylabel+"].trace[output].[name]#} ; "
-            "while [[ $(stat -c '%s' {#["+mylabel+"].trace[output].[name]#}) == '0' ]] ; do "
-            "vlc -vvv -I dummy"
-            " http://"+hostname+":8080 vlc://quit"
-            " --sout '#std{access=file,mux=ts,dst={#["+mylabel+"].trace[output].[name]#}}'"
-            " ; sleep 5 ; done ")
-        app.enable_trace("output")
-        return app
-    
-    def add_vlc_source(self, pl, node, iflabels):
-        app = self.add_vlc_base(pl, node)
-        app.set_attribute_value("label","vlc_source_%d" % (node.guid,))
-        app.set_attribute_value("sources", self.movie_source)
-        app.set_attribute_value("command",
-            "sudo -S dbus-uuidgen --ensure ; "
-            "vlc -vvv -I dummy "
-            +os.path.basename(self.movie_source)
-            +" --sout '#duplicate{"
-            +','.join([
-                "dst=std{access=udp,dst=239.255.12.42,mux=ts,ttl=64,miface-addr={#[%s].addr[0].[Address]#}}" % (iflabel,)
-                for iflabel in iflabels
-            ])
-            +"}'")
-        return app
-    
-    def add_net_monitor(self, pl, node):
-        app = pl.create("Application")
-        app.set_attribute_value("label","network_monitor_%d" % (node.guid,))
-        app.set_attribute_value("command", 
-            r"""head -n 2 /proc/net/dev ; while true ; do cat /proc/net/dev | sed -r 's/.*/'"$(date -R)"': \0/' | grep eth0 ; sleep 1 ; done""")
-        app.enable_trace("stdout")
-        node.connector("apps").connect(app.connector("node"))
-        return app
-    
-    def add_ip_address(self, iface, address, netprefix):
-        ip = iface.add_address()
-        ip.set_attribute_value("Address", address)
-        ip.set_attribute_value("NetPrefix", netprefix)
-
-    def add_route(self, node, destination, netprefix, nexthop):
-        route = node.add_route()
-        route.set_attribute_value("Destination", destination)
-        route.set_attribute_value("NetPrefix", netprefix)
-        route.set_attribute_value("NextHop", nexthop)
-
-    def make_ns_in_pl(self, pl, exp, node1, iface1, root):
-        ns3_testbed_id = "ns3"
-        
-        # Add NS3 support in node1
-        plnepi = pl.create("NepiDependency")
-        plns3 = pl.create("NS3Dependency")
-        plnepi.connector("node").connect(node1.connector("deps"))
-        plns3.connector("node").connect(node1.connector("deps"))
-
-        # Create NS3 testbed running in node1
-        ns3_provider = FactoriesProvider(ns3_testbed_id)
-        ns_desc = exp.add_testbed_description(ns3_provider)
-        ns_desc.set_attribute_value("rootDirectory", root)
-        ns_desc.set_attribute_value("SimulatorImplementationType", "ns3::RealtimeSimulatorImpl")
-        ns_desc.set_attribute_value("ChecksumEnabled", True)
-        ns_desc.set_attribute_value(DC.DEPLOYMENT_HOST, "{#[%s].addr[0].[Address]#}" % (
-            iface1.get_attribute_value("label"),))
-        ns_desc.set_attribute_value(DC.DEPLOYMENT_USER, 
-            pl.get_attribute_value("slice"))
-        ns_desc.set_attribute_value(DC.DEPLOYMENT_KEY, 
-            pl.get_attribute_value("sliceSSHKey"))
-        ns_desc.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        ns_desc.set_attribute_value(DC.DEPLOYMENT_COMMUNICATION, DC.ACCESS_SSH)
-        ns_desc.set_attribute_value(DC.DEPLOYMENT_ENVIRONMENT_SETUP,
-            "{#[%s].[%s]#}" % (
-                node1.get_attribute_value("label"),
-                ATTR_NEPI_TESTBED_ENVIRONMENT_SETUP,))
-        ns_desc.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-        
-        return ns_desc
-   
-    def add_ip_address(self, iface, address, netprefix, broadcast = False):
-        ip = iface.add_address()
-        ip.set_attribute_value("Address", address)
-        ip.set_attribute_value("NetPrefix", netprefix)
-        ip.set_attribute_value("Broadcast", broadcast)
-        return ip
-
-    def add_route(self, node, destination, netprefix, nexthop):
-        route = node.add_route()
-        route.set_attribute_value("Destination", destination)
-        route.set_attribute_value("NetPrefix", netprefix)
-        route.set_attribute_value("NextHop", nexthop)
-        return route
-
-    def add_ns_fdnd(self, ns_desc, node):
-        fdnd = ns_desc.create("ns3::FdNetDevice")
-        node.connector("devs").connect(fdnd.connector("node"))
-        #fdnd.enable_trace("FdPcapTrace")
-        return fdnd
-
-    def add_ns_node(self, ns_desc):
-        node = ns_desc.create("ns3::Node")
-        ipv4 = ns_desc.create("ns3::Ipv4L3Protocol")
-        arp  = ns_desc.create("ns3::ArpL3Protocol")
-        icmp = ns_desc.create("ns3::Icmpv4L4Protocol")
-        udp = ns_desc.create("ns3::UdpL4Protocol")
-        node.connector("protos").connect(ipv4.connector("node"))
-        node.connector("protos").connect(arp.connector("node"))
-        node.connector("protos").connect(icmp.connector("node"))
-        node.connector("protos").connect(udp.connector("node"))
-        return node
-
-    def add_ns_wifi_dev(self, ns_desc, node, access_point = False):
-        wifi = ns_desc.create("ns3::WifiNetDevice")
-        node.connector("devs").connect(wifi.connector("node"))
-
-        phy = ns_desc.create("ns3::YansWifiPhy")
-        error = ns_desc.create("ns3::NistErrorRateModel")
-        manager = ns_desc.create("ns3::ArfWifiManager")
-        if access_point:
-            mac = ns_desc.create("ns3::ApWifiMac")
-        else:
-            mac = ns_desc.create("ns3::StaWifiMac")
-
-        phy.set_attribute_value("Standard", "WIFI_PHY_STANDARD_80211a")
-        mac.set_attribute_value("Standard", "WIFI_PHY_STANDARD_80211a")
-        phy.connector("err").connect(error.connector("phy"))
-        wifi.connector("phy").connect(phy.connector("dev"))
-        wifi.connector("mac").connect(mac.connector("dev"))
-        wifi.connector("manager").connect(manager.connector("dev"))
-
-        #phy.enable_trace("YansWifiPhyPcapTrace")
-        return wifi, phy
-
-    def add_ns_constant_mobility(self, ns_desc, node, x, y, z):
-        mobility = ns_desc.create("ns3::ConstantPositionMobilityModel") 
-        position = "%d:%d:%d" % (x, y, z)
-        mobility.set_attribute_value("Position", position)
-        node.connector("mobility").connect(mobility.connector("node"))
-        return mobility
-
-    def add_ns_wifi_channel(self, ns_desc):
-        channel = ns_desc.create("ns3::YansWifiChannel")
-        delay = ns_desc.create("ns3::ConstantSpeedPropagationDelayModel")
-        loss  = ns_desc.create("ns3::LogDistancePropagationLossModel")
-        channel.connector("delay").connect(delay.connector("chan"))
-        channel.connector("loss").connect(loss.connector("prev"))
-        return channel
-
-    def make_netns_testbed(self, exp_desc):
-        netns_provider = FactoriesProvider("netns")
-        netns_desc = exp_desc.add_testbed_description(netns_provider)
-        netns_desc.set_attribute_value("homeDirectory", self.root_dir)
-        netns_desc.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        netns_root_dir = os.path.join(self.root_dir, "netns")
-        os.mkdir(netns_root_dir)
-        netns_desc.set_attribute_value(DC.ROOT_DIRECTORY, netns_root_dir)
-        netns_desc.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-        netns_desc.set_attribute_value(DC.USE_SUDO, True)
-        return netns_desc
-
-    def add_netns_node(self, netns_desc, forwardX = True, label = None):
-        node = netns_desc.create("Node")
-        node.set_attribute_value("forward_X11", forwardX)
-        if label:
-            node.set_attribute_value("label", label)
-        return node
-    
-    def add_netns_app(self, netns_desc, command, node):
-        app = netns_desc.create("Application")
-        app.set_attribute_value("command", command)
-        app.set_attribute_value("user", self.user)
-        app.connector("node").connect(node.connector("apps"))
-        return app
-
-    def add_pl_netns_connection(self, 
-            pl_tap, 
-            netns_desc, netns_node, netns_addr, netns_prefix = 30,
-            taplabel = None):
-        pl_tap.set_attribute_value("tun_cipher", "PLAIN") 
-        pl_tap.set_attribute_value("multicast", True) 
-        #pl_tap.enable_trace("pcap")
-        #pl_tap.enable_trace("packets")
-        pl_tapip = pl_tap.addresses[0].get_attribute_value("Address")
-        netns_tap = netns_desc.create("TunNodeInterface")
-        netns_tap.set_attribute_value("up", True)
-        netns_tap.set_attribute_value("mtu", 1448)
-        self.add_ip_address(netns_tap, netns_addr, netns_prefix)
-        if taplabel:
-            netns_tap.set_attribute_value("label", taplabel)
-        netns_node.connector("devs").connect(netns_tap.connector("node"))
-        netns_tunchannel = netns_desc.create("TunChannel")
-        netns_tunchannel.set_attribute_value("tun_cipher", "PLAIN") 
-        netns_tunchannel.connector("->fd").connect(netns_tap.connector("fd->"))
-        pl_tap.connector("tcp").connect(netns_tunchannel.connector("tcp"))
-        
-        pl_tap.set_attribute_value("tun_cipher", "PLAIN") 
-        pl_tap.set_attribute_value("pointopoint", netns_addr)
-
-    def add_pl_ns_connection(self, pl_desc, pl_node, pl_addr,
-            ns, ns_node, ns_addr, prefix = 30,
-            fd = False, ptp = False):
-        pl_tap = pl_desc.create("TapInterface")
-        if fd:
-            pl_tap.set_attribute_value("tun_cipher", "PLAIN") 
-        self.add_ip_address(pl_tap, pl_addr, prefix)
-        pl_node.connector("devs").connect(pl_tap.connector("node"))
-        ns_fdnd = ns.create("ns3::FdNetDevice")
-        ns_node.connector("devs").connect(ns_fdnd.connector("node"))
-        self.add_ip_address(ns_fdnd, ns_addr, prefix)
-        
-        if fd:
-            pl_tap.connector("fd->").connect(ns_fdnd.connector("->fd"))
-        else:
-            tunchannel = ns.create("ns3::Nepi::TunChannel")
-            tunchannel.connector("fd->").connect(ns_fdnd.connector("->fd"))
-            pl_tap.connector("udp").connect(tunchannel.connector("udp"))
-        
-        if ptp:
-            pl_tap.set_attribute_value("pointopoint", ns_addr)
-
-    def make_pl_overlay(self, numnodes):
-        ns3_testbed_id = "ns3"
-        
-        pl, exp = self.make_experiment_desc()
-        
-        # We'll make a distribution spanning tree using prefix matching as a distance
-        api = plutil.getAPI(self.pluser, self.plpass, hostname=self.plchost)
-        nodes = plutil.getNodes(api, numnodes, operatingSystem = 'f12')
-        root = min(nodes, key=operator.attrgetter('hostname'))
-        links = list(plutil.getSpanningTree(nodes, root=root))
-        
-        for node in nodes:
-            node.vif_ips = set()
-            node.children = []
-            node.childips = set()
-        
-        # Build an explicit tree
-        for slave, master in links:
-            master.children.append(slave)
-        
-        # We have to assign IPs and routes.
-        # The IP will be assigned sequentially, depth-first.
-        # This will result in rather compact routing rules
-        nextip = [128-numnodes]
-        def traverse(traverse, node, parent=None, base=struct.unpack('!L',socket.inet_aton(self.vnet))[0]):
-            if nextip[0] >= 254:
-                raise RuntimeError, "Too many IPs to assign!"
-            
-            node.vif_addr = base | (nextip[0])
-            nips = 1+len(node.children) # one vif per child, plus one for the parent
-            nextip[0] += nips
-            
-            for i in xrange(nips):
-                node.vif_ips.add(node.vif_addr+i)
-
-            if parent:
-                parent.childips.update(node.vif_ips)
-
-            for i,child in enumerate(node.children):
-                traverse(traverse, child, node, base)
-                
-            if parent:
-                parent.childips.update(node.childips)
-                
-        traverse(traverse, root)
-        
-        def printtree(printtree, node, indent=''):
-            print indent, '-', socket.inet_ntoa(struct.pack('!L',node.vif_addr)), '\t', node.country, node.city, node.site, '\t', node.hostname
-            for child in node.children:
-                childips = map(ipaddr.IPAddress, child.childips)
-                childnets = ipaddr.collapse_address_list(childips)
-                cip = ipaddr.IPAddress(child.vif_addr)
-                for cnet in childnets:
-                    print indent, '|- R', cnet, '->', cip
-                printtree(printtree, child, indent+' | ')
-        printtree(printtree, root)
-
-        inet = pl.create("Internet")
-
-        ns_chosen = []
-        leaves = []
-
-        def maketree(maketree, node, parent=None, parentIp=None):
-            routes = []
-            ctaps = []
-            for i,child in enumerate(node.children):
-                childips = map(ipaddr.IPAddress, child.childips)
-                childnets = ipaddr.collapse_address_list(childips)
-                cip = ipaddr.IPAddress(child.vif_addr)
-                pip = ipaddr.IPAddress(node.vif_addr+1+i)
-                for cnet in childnets:
-                    routes.append((cnet.ip.exploded, cnet.prefixlen, cip.exploded))
-                ctaps.append( maketree(maketree, child, node, pip) )
-            if parentIp:
-                routes.append((self.vnet,24,parentIp))
-            
-            if not parent:
-                label = "root"
-            else:
-                label = None
-                
-            # NS node, first leaf
-            if not ns_chosen and not node.children:
-                ns_chosen.append(True)
-                label = "ns_root"
-                
-            ips = [ ipaddr.IPAddress(node.vif_addr+i) for i in xrange(1+len(node.children)) ]
-            node1, iface1, tap1, tap1ip, _ = self.make_pl_tapnode(pl, ips, inet, 
-                hostname = node.hostname,
-                routes = routes,
-                mcastrouter = bool(node.children),
-                mcast = True,
-                label = label,
-                types = ( [ "TapInterface" ] * len(ips) if parent else [ "TunInterface" ] + [ "TapInterface" ] * (len(ips)-1) ) 
-                )
-            
-            for tap, ctap in zip(tap1[1:], ctaps):
-                tap.connector("udp").connect(ctap.connector("udp"))
-            
-            # Store leaves
-            if not node.children:
-                leaves.append((node, node1))
-            
-            self.add_net_monitor(pl, node1)
-            self.add_vlc_dumper(pl, node1)
-            self.add_vlc_restreamer(pl, node1)
-            #if not parent:
-            #    taplabels = [
-            #        t.get_attribute_value("label")
-            #        for t in tap1[1:]
-            #    ]
-            #    self.add_vlc_source(pl, node1, taplabels)
-            
-            return tap1[0]
-        roottap = maketree(maketree, root)
-
-        vnet_i = int(ipaddr.IPAddress(self.vnet))
-
-        ## NS3 ##
-        pl_ns_root = exp.get_element_by_label("ns_root")
-        pl_ns_root_iface = exp.get_element_by_label("ns_rootiface")
-        ns = self.make_ns_in_pl(pl, exp, pl_ns_root, pl_ns_root_iface, "ns3")
-        wifi_chan = self.add_ns_wifi_channel(ns)
-
-        # AP node
-        ap_node = self.add_ns_node(ns)
-        self.add_ns_constant_mobility(ns, ap_node, 0, 0, 0)
-        ap_wifi, ap_phy = self.add_ns_wifi_dev(ns, ap_node, access_point = True)
-        ap_phy.connector("chan").connect(wifi_chan.connector("phys"))
-
-        # Net range free for WiFi
-        wifi_net_prefix = 32-int(math.floor(math.log(256-nextip[0]&0xff) / math.log(2)))
-        wifi_net = vnet_i | (256 - (1<<(32-wifi_net_prefix)))
-
-        # connect AP to PL
-        pl_addr = str(ipaddr.IPAddress(wifi_net | 1))
-        ns_addr = str(ipaddr.IPAddress(wifi_net | 2))
-        self.add_pl_ns_connection(
-            pl, pl_ns_root, pl_addr, 
-            ns, ap_node, ns_addr, 
-            fd = True, ptp = True, prefix=30)
-
-        
-        # AP ip
-        ap_addr = str(ipaddr.IPAddress(vnet_i | 254))
-        ap_addr_prefix = 32-int(math.ceil(math.log(self.nsta+3) / math.log(2)))
-        self.add_ip_address(ap_wifi, ap_addr, ap_addr_prefix)
-        
-        # route for PL->wifi
-        self.add_route(pl_ns_root, 
-            str(ipaddr.IPAddress(wifi_net)), wifi_net_prefix,
-            ns_addr)
-        
-        print "NS-3 AP\t%s/%s <--> PL AP %s" % (ns_addr, 30, pl_addr)
-        print " | (|) %s/%s" % (ap_addr, ap_addr_prefix)
-        print " |"
-        print " |                  R %s/%d --> %s" % (str(ipaddr.IPAddress(wifi_net)), wifi_net_prefix, ns_addr)
-       
-        nextpip = (vnet_i | 255) >> (32-ap_addr_prefix) << (32-ap_addr_prefix)
-        nextdip = vnet_i | 253
-        ap_net = nextpip - (1<<(32-ap_addr_prefix))
-        r = 50
-        # STA nodes
-        for i in xrange(self.nsta):
-            stai = self.add_ns_node(ns)
-            angi = (360/self.nsta)*i
-            xi = r*math.cos(angi)
-            yi = r*math.sin(angi)
-            self.add_ns_constant_mobility(ns, stai, xi, yi, 0)
-            wifi, phy = self.add_ns_wifi_dev(ns, stai, access_point = False)
-            phy.connector("chan").connect(wifi_chan.connector("phys"))
-            
-            wifi_addr = str(ipaddr.IPAddress(vnet_i | nextdip))
-            nextdip -= 1
-
-            nextpip -= 4
-            while nextpip & 3:
-                nextpip -= 1
-            plns_net_i = nextpip
-            plns_net = str(ipaddr.IPAddress(plns_net_i))
-            pl_addr2 = str(ipaddr.IPAddress(plns_net_i | 1))
-            ns_addr2 = str(ipaddr.IPAddress(plns_net_i | 2))
-
-            # route from AP (after others)
-            print " | R %s/%s -> %s" % ( plns_net,30,ns_addr2 )
-            self.add_route(ap_node, plns_net, 30, wifi_addr)
-
-            print " +---\t(|) %16s/%s" % (wifi_addr,ap_addr_prefix)
-            print " |         %16s (ns3) <---> (pl) %16s/30" % (ns_addr2, pl_addr2)
-            print " |\t       \t\t                 <--  R %s/24" % (self.vnet, )
-            print " |\t       \t R %s/30 -> %s" % (plns_net, pl_addr2)
-            print " |\t       \t R %s <-- %s/24" % (ap_addr, plns_net)
-
-            self.add_ip_address(wifi, wifi_addr, ap_addr_prefix)
-            self.add_route(stai, plns_net, 30, pl_addr2)
-            self.add_route(stai, self.vnet, 24, ap_addr)
-            
-            pl_nodei, _, pl_ifacei, _, _ = self.make_pl_tapnode(pl, [], inet, 
-                routes = [(self.vnet, 24, ns_addr2)],
-                mcast = False,
-                label = "ns_plnode_%d" % (i+1,)
-                )
-            self.add_pl_ns_connection(
-                pl, pl_nodei, pl_addr2,
-                ns, stai, ns_addr2,
-                prefix = 30)
-            
-            self.add_vlc_dumper(pl, pl_nodei,
-                hostname = pl_addr,
-                labelprefix = "vlc_dumper_ns",
-                precmd = "sleep 15 ; ")
-
-            # Validate (post-fact to let the user see the diagram above)
-            if nextpip < wifi_net:
-                raise RuntimeError, "Not enough IPs for wifi section"
-        
-        # route back to PL (after others)
-        print " | R %s/%s -> %s" % ( self.vnet,24,pl_addr )
-        self.add_route(ap_node, self.vnet, 24, pl_addr)
-
-
-        ## NETNS ##
-        netns_addr = str(ipaddr.IPAddress(vnet_i | 1))
-
-        root1 = exp.get_element_by_label("root")
-        netns = self.make_netns_testbed(exp)
-        netns_node = self.add_netns_node(netns)
-        netns_term = self.add_netns_app(netns, "xterm", netns_node)
-        if self.movie_source:
-            cmd = (
-                "vlc -I dummy "
-                +os.path.abspath(self.movie_source)
-                +" --sout '#std{access=udp{ttl=64,miface-addr="+netns_addr+"},dst=239.255.12.42,mux=ts}'"
-            )
-        else:
-            cmd = self.movie_command % {
-                "dst" : "std{access=udp{ttl=64,miface-addr="+netns_addr+"},dst=239.255.12.42,mux=ts}"
-            }
-        netns_vlc  = self.add_netns_app(netns, cmd, netns_node)
-        
-        # connection PL1/NETNS
-        self.add_pl_netns_connection(
-            roottap,
-            netns, netns_node, netns_addr,
-            24,
-            taplabel="netns_source")
-        self.add_route(netns_node, 
-            "0.0.0.0", 0, 
-            str(ipaddr.IPAddress(root.vif_addr)) )
-        
-        # pick random hostname to stream from
-        interactive_source_host = random.sample(leaves,1)[0][0].hostname
-
-        xml = exp.to_xml()
-        test_dir = "./results"
-        #sys.exit(1)
-
-        try:
-            controller = ExperimentController(xml, self.root_dir)
-            controller.start()
-            
-            # launch vlc client to monitor activity
-            time.sleep(5)
-            proc = subprocess.Popen([
-                "vlc", "-I", "dummy", "http://%s:8080" % (interactive_source_host,)])
-            
-            print >>sys.stderr, "Close xterm to shut down or Ctrl+C"
-            try:
-                while not controller.is_finished(netns_term.guid):
-                    time.sleep(5)
-            except KeyboardInterrupt:
-                # ping netns
-                try:
-                    controller.traces_info()
-                except:
-                    pass
-                try:
-                    controller.traces_info()
-                except:
-                    pass
-            
-            # kill streamer
-            os.kill(proc.pid, signal.SIGTERM)
-            
-            # download results
-            traces_info = controller.traces_info()
-            for progress, (testbed_guid, guids) in enumerate(traces_info.iteritems()):
-                for subprogress, (guid, traces) in enumerate(guids.iteritems()):
-                    for name, data in traces.iteritems():
-                        path = data["filepath"]
-                        elem = exp.get_element(guid)
-                        if elem is not None:
-                            label = elem.get_attribute_value("label")
-                            if label is not None:
-                                path = "%s-%s" % (label,path)
-                        
-                        if not path:
-                            continue
-                        
-                        print >>sys.stderr, ("%.2f%% Downloading trace" % (progress + (subprogress * 1.0 / len(guids)) * 100.0 / len(traces_info))), path
-                        
-                        filepath = os.path.join(test_dir, path)
-                        
-                        try:
-                            trace = controller.trace(guid, name)
-                        except:
-                            traceback.print_exc(file=sys.stderr)
-                            continue
-                        try:
-                            if not os.path.exists(os.path.dirname(filepath)):
-                                os.makedirs(os.path.dirname(filepath))
-                        except:
-                            traceback.print_exc(file=sys.stderr)
-                        
-                        try:
-                            if len(trace) >= 2**20:
-                                # Bigger than 1M, compress
-                                tracefile = gzip.GzipFile(filepath+".gz", "wb")
-                            else:
-                                tracefile = open(filepath,"wb")
-                            try:
-                                tracefile.write(trace)
-                            finally:
-                                tracefile.close()
-                        except:
-                            traceback.print_exc(file=sys.stderr)
-        finally:
-            try:
-                controller.stop()
-            except:
-                traceback.print_exc()
-            try:
-                controller.shutdown()
-            except:
-                traceback.print_exc()
-
-
-if __name__ == '__main__':
-    usage = "usage: %prog -m movie -u user"
-    parser = OptionParser(usage=usage)
-    parser.add_option("-u", "--user", dest="user", help="Valid linux system user (not root).", type="str")
-    parser.add_option("-U", "--pluser", dest="pluser", help="PlanetLab PLC username", type="str")
-    parser.add_option("-m", "--movie", dest="movie", help="Path to movie file to play", type="str")
-    parser.add_option("-n", "--nsta", dest="nsta", default=3, help="Number of wifi stations attached to the overlay", type="int")
-    parser.add_option("-N", "--nodes", dest="nodes", default=5, help="Number of overlay nodes", type="int")
-    parser.add_option("-s", "--slicename", dest="slicename", help="PlanetLab slice", type="str")
-    parser.add_option("-H", "--plchost", dest="plchost", help="PlanetLab's PLC hostname", type="str")
-    parser.add_option("-k", "--plkey", dest="plkey", help="Slice SSH key", type="str")
-    parser.add_option("-P", "--no-p2p", dest="nop2p", help="Disable peer-to-peer deployment. Not recommended for first deployment.", 
-        action="store_true", default=False)
-    (options, args) = parser.parse_args()
-    if options.user == 'root':
-        parser.error("Missing or invalid 'user' option.")
-
-    exp = PlanetLabMulticastOverlay()
-    if not options.movie or options.movie.startswith("/dev/"):
-        # use camera
-        if not options.movie:
-            options.movie = "/dev/video0"
-        exp.movie_source = None
-        exp.movie_command = (
-            "vlc -I dummy -vvv --color "
-            "v4l:///dev/video0:size=320x240:channel=0:adev=/dev/dsp:audio=0 "
-            "--sout '#transcode{vcodec=mpeg4,acodec=aac,vb=100,ab=16,venc=ffmpeg{keyint=80,hq=rd},deinterlace}:"
-            "%(dst)s'"
-        )
-    else:
-        exp.movie_source = options.movie
-    exp.no_p2p_deploy = options.nop2p
-    exp.nsta = options.nsta
-    if options.user:
-        exp.user = options.user
-    if options.plchost:
-        exp.plchost = options.plchost
-    if options.slicename:
-        exp.slicename = options.slicename
-    if options.plkey:
-        exp.plkey = options.plkey
-    if options.pluser:
-        exp.pluser = options.pluser
-    if not exp.plpass:
-        exp.plpass = getpass.getpass("Password for %s: " % (exp.pluser,))
-    
-    # Fix some distro's environment to work well with netns
-    if re.match(r"[^:]*:\d+$", os.environ['DISPLAY']):
-        os.environ['DISPLAY'] += '.0'
-    if not os.environ.get('XAUTHORITY'):
-        os.environ['XAUTHORITY'] = os.path.join(os.environ['HOME'], '.Xauthority')
-    
-    try:
-        exp.setUp()
-        exp.make_pl_overlay(options.nodes)
-    finally:
-        exp.tearDown()
-
diff --git a/examples/POPI/popi-0.9-rpy2.patch b/examples/POPI/popi-0.9-rpy2.patch
deleted file mode 100644 (file)
index 269e1d2..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-diff -ur popi-0.9/rlranklib.py popi-0.9-rpy2/rlranklib.py
---- popi-0.9/rlranklib.py      2011-08-17 15:30:31.000000000 +0200
-+++ popi-0.9-rpy2/rlranklib.py 2011-09-16 05:28:16.547000068 +0200
-@@ -5,10 +5,13 @@
- # $Header: /data/cvsroot/plportest/anadata/rlranklib.py,v 1.11.2.1 2007/11/26 08:46:29 lgh Exp $
- import random
--from rpy import *
-+import rpy2.rpy_classic
-+from rpy2.rpy_classic import *
- from math import sqrt, log
- from sets import Set
-+rpy2.rpy_classic.set_default_mode(rpy2.rpy_classic.BASIC_CONVERSION)
-+
- safe_eff = [None,1.365,1.208,1.116,1.110,1.104,1.064,1.064]
- def calc_rlrank(lrs, opt_ranktype):
-@@ -66,7 +69,7 @@
-         riag = [ rlranks[i] for i in range(0, len(rlranks)) if partition[i] == igrp ]    # rlranks in a group
-         if len(riag) == 1:
-             continue
--        threshold = r.qtukey(alpha, len(riag), 1000)*sqrt(1.0/12/nb)*len(riag)/len(rlranks)
-+        threshold = r.qtukey(alpha, len(riag), 1000)[0]*sqrt(1.0/12/nb)*len(riag)/len(rlranks)
-         range_    = max(riag) - min(riag)
-         if verbose:
-             print "   ", igrp, len(riag), min(riag), max(riag), range_, "%.3f" % threshold,
-@@ -99,7 +102,7 @@
-         return True
-     
-     # t = r.qtukey(alpha, len(arrs), 1000)*sqrt(1.0/12/nb)*(len(arrs)+1)/ksum
--    t = r.qtukey(alpha, km, 1000) * sqrt((km*km-1)/12.0/nb) / ksum
-+    t = r.qtukey(alpha, km, 1000)[0] * sqrt((km*km-1)/12.0/nb) / ksum
-     if km <= len(safe_eff):
-         t = t * safe_eff[km-1]
-     if max(arrs) - min(arrs) > t:
-Binary files popi-0.9/rlranklib.pyc and popi-0.9-rpy2/rlranklib.pyc differ
diff --git a/examples/POPI/popi-0.9.tar.gz b/examples/POPI/popi-0.9.tar.gz
deleted file mode 100644 (file)
index 9f11707..0000000
Binary files a/examples/POPI/popi-0.9.tar.gz and /dev/null differ
diff --git a/examples/POPI/popi-tun-classfilter-2MB-q500-pl-hibw.xml b/examples/POPI/popi-tun-classfilter-2MB-q500-pl-hibw.xml
deleted file mode 100644 (file)
index 6ee1bc7..0000000
+++ /dev/null
@@ -1,259 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<experiment>
-    <testbeds>
-        <testbed guid="1" testbed_id="planetlab" testbed_version="0.1">
-            <graphical_info height="580.0" width="872.882843778" x="-317.0" y="-124.0"/>
-            <attributes>
-                <attribute name="slice" type="STRING" value="##plslice##"/>
-                <attribute name="plLogLevel" type="STRING" value="INFO"/>
-                <attribute name="logLevel" type="STRING" value="Debug"/>
-                <attribute name="cleanProc" type="BOOL" value="True"/>
-                <attribute name="plcUrl" type="STRING" value="https://%(hostname)s:443/PLCAPI/"/>
-                <attribute name="recoveryPolicy" type="STRING" value="Recover"/>
-                <attribute name="p2pDeployment" type="BOOL" value="False"/>
-                <attribute name="label" type="STRING" value=""/>
-                <attribute name="tapPortBase" type="INTEGER" value="16000"/>
-                <attribute name="authUser" type="STRING" value="##pluser##"/>
-                <attribute name="sliceSSHKey" type="STRING" value="##plkey##"/>
-                <attribute name="authPass" type="STRING" value="##plpass##"/>
-                <attribute name="plcHost" type="STRING" value="www.planet-lab.eu"/>
-            </attributes>
-            <elements>
-                <element factory_id="Node" guid="2">
-                    <graphical_info height="100.0" width="100.0" x="-256.0" y="-223.0"/>
-                    <attributes>
-                        <attribute name="maxBandwidth" type="DOUBLE" value="1024.0"/>
-                        <attribute name="minCpu" type="DOUBLE" value="30.0"/>
-                        <attribute name="operatingSystem" type="STRING" value="f12"/>
-                    </attributes>
-                    <routes>
-                        <route Destination="192.168.2.32" Metric="0" NetPrefix="27" NextHop="192.168.2.3"/>
-                    </routes>
-                    <connections>
-                        <connection connector="devs" other_connector="node" other_guid="11"/>
-                        <connection connector="devs" other_connector="node" other_guid="21"/>
-                        <connection connector="apps" other_connector="node" other_guid="8"/>
-                    </connections>
-                </element>
-                <element factory_id="Node" guid="3">
-                    <graphical_info height="100.0" width="100.0" x="263.0" y="-222.0"/>
-                    <attributes>
-                        <attribute name="maxBandwidth" type="DOUBLE" value="1024.0"/>
-                        <attribute name="forward_X11" type="BOOL" value="False"/>
-                        <attribute name="minCpu" type="DOUBLE" value="30.0"/>
-                        <attribute name="operatingSystem" type="STRING" value="f12"/>
-                    </attributes>
-                    <routes>
-                        <route Destination="192.168.2.0" Metric="0" NetPrefix="27" NextHop="192.168.2.34"/>
-                    </routes>
-                    <connections>
-                        <connection connector="devs" other_connector="node" other_guid="24"/>
-                        <connection connector="devs" other_connector="node" other_guid="12"/>
-                        <connection connector="apps" other_connector="node" other_guid="10"/>
-                    </connections>
-                </element>
-                <element factory_id="Application" guid="8">
-                    <graphical_info height="100.0" width="100.0" x="-365.684303842" y="-223.842151921"/>
-                    <attributes>
-                        <attribute name="buildDepends" type="STRING" value="libpcap-devel make gcc "/>
-                        <attribute name="sudo" type="BOOL" value="True"/>
-                        <attribute name="label" type="STRING" value="popi-cli"/>
-                        <attribute name="sources" type="STRING" value="popi-0.9.tar.gz"/>
-                        <attribute name="depends" type="STRING" value="libpcap"/>
-                        <attribute name="command" type="STRING" value="sleep 2; ./popi -et 136/2004/30002,6/2000/30002,17/2002/30002,1/30002/8 -b 32 -r 2000  -o {#[popi-cli].trace[output].[name]#} -i {#[tap-cli].[if_name]#} {#[tap-srv].addr[0].[Address]#}"/>
-                        <attribute name="build" type="STRING" value="tar xzf ${SOURCES}/popi-0.9.tar.gz &amp;&amp; cd popi-0.9 &amp;&amp; ./configure --enable-debug --with-host=planetlab &amp;&amp; make"/>
-                        <attribute name="install" type="STRING" value="cp ./popi-0.9/popi ${SOURCES}"/>
-                    </attributes>
-                    <traces>
-                        <trace name="output"/>
-                        <trace name="stderr"/>
-                        <trace name="stdout"/>
-                    </traces>
-                    <connections>
-                        <connection connector="node" other_connector="apps" other_guid="2"/>
-                    </connections>
-                </element>
-                <element factory_id="Internet" guid="9">
-                    <graphical_info height="100.0" width="100.0" x="8.0" y="211.0"/>
-                    <connections>
-                        <connection connector="devs" other_connector="inet" other_guid="16"/>
-                        <connection connector="devs" other_connector="inet" other_guid="11"/>
-                        <connection connector="devs" other_connector="inet" other_guid="12"/>
-                    </connections>
-                </element>
-                <element factory_id="Application" guid="10">
-                    <graphical_info height="100.0" width="100.0" x="374.0" y="-222.0"/>
-                    <attributes>
-                        <attribute name="buildDepends" type="STRING" value="libpcap-devel make gcc "/>
-                        <attribute name="sudo" type="BOOL" value="True"/>
-                        <attribute name="label" type="STRING" value="popi-srv"/>
-                        <attribute name="sources" type="STRING" value="popi-0.9.tar.gz"/>
-                        <attribute name="depends" type="STRING" value="libpcap"/>
-                       <attribute name="command" type="STRING" value="./popid -v -i {#[tap-srv].[if_name]#} -b 136/30002,6/30002,17/30002,1/30002"/>
-                        <attribute name="build" type="STRING" value="tar xzf ${SOURCES}/popi-0.9.tar.gz &amp;&amp; cd popi-0.9 &amp;&amp; ./configure --enable-debug --with-host=planetlab &amp;&amp; make"/>
-                        <attribute name="install" type="STRING" value="cp ./popi-0.9/popid ${SOURCES}"/>
-                    </attributes>
-                    <traces>
-                        <trace name="stderr"/>
-                        <trace name="stdout"/>
-                    </traces>
-                    <connections>
-                        <connection connector="node" other_connector="apps" other_guid="3"/>
-                    </connections>
-                </element>
-                <element factory_id="NodeInterface" guid="11">
-                    <graphical_info height="100.0" width="100.0" x="-355.87051993" y="67.7927863957"/>
-                    <factory_attributes>
-                        <factory_attribute name="maxAddresses" type="INTEGER" value="1"/>
-                    </factory_attributes>
-                    <attributes>
-                        <attribute name="maxAddresses" type="INTEGER" value="1"/>
-                    </attributes>
-                    <connections>
-                        <connection connector="node" other_connector="devs" other_guid="2"/>
-                        <connection connector="inet" other_connector="devs" other_guid="9"/>
-                    </connections>
-                </element>
-                <element factory_id="NodeInterface" guid="12">
-                    <graphical_info height="100.0" width="100.0" x="356.0" y="76.0"/>
-                    <factory_attributes>
-                        <factory_attribute name="maxAddresses" type="INTEGER" value="1"/>
-                    </factory_attributes>
-                    <attributes>
-                        <attribute name="maxAddresses" type="INTEGER" value="1"/>
-                    </attributes>
-                    <connections>
-                        <connection connector="node" other_connector="devs" other_guid="3"/>
-                        <connection connector="inet" other_connector="devs" other_guid="9"/>
-                    </connections>
-                </element>
-                <element factory_id="Node" guid="15">
-                    <graphical_info height="100.0" width="100.0" x="1.0" y="-216.0"/>
-                    <attributes>
-                        <attribute name="maxBandwidth" type="DOUBLE" value="512.0"/>
-                        <attribute name="forward_X11" type="BOOL" value="False"/>
-                        <attribute name="minCpu" type="DOUBLE" value="50.0"/>
-                        <attribute name="operatingSystem" type="STRING" value="f12"/>
-                    </attributes>
-                    <connections>
-                        <connection connector="devs" other_connector="node" other_guid="16"/>
-                        <connection connector="devs" other_connector="node" other_guid="22"/>
-                        <connection connector="devs" other_connector="node" other_guid="23"/>
-                    </connections>
-                </element>
-                <element factory_id="NodeInterface" guid="16">
-                    <graphical_info height="100.0" width="100.0" x="6.0" y="72.0"/>
-                    <factory_attributes>
-                        <factory_attribute name="maxAddresses" type="INTEGER" value="1"/>
-                    </factory_attributes>
-                    <attributes>
-                        <attribute name="maxAddresses" type="INTEGER" value="1"/>
-                    </attributes>
-                    <connections>
-                        <connection connector="node" other_connector="devs" other_guid="15"/>
-                        <connection connector="inet" other_connector="devs" other_guid="9"/>
-                    </connections>
-                </element>
-                <element factory_id="TunInterface" guid="21">
-                    <graphical_info height="100.0" width="100.0" x="-255.0" y="-110.0"/>
-                    <factory_attributes>
-                        <factory_attribute name="maxAddresses" type="INTEGER" value="1"/>
-                    </factory_attributes>
-                    <attributes>
-                        <attribute name="up" type="BOOL" value="True"/>
-                        <attribute name="maxAddresses" type="INTEGER" value="1"/>
-                        <attribute name="label" type="STRING" value="tap-cli"/>
-                        <attribute name="bwlimit" type="INTEGER" value="256"/>
-                    </attributes>
-                    <traces>
-                        <trace name="pcap"/>
-                    </traces>
-                    <addresses>
-                        <address Address="192.168.2.2" NetPrefix="27"/>
-                    </addresses>
-                    <connections>
-                        <connection connector="node" other_connector="devs" other_guid="2"/>
-                        <connection connector="udp" other_connector="udp" other_guid="22"/>
-                    </connections>
-                </element>
-                <element factory_id="TunInterface" guid="22">
-                    <graphical_info height="100.0" width="100.0" x="-56.0" y="-105.0"/>
-                    <factory_attributes>
-                        <factory_attribute name="maxAddresses" type="INTEGER" value="1"/>
-                    </factory_attributes>
-                    <attributes>
-                        <attribute name="up" type="BOOL" value="True"/>
-                        <attribute name="maxAddresses" type="INTEGER" value="1"/>
-                        <attribute name="bwlimit" type="INTEGER" value="256"/>
-                    </attributes>
-                    <traces>
-                        <trace name="pcap"/>
-                    </traces>
-                    <addresses>
-                        <address Address="192.168.2.3" NetPrefix="27"/>
-                    </addresses>
-                    <connections>
-                        <connection connector="node" other_connector="devs" other_guid="15"/>
-                        <connection connector="udp" other_connector="udp" other_guid="21"/>
-                    </connections>
-                </element>
-                <element factory_id="TunInterface" guid="23">
-                    <graphical_info height="100.0" width="100.0" x="77.0" y="-104.0"/>
-                    <factory_attributes>
-                        <factory_attribute name="maxAddresses" type="INTEGER" value="1"/>
-                    </factory_attributes>
-                    <attributes>
-                        <attribute name="up" type="BOOL" value="True"/>
-                        <attribute name="maxAddresses" type="INTEGER" value="1"/>
-                        <attribute name="bwlimit" type="INTEGER" value="##bwlimit##"/>
-                    </attributes>
-                    <traces>
-                        <trace name="pcap"/>
-                    </traces>
-                    <addresses>
-                        <address Address="192.168.2.34" NetPrefix="27"/>
-                    </addresses>
-                    <connections>
-                        <connection connector="node" other_connector="devs" other_guid="15"/>
-                        <connection connector="fd-&gt;" other_connector="-&gt;fd" other_guid="26"/>
-                    </connections>
-                </element>
-                <element factory_id="TunInterface" guid="24">
-                    <graphical_info height="100.0" width="100.0" x="264.0" y="-107.0"/>
-                    <factory_attributes>
-                        <factory_attribute name="maxAddresses" type="INTEGER" value="1"/>
-                    </factory_attributes>
-                    <attributes>
-                        <attribute name="up" type="BOOL" value="True"/>
-                        <attribute name="maxAddresses" type="INTEGER" value="1"/>
-                        <attribute name="label" type="STRING" value="tap-srv"/>
-                        <attribute name="bwlimit" type="INTEGER" value="256"/>
-                    </attributes>
-                    <traces>
-                        <trace name="pcap"/>
-                    </traces>
-                    <addresses>
-                        <address Address="192.168.2.35" NetPrefix="27"/>
-                    </addresses>
-                    <connections>
-                        <connection connector="node" other_connector="devs" other_guid="3"/>
-                        <connection connector="udp" other_connector="udp" other_guid="26"/>
-                    </connections>
-                </element>
-                <element factory_id="ClassQueueFilter" guid="26">
-                    <graphical_info height="100.0" width="100.0" x="174.0" y="19.0"/>
-                    <attributes>
-                        <attribute name="args" type="STRING" value="size=500,classes=##classes##"/>
-                    </attributes>
-                    <traces>
-                        <trace name="dropped_stats"/>
-                    </traces>
-                    <connections>
-                        <connection connector="udp" other_connector="udp" other_guid="24"/>
-                        <connection connector="-&gt;fd" other_connector="fd-&gt;" other_guid="23"/>
-                    </connections>
-                </element>
-            </elements>
-        </testbed>
-    </testbeds>
-</experiment>
diff --git a/examples/POPI/popi-tun-classfilter-2MB-q500-pl.xml b/examples/POPI/popi-tun-classfilter-2MB-q500-pl.xml
deleted file mode 100644 (file)
index ddf70bc..0000000
+++ /dev/null
@@ -1,241 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<experiment>
-    <testbeds>
-        <testbed guid="1" testbed_id="planetlab" testbed_version="0.1">
-            <graphical_info height="580.0" width="872.882843778" x="-317.0" y="-124.0"/>
-            <attributes>
-                <attribute name="slice" type="STRING" value="##plslice##"/>
-                <attribute name="plLogLevel" type="STRING" value="INFO"/>
-                <attribute name="logLevel" type="STRING" value="Debug"/>
-                <attribute name="cleanProc" type="BOOL" value="True"/>
-                <attribute name="plcUrl" type="STRING" value="https://%(hostname)s:443/PLCAPI/"/>
-                <attribute name="recoveryPolicy" type="STRING" value="Recover"/>
-                <attribute name="p2pDeployment" type="BOOL" value="False"/>
-                <attribute name="label" type="STRING" value=""/>
-                <attribute name="tapPortBase" type="INTEGER" value="16000"/>
-                <attribute name="authUser" type="STRING" value="##pluser##"/>
-                <attribute name="sliceSSHKey" type="STRING" value="##plkey##"/>
-                <attribute name="authPass" type="STRING" value="##plpass##"/>
-                <attribute name="plcHost" type="STRING" value="www.planet-lab.eu"/>
-            </attributes>
-            <elements>
-                <element factory_id="Node" guid="2">
-                    <graphical_info height="100.0" width="100.0" x="-256.0" y="-223.0"/>
-                    <attributes>
-                        <attribute name="operatingSystem" type="STRING" value="f12"/>
-                    </attributes>
-                    <routes>
-                        <route Destination="192.168.2.32" Metric="0" NetPrefix="27" NextHop="192.168.2.3"/>
-                    </routes>
-                    <connections>
-                        <connection connector="devs" other_connector="node" other_guid="11"/>
-                        <connection connector="devs" other_connector="node" other_guid="21"/>
-                        <connection connector="apps" other_connector="node" other_guid="8"/>
-                    </connections>
-                </element>
-                <element factory_id="Node" guid="3">
-                    <graphical_info height="100.0" width="100.0" x="263.0" y="-222.0"/>
-                    <attributes>
-                        <attribute name="forward_X11" type="BOOL" value="False"/>
-                        <attribute name="operatingSystem" type="STRING" value="f12"/>
-                    </attributes>
-                    <routes>
-                        <route Destination="192.168.2.0" Metric="0" NetPrefix="27" NextHop="192.168.2.34"/>
-                    </routes>
-                    <connections>
-                        <connection connector="devs" other_connector="node" other_guid="24"/>
-                        <connection connector="devs" other_connector="node" other_guid="12"/>
-                        <connection connector="apps" other_connector="node" other_guid="10"/>
-                    </connections>
-                </element>
-                <element factory_id="Application" guid="8">
-                    <graphical_info height="100.0" width="100.0" x="-365.684303842" y="-223.842151921"/>
-                    <attributes>
-                        <attribute name="buildDepends" type="STRING" value="libpcap-devel make gcc "/>
-                        <attribute name="sudo" type="BOOL" value="True"/>
-                        <attribute name="label" type="STRING" value="popi-cli"/>
-                       <attribute name="sources" type="STRING" value="popi-0.9.tar.gz"/>
-                        <attribute name="depends" type="STRING" value="libpcap"/>
-                        <attribute name="command" type="STRING" value="sleep 2; ./popi -et 136/2004/30002,6/2000/30002,17/2002/30002,1/30002/8 -b 32 -r 2000  -o {#[popi-cli].trace[output].[name]#} -i {#[tap-cli].[if_name]#} {#[tap-srv].addr[0].[Address]#}"/>
-                        <attribute name="build" type="STRING" value="tar xzf ${SOURCES}/popi-0.9.tar.gz &amp;&amp; cd popi-0.9 &amp;&amp; ./configure --enable-debug --with-host=planetlab &amp;&amp; make"/>
-                        <attribute name="install" type="STRING" value="cp ./popi-0.9/popi ${SOURCES}"/>
-                    </attributes>
-                    <traces>
-                        <trace name="output"/>
-                        <trace name="stderr"/>
-                        <trace name="stdout"/>
-                    </traces>
-                    <connections>
-                        <connection connector="node" other_connector="apps" other_guid="2"/>
-                    </connections>
-                </element>
-                <element factory_id="Internet" guid="9">
-                    <graphical_info height="100.0" width="100.0" x="8.0" y="211.0"/>
-                    <connections>
-                        <connection connector="devs" other_connector="inet" other_guid="16"/>
-                        <connection connector="devs" other_connector="inet" other_guid="11"/>
-                        <connection connector="devs" other_connector="inet" other_guid="12"/>
-                    </connections>
-                </element>
-                <element factory_id="Application" guid="10">
-                    <graphical_info height="100.0" width="100.0" x="374.0" y="-222.0"/>
-                    <attributes>
-                        <attribute name="buildDepends" type="STRING" value="libpcap-devel make gcc "/>
-                        <attribute name="sudo" type="BOOL" value="True"/>
-                        <attribute name="label" type="STRING" value="popi-srv"/>
-                        <attribute name="sources" type="STRING" value="popi-0.9.tar.gz"/>
-                        <attribute name="depends" type="STRING" value="libpcap"/>
-                       <attribute name="command" type="STRING" value="./popid -v -i {#[tap-srv].[if_name]#} -b 136/30002,6/30002,17/30002,1/30002"/>
-                        <attribute name="build" type="STRING" value="tar xzf ${SOURCES}/popi-0.9.tar.gz &amp;&amp; cd popi-0.9 &amp;&amp; ./configure --enable-debug --with-host=planetlab &amp;&amp; make"/>
-                        <attribute name="install" type="STRING" value="cp ./popi-0.9/popid ${SOURCES}"/>
-                    </attributes>
-                    <traces>
-                        <trace name="stderr"/>
-                        <trace name="stdout"/>
-                    </traces>
-                    <connections>
-                        <connection connector="node" other_connector="apps" other_guid="3"/>
-                    </connections>
-                </element>
-                <element factory_id="NodeInterface" guid="11">
-                    <graphical_info height="100.0" width="100.0" x="-355.87051993" y="67.7927863957"/>
-                    <factory_attributes>
-                        <factory_attribute name="maxAddresses" type="INTEGER" value="1"/>
-                    </factory_attributes>
-                    <attributes>
-                        <attribute name="maxAddresses" type="INTEGER" value="1"/>
-                    </attributes>
-                    <connections>
-                        <connection connector="node" other_connector="devs" other_guid="2"/>
-                        <connection connector="inet" other_connector="devs" other_guid="9"/>
-                    </connections>
-                </element>
-                <element factory_id="NodeInterface" guid="12">
-                    <graphical_info height="100.0" width="100.0" x="356.0" y="76.0"/>
-                    <factory_attributes>
-                        <factory_attribute name="maxAddresses" type="INTEGER" value="1"/>
-                    </factory_attributes>
-                    <attributes>
-                        <attribute name="maxAddresses" type="INTEGER" value="1"/>
-                    </attributes>
-                    <connections>
-                        <connection connector="node" other_connector="devs" other_guid="3"/>
-                        <connection connector="inet" other_connector="devs" other_guid="9"/>
-                    </connections>
-                </element>
-                <element factory_id="Node" guid="15">
-                    <graphical_info height="100.0" width="100.0" x="1.0" y="-216.0"/>
-                    <attributes>
-                        <attribute name="forward_X11" type="BOOL" value="False"/>
-                        <attribute name="operatingSystem" type="STRING" value="f12"/>
-                    </attributes>
-                    <connections>
-                        <connection connector="devs" other_connector="node" other_guid="16"/>
-                        <connection connector="devs" other_connector="node" other_guid="22"/>
-                        <connection connector="devs" other_connector="node" other_guid="23"/>
-                    </connections>
-                </element>
-                <element factory_id="NodeInterface" guid="16">
-                    <graphical_info height="100.0" width="100.0" x="6.0" y="72.0"/>
-                    <factory_attributes>
-                        <factory_attribute name="maxAddresses" type="INTEGER" value="1"/>
-                    </factory_attributes>
-                    <attributes>
-                        <attribute name="maxAddresses" type="INTEGER" value="1"/>
-                    </attributes>
-                    <connections>
-                        <connection connector="node" other_connector="devs" other_guid="15"/>
-                        <connection connector="inet" other_connector="devs" other_guid="9"/>
-                    </connections>
-                </element>
-                <element factory_id="TunInterface" guid="21">
-                    <graphical_info height="100.0" width="100.0" x="-255.0" y="-110.0"/>
-                    <factory_attributes>
-                        <factory_attribute name="maxAddresses" type="INTEGER" value="1"/>
-                    </factory_attributes>
-                    <attributes>
-                        <attribute name="up" type="BOOL" value="True"/>
-                        <attribute name="maxAddresses" type="INTEGER" value="1"/>
-                        <attribute name="label" type="STRING" value="tap-cli"/>
-                        <attribute name="bwlimit" type="INTEGER" value="256"/>
-                    </attributes>
-                    <addresses>
-                        <address Address="192.168.2.2" NetPrefix="27"/>
-                    </addresses>
-                    <connections>
-                        <connection connector="node" other_connector="devs" other_guid="2"/>
-                        <connection connector="udp" other_connector="udp" other_guid="22"/>
-                    </connections>
-                </element>
-                <element factory_id="TunInterface" guid="22">
-                    <graphical_info height="100.0" width="100.0" x="-56.0" y="-105.0"/>
-                    <factory_attributes>
-                        <factory_attribute name="maxAddresses" type="INTEGER" value="1"/>
-                    </factory_attributes>
-                    <attributes>
-                        <attribute name="up" type="BOOL" value="True"/>
-                        <attribute name="maxAddresses" type="INTEGER" value="1"/>
-                        <attribute name="bwlimit" type="INTEGER" value="256"/>
-                    </attributes>
-                    <addresses>
-                        <address Address="192.168.2.3" NetPrefix="27"/>
-                    </addresses>
-                    <connections>
-                        <connection connector="node" other_connector="devs" other_guid="15"/>
-                        <connection connector="udp" other_connector="udp" other_guid="21"/>
-                    </connections>
-                </element>
-                <element factory_id="TunInterface" guid="23">
-                    <graphical_info height="100.0" width="100.0" x="77.0" y="-104.0"/>
-                    <factory_attributes>
-                        <factory_attribute name="maxAddresses" type="INTEGER" value="1"/>
-                    </factory_attributes>
-                    <attributes>
-                        <attribute name="up" type="BOOL" value="True"/>
-                        <attribute name="maxAddresses" type="INTEGER" value="1"/>
-                        <attribute name="bwlimit" type="INTEGER" value="##bwlimit##"/>
-                    </attributes>
-                    <addresses>
-                        <address Address="192.168.2.34" NetPrefix="27"/>
-                    </addresses>
-                    <connections>
-                        <connection connector="node" other_connector="devs" other_guid="15"/>
-                        <connection connector="fd-&gt;" other_connector="-&gt;fd" other_guid="26"/>
-                    </connections>
-                </element>
-                <element factory_id="TunInterface" guid="24">
-                    <graphical_info height="100.0" width="100.0" x="264.0" y="-107.0"/>
-                    <factory_attributes>
-                        <factory_attribute name="maxAddresses" type="INTEGER" value="1"/>
-                    </factory_attributes>
-                    <attributes>
-                        <attribute name="up" type="BOOL" value="True"/>
-                        <attribute name="maxAddresses" type="INTEGER" value="1"/>
-                        <attribute name="label" type="STRING" value="tap-srv"/>
-                        <attribute name="bwlimit" type="INTEGER" value="256"/>
-                    </attributes>
-                    <addresses>
-                        <address Address="192.168.2.35" NetPrefix="27"/>
-                    </addresses>
-                    <connections>
-                        <connection connector="node" other_connector="devs" other_guid="3"/>
-                        <connection connector="udp" other_connector="udp" other_guid="26"/>
-                    </connections>
-                </element>
-                <element factory_id="ClassQueueFilter" guid="26">
-                    <graphical_info height="100.0" width="100.0" x="174.0" y="19.0"/>
-                    <attributes>
-                        <attribute name="args" type="STRING" value="size=500,classes=##classes##"/>
-                    </attributes>
-                    <traces>
-                        <trace name="dropped_stats"/>
-                    </traces>
-                    <connections>
-                        <connection connector="udp" other_connector="udp" other_guid="24"/>
-                        <connection connector="-&gt;fd" other_connector="fd-&gt;" other_guid="23"/>
-                    </connections>
-                </element>
-            </elements>
-        </testbed>
-    </testbeds>
-</experiment>
diff --git a/examples/POPI/run_one_experiment.py b/examples/POPI/run_one_experiment.py
deleted file mode 100755 (executable)
index 0280654..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import os
-import os.path
-import re
-import time
-import commands
-import subprocess
-import collections
-import signal
-import traceback
-import shutil
-import sys
-import cPickle
-import gzip
-
-sys.path.append(os.path.abspath("../../src"))
-
-from nepi.core.execute import ExperimentController
-
-class PopiExperiment(object):
-    def run(self, duration, xml_filepath, testset, results_dir, iteration):
-        app_guid = 8
-
-        testset_dir = os.path.join(results_dir, testset)
-        
-        # create test results file
-        test_dir = os.path.join(testset_dir, str(iteration))
-
-        # replace results values in xml
-        replacements = cPickle.loads(os.environ['POPI_REPLACEMENTS'].strip().decode("base64"))
-        file = open(xml_filepath)
-        xml2 = xml = file.read()
-        file.close()
-
-        for key,value in replacements.iteritems():
-            xml2 = xml2.replace("##%s##" % (key,), value)
-
-        # launch experiment
-        controller = ExperimentController(xml2, results_dir)
-        
-        try:
-            controller.start()
-
-            t0 = time.time()
-            t1 = t0
-            while (t1-t0) < duration and not controller.is_finished(app_guid):
-                time.sleep(10)
-            
-            # download results
-            for testbed_guid, guids in controller.traces_info().iteritems():
-                for guid, traces in guids.iteritems():
-                    for name, data in traces.iteritems():
-                        path = data["filepath"]
-                        print >>sys.stderr, "Downloading trace", path
-                        
-                        filepath = os.path.join(test_dir, path)
-                        
-                        try:
-                            trace = controller.trace(guid, name)
-                        except:
-                            traceback.print_exc(file=sys.stderr)
-                            continue
-                        try:
-                            if not os.path.exists(os.path.dirname(filepath)):
-                                os.makedirs(os.path.dirname(filepath))
-                        except:
-                            traceback.print_exc(file=sys.stderr)
-                        
-                        try:
-                            if len(trace) >= 2**20:
-                                # Bigger than 1M, compress
-                                tracefile = gzip.GzipFile(filepath+".gz", "wb")
-                            else:
-                                tracefile = open(filepath,"wb")
-                            try:
-                                tracefile.write(trace)
-                            finally:
-                                tracefile.close()
-                        except:
-                            traceback.print_exc(file=sys.stderr)
-
-        finally:
-            # clean up
-            try:
-                controller.stop()
-            except:
-                pass
-            try:
-                controller.shutdown()
-            except:
-                pass
-
-    def results_append(self, file, testset, sta_pcap, ap_pcap):
-        line = "%s %s %s\n" % (testset, sta_pcap, ap_pcap)
-        file.write(line)
-
-if __name__ == '__main__':
-    experiment = PopiExperiment()
-    duration = int(sys.argv[1])
-    xml_filepath = sys.argv[2]
-    testset = sys.argv[3]
-    results_dir = sys.argv[4]
-    iteration = sys.argv[5]
-    experiment.run(duration, xml_filepath, testset, results_dir, iteration)
-
diff --git a/examples/POPI/run_popi_experiments.py b/examples/POPI/run_popi_experiments.py
deleted file mode 100755 (executable)
index dcb07e5..0000000
+++ /dev/null
@@ -1,180 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from nepi.core.execute import ExperimentController
-from optparse import OptionParser, SUPPRESS_HELP
-import collections
-import commands
-import os
-import shutil
-import signal
-import subprocess
-import sys
-import time
-import traceback
-import getpass
-import cPickle
-
-class PopiExample(object):
-    _testsets = dict({
-        "popi":  "./popi-tun-classfilter-2MB-q500-pl.xml",
-        "popi_hibw":  "./popi-tun-classfilter-2MB-q500-pl-hibw.xml",
-        })
-    
-    classes = {
-        'tcpx4' : 'udp:tcp*4:icmp:',
-        'icmpx4' : 'udp:tcp:icmp*4:',
-        'udpx4' : 'udp*4:tcp:icmp:',
-        'u1t4i16' : 'udp:tcp*4:icmp*16:',
-        'u4t4i16' : 'udp*4:tcp*4:icmp*16:',
-        'u1t16i16' : 'udp*4:tcp*16:icmp*16:',
-        'u1t1t1' : 'udp:tcp:icmp:',
-    }
-    
-    bwlimits = {
-        '32K' : '32',
-        '64K' : '64',
-        '128K' : '128',
-        '256K' : '256',
-        '384K' : '384',
-    #    '512K' : '512',
-    #    '768K' : '768',
-    #    '1M' : '1024',
-    #    '2M' : '2048',
-    }
-    
-    testsets = dict([
-        ("%s-%s-%s" % (tset,clsname,bwname), (xml, {'classes':cls, 'bwlimit':bw}))
-        for tset,xml in _testsets.iteritems()
-        for clsname,cls in classes.iteritems()
-        for bwname,bw in bwlimits.iteritems()
-    ])
-
-    def __init__(self):
-        usage = "usage: %prog -u user -t times -d results_dir -f remove -e experiment -s start"
-        parser = OptionParser(usage=usage)
-        parser.add_option("-u", "--user", dest="pluser", help="PlanetLab PLC user (email)", type="str")
-        parser.add_option("-p", "--pass", dest="plpass", help="PlanetLab PLC user (password) - leave empty for interactive prompt", type="str")
-        parser.add_option("-k", "--key", dest="plkey", help="PlanetLab PLC private key to use", type="str")
-        parser.add_option("-S", "--slice", dest="plslice", help="PlanetLab slice into which to deploy experiments", type="str")
-        parser.add_option("-t", "--times", dest="times", help="Number of times to run each scenario", type="int")
-        parser.add_option("-d", "--dir", dest="results_dir", help="Results directory", type="str")
-        parser.add_option("-f", "--remove", dest="remove", help="Remove previous results directory",  action="store_true", default=False)
-        parser.add_option("-e", "--experiment", dest="experiment", help="Experiment to execute [%s]" % ('|'.join(self._testsets.keys()),),  type="str")
-        parser.add_option("-s", "--start", dest="start", help="Start experiment at specific iteration",  type="int")
-        (options, args) = parser.parse_args()
-        
-        if not options.pluser:
-            print >>sys.stderr, "Must specify --user"
-            sys.exit(1)
-        else:
-            self.pluser = options.pluser
-            
-        if not options.plslice:
-            print >>sys.stderr, "Must specify --slice"
-            sys.exit(1)
-        else:
-            self.plslice = options.plslice
-            
-        if not options.plkey:
-            print >>sys.stderr, "Must specify --key"
-            sys.exit(1)
-        else:
-            self.plkey = options.plkey
-            
-        if not options.plpass:
-            self.plpass = getpass.getpass("Password for %s: " % (self.pluser,))
-            
-        self.times = options.times if options.times else 5
-        self.results_dir = options.results_dir if options.results_dir else "results"
-        self.remove = options.remove
-        if options.experiment:
-            if ',' in options.experiment:
-                options.experiment = options.experiment.split(',')
-            else:
-                options.experiment = [ options.experiment ]
-        else:
-            options.experiment = self.testsets.keys()
-        self.experiments = [x for x in options.experiment if x in self.testsets]
-        self.start = options.start if options.start else 0
-
-    def run(self):
-        duration = 3600
-
-        if self.remove:
-            try:
-                shutil.rmtree(self.results_dir)
-            except:
-                traceback.print_exc(file=sys.stderr)
-
-        try:
-            os.mkdir(self.results_dir)
-        except:
-            traceback.print_exc(file=sys.stderr)
-
-        for j,testset in enumerate(self.experiments):
-            xml_filepath, replacements = self.testsets[testset]
-            replacements = dict(replacements)
-            replacements['pluser'] = self.pluser
-            replacements['plpass'] = self.plpass
-            replacements['plslice'] = self.plslice
-            replacements['plkey'] = self.plkey
-            
-            for i in xrange(self.start, self.times):
-                testset_dir = os.path.join(self.results_dir, testset, str(i))
-                os.makedirs(testset_dir)
-
-                print >>sys.stderr, "%3d%% - " % ((j+i*1.0/(self.times-self.start))*100/len(self.experiments),), testset, "...",
-                
-                # launch experiment
-                command = "python run_one_experiment.py %d '%s' '%s' '%s' %d" % \
-                        (duration, xml_filepath, testset, self.results_dir, i)
-                # send by environment, we don't want passwords in the commandline
-                env = dict(os.environ)
-                env['POPI_REPLACEMENTS'] = cPickle.dumps(replacements,2).encode("base64").strip()
-                
-                for trials in xrange(5):
-                    logfile = open(os.path.join(testset_dir,"log"), "w")
-                    p = subprocess.Popen(
-                        command, 
-                        shell = True, 
-                        env = env,
-                        stdout = logfile,
-                        stderr = logfile,
-                        stdin = open("/dev/null","rb") )
-                    
-                    # we wait two time the estimated dirantion of the movie (120s)
-                    for i in xrange(0, duration * 2, 10):
-                        time.sleep(10)
-                        returncode = p.poll()
-                        if returncode is not None:
-                            break
-                    time.sleep(10)
-                    try:
-                        os.kill(p.pid, signal.SIGKILL)
-                    except:
-                        pass
-                    
-                    logfile.close()
-                    retfile = open(os.path.join(testset_dir,"retcode"), "w")
-                    if returncode:
-                        rettext = "FAIL %s" % (returncode,)
-                    else:
-                        rettext = "SUCCESS"
-                    retfile.write(rettext)
-                    retfile.close()
-
-                    print >>sys.stderr, rettext,
-                    
-                    if not returncode:
-                        print >>sys.stderr
-                        break
-                    else:
-                        time.sleep(60)
-                else:
-                    print >>sys.stderr, "Giving up"
-
-if __name__ == '__main__':
-    example = PopiExample()
-    example.run()
-
diff --git a/examples/POPI/run_popi_experiments.sh b/examples/POPI/run_popi_experiments.sh
deleted file mode 100755 (executable)
index 92ec590..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-#
-# description: run nef demo script 
-#
-
-PYTHONPATH="../../../nepi/src:../../../nepi/test/lib:../../../netns/src:../../../python-passfd/src:../../../python-unshare/src" python run_popi_experiments.py $*
-
-
diff --git a/examples/automated_vlc_experiment_plexus.py b/examples/automated_vlc_experiment_plexus.py
new file mode 100644 (file)
index 0000000..b9d7e35
--- /dev/null
@@ -0,0 +1,128 @@
+#!/usr/bin/env python
+from nepi.execution.resource import ResourceFactory, ResourceAction, ResourceState
+from nepi.execution.ec import ExperimentController
+
+from nepi.resources.omf.omf_node import OMFNode
+from nepi.resources.omf.omf_application import OMFApplication
+from nepi.resources.omf.omf_interface import OMFWifiInterface
+from nepi.resources.omf.omf_channel import OMFChannel
+
+import logging
+import time
+
+logging.basicConfig()
+
+# Create the EC
+ec = ExperimentController()
+
+# Register the different RM that will be used
+ResourceFactory.register_type(OMFNode)
+ResourceFactory.register_type(OMFWifiInterface)
+ResourceFactory.register_type(OMFChannel)
+ResourceFactory.register_type(OMFApplication)
+
+# Create and Configure the Nodes
+node1 = ec.register_resource("OMFNode")
+ec.set(node1, 'hostname', 'omf.plexus.wlab17')
+ec.set(node1, 'xmppSlice', "nepi")
+ec.set(node1, 'xmppHost', "xmpp-plexus.onelab.eu")
+ec.set(node1, 'xmppPort', "5222")
+ec.set(node1, 'xmppPassword', "1234")
+
+node2 = ec.register_resource("OMFNode")
+ec.set(node2, 'hostname', "omf.plexus.wlab37")
+ec.set(node2, 'xmppSlice', "nepi")
+ec.set(node2, 'xmppHost', "xmpp-plexus.onelab.eu")
+ec.set(node2, 'xmppPort', "5222")
+ec.set(node2, 'xmppPassword', "1234")
+
+# Create and Configure the Interfaces
+iface1 = ec.register_resource("OMFWifiInterface")
+ec.set(iface1, 'alias', "w0")
+ec.set(iface1, 'mode', "adhoc")
+ec.set(iface1, 'type', "g")
+ec.set(iface1, 'essid', "vlcexp")
+#ec.set(iface1, 'ap', "11:22:33:44:55:66")
+ec.set(iface1, 'ip', "10.0.0.17")
+ec.set(iface1, 'xmppSlice', "nepi")
+ec.set(iface1, 'xmppHost', "xmpp-plexus.onelab.eu")
+ec.set(iface1, 'xmppPort', "5222")
+ec.set(iface1, 'xmppPassword', "1234")
+
+iface2 = ec.register_resource("OMFWifiInterface")
+ec.set(iface2, 'alias', "w0")
+ec.set(iface2, 'mode', "adhoc")
+ec.set(iface2, 'type', 'g')
+ec.set(iface2, 'essid', "vlcexp")
+#ec.set(iface2, 'ap', "11:22:33:44:55:66")
+ec.set(iface2, 'ip', "10.0.0.37")
+ec.set(iface2, 'xmppSlice', "nepi")
+ec.set(iface2, 'xmppHost', "xmpp-plexus.onelab.eu")
+ec.set(iface2, 'xmppPort', "5222")
+ec.set(iface2, 'xmppPassword', "1234")
+
+# Create and Configure the Channel
+channel = ec.register_resource("OMFChannel")
+ec.set(channel, 'channel', "6")
+ec.set(channel, 'xmppSlice', "nepi")
+ec.set(channel, 'xmppHost', "xmpp-plexus.onelab.eu")
+ec.set(channel, 'xmppPort', "5222")
+ec.set(channel, 'xmppPassword', "1234")
+
+# Create and Configure the Application
+app1 = ec.register_resource("OMFApplication")
+ec.set(app1, 'appid', 'Vlc#1')
+ec.set(app1, 'path', "/opt/vlc-1.1.13/cvlc")
+ec.set(app1, 'args', "/opt/10-by-p0d.avi --sout '#rtp{dst=10.0.0.37,port=1234,mux=ts}'")
+ec.set(app1, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
+ec.set(app1, 'xmppSlice', "nepi")
+ec.set(app1, 'xmppHost', "xmpp-plexus.onelab.eu")
+ec.set(app1, 'xmppPort', "5222")
+ec.set(app1, 'xmppPassword', "1234")
+
+app2 = ec.register_resource("OMFApplication")
+ec.set(app2, 'appid', 'Vlc#2')
+ec.set(app2, 'path', "/opt/vlc-1.1.13/cvlc")
+ec.set(app2, 'args', "rtp://10.0.0.37:1234")
+ec.set(app2, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
+ec.set(app2, 'xmppSlice', "nepi")
+ec.set(app2, 'xmppHost', "xmpp-plexus.onelab.eu")
+ec.set(app2, 'xmppPort', "5222")
+ec.set(app2, 'xmppPassword', "1234")
+
+app3 = ec.register_resource("OMFApplication")
+ec.set(app3, 'appid', 'Kill#2')
+ec.set(app3, 'path', "/usr/bin/killall")
+ec.set(app3, 'args', "vlc")
+ec.set(app3, 'env', " ")
+ec.set(app3, 'xmppSlice', "nepi")
+ec.set(app3, 'xmppHost', "xmpp-plexus.onelab.eu")
+ec.set(app3, 'xmppPort', "5222")
+ec.set(app3, 'xmppPassword', "1234")
+
+# Connection
+ec.register_connection(app3, node1)
+ec.register_connection(app1, node1)
+ec.register_connection(node1, iface1)
+ec.register_connection(iface1, channel)
+ec.register_connection(iface2, channel)
+ec.register_connection(node2, iface2)
+ec.register_connection(app2, node2)
+
+# Condition
+#      Topology behaviour : It should not be done by the user, but ....
+#ec.register_condition([iface1, iface2, channel], ResourceAction.START, [node1, node2], ResourceState.STARTED , 2)
+#ec.register_condition(channel, ResourceAction.START, [iface1, iface2], ResourceState.STARTED , 1)
+#ec.register_condition(app1, ResourceAction.START, channel, ResourceState.STARTED , 1)
+
+#      User Behaviour
+ec.register_condition(app2, ResourceAction.START, app1, ResourceState.STARTED , "4s")
+ec.register_condition([app1, app2], ResourceAction.STOP, app2, ResourceState.STARTED , "20s")
+ec.register_condition(app3, ResourceAction.START, app2, ResourceState.STARTED , "25s")
+
+# Deploy
+ec.deploy()
+
+# Stop Experiment
+time.sleep(50)
+ec.shutdown()
diff --git a/examples/big_buck_bunny_240p_mpeg4.ts b/examples/big_buck_bunny_240p_mpeg4.ts
deleted file mode 100644 (file)
index 542244b..0000000
Binary files a/examples/big_buck_bunny_240p_mpeg4.ts and /dev/null differ
diff --git a/examples/big_buck_bunny_240p_mpeg4_lq.ts b/examples/big_buck_bunny_240p_mpeg4_lq.ts
deleted file mode 100644 (file)
index d947b4b..0000000
Binary files a/examples/big_buck_bunny_240p_mpeg4_lq.ts and /dev/null differ
diff --git a/examples/big_buck_bunny_license_information b/examples/big_buck_bunny_license_information
deleted file mode 100644 (file)
index f0b35ff..0000000
+++ /dev/null
@@ -1 +0,0 @@
-(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org
diff --git a/examples/ccnx/ccnx-0.6.0rc3.tar.gz b/examples/ccnx/ccnx-0.6.0rc3.tar.gz
deleted file mode 100644 (file)
index 48d6ff7..0000000
Binary files a/examples/ccnx/ccnx-0.6.0rc3.tar.gz and /dev/null differ
diff --git a/examples/ccnx/planetlab_ccnx_multicast.py b/examples/ccnx/planetlab_ccnx_multicast.py
deleted file mode 100644 (file)
index 0327d08..0000000
+++ /dev/null
@@ -1,311 +0,0 @@
-#!/usr/bin/env python
-
-##
-## Experiment topology:
-## 
-##  ccncatchunks                                ccnsendchunks
-##       |                                            |
-##       .->  node1 -- .. -- nodei -- .. -- nodeN   <-.
-##    
-##
-##  - Nodes are connected through an overlay network over the Intenet
-##  - On each node runs a CCNx daemon
-##  - Static multicast entries are added to the CCNx FIB on each node to communicate them in series.
-##    (Nodes only have FIB entries to at most two nodes)
-##
-
-
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-from nepi.util.constants import ApplicationStatus as AS
-import ipaddr
-import math
-from optparse import OptionParser, SUPPRESS_HELP
-import os
-import signal
-import string
-import subprocess
-import tempfile
-import time
-
-# Trak SIGTERM, and set global termination flag instead of dying
-TERMINATE = []
-def _finalize(sig,frame):
-    global TERMINATE
-    TERMINATE.append(None)
-signal.signal(signal.SIGTERM, _finalize)
-signal.signal(signal.SIGINT, _finalize)
-
-def create_slice_desc(slicename, plc_host, pl_user, pl_pwd, pl_ssh_key, 
-        port_base, root_dir, exp_desc):
-    pl_provider = FactoriesProvider("planetlab")
-    slice_desc = exp_desc.add_testbed_description(pl_provider)
-    slice_desc.set_attribute_value("homeDirectory", root_dir)
-    slice_desc.set_attribute_value("slice", slicename)
-    slice_desc.set_attribute_value("sliceSSHKey", pl_ssh_key)
-    slice_desc.set_attribute_value("authUser", pl_user)
-    slice_desc.set_attribute_value("authPass", pl_pwd)
-    slice_desc.set_attribute_value("plcHost", plc_host)
-    slice_desc.set_attribute_value("tapPortBase", port_base)
-    # Kills all running processes before starting the experiment
-    slice_desc.set_attribute_value("cleanProc", True)
-    # NOTICE: Setting 'cleanHome' to 'True' will erase all previous
-    # folders in the sliver Home directory, including result files!
-    #slice_desc.set_attribute_value("cleanHome", True)
-    slice_desc.set_attribute_value("plLogLevel", "DEBUG")
-    return slice_desc
-def create_node(hostname, pl_inet, slice_desc):
-    pl_node = slice_desc.create("Node")
-    pl_node.set_attribute_value("hostname", hostname)
-    pl_node.set_attribute_value("label", hostname)
-    pl_iface = slice_desc.create("NodeInterface")
-    pl_iface.connector("inet").connect(pl_inet.connector("devs"))
-    pl_node.connector("devs").connect(pl_iface.connector("node"))
-    return pl_node
-
-def create_tunnel(node, peer, pl_nodes, slice_desc, subnet):
-    pl_node = pl_nodes[node]
-    pl_peer = pl_nodes[peer]
-
-    pl_tun = slice_desc.create("TunInterface")
-    pl_tun.set_attribute_value("label", "tun_%s%s" % (node, peer))
-    pl_node.connector("devs").connect(pl_tun.connector("node"))
-
-    pl_tunpeer = slice_desc.create("TunInterface")
-    pl_tunpeer.set_attribute_value("label", "tun_%s%s" % (peer, node))
-    pl_peer.connector("devs").connect(pl_tunpeer.connector("node"))
-
-    pl_tun.connector("udp").connect(pl_tunpeer.connector("udp"))
-    
-    iterhosts = subnet.iterhosts()
-    addr = iterhosts.next()
-    ip = pl_tun.add_address()
-    ip.set_attribute_value("Address", addr.exploded)
-    ip.set_attribute_value("NetPrefix", subnet.prefixlen)
-
-    peeraddr = iterhosts.next()
-    peerip = pl_tunpeer.add_address()
-    peerip.set_attribute_value("Address", peeraddr.exploded)
-    peerip.set_attribute_value("NetPrefix", subnet.prefixlen)
-
-def create_ccnd(pl_node, port, hostname, routes, slice_desc):
-    pl_app = slice_desc.create("CCNxDaemon")
-    # We use a wildcard to replace the TUN IP address of the node during runtime
-    routes = "|".join(map(lambda route: "udp 224.0.23.170 %d 3 1 {#[tun_%s%s].addr[0].[Address]#}" \
-            % (route[1], hostname, route[0]), routes))
-    # Add multicast ccn routes 
-    pl_app.set_attribute_value("ccnRoutes", routes)
-    # Use a specific port to bind the CCNx daemon
-    if port:
-        pl_app.set_attribute_value("ccnLocalPort", port)
-    pl_app.enable_trace("stdout")
-    pl_app.enable_trace("stderr")
-    pl_app.connector("node").connect(pl_node.connector("apps"))
-
-def create_ccnsendchunks(pl_node, port, slice_desc):
-    pl_app = slice_desc.create("Application")
-    path_to_video = os.path.join(os.path.dirname(os.path.abspath(__file__)),
-        "../big_buck_bunny_240p_mpeg4_lq.ts")
-    pl_app.set_attribute_value("stdin", path_to_video)
-
-    command = "ccnsendchunks ccnx:/VIDEO"
-    if port:
-        command = "CCN_LOCAL_PORT=%d %s " % (port, command)
-    pl_app.set_attribute_value("command", command)
-
-    pl_app.enable_trace("stdout")
-    pl_app.enable_trace("stderr")
-    pl_app.connector("node").connect(pl_node.connector("apps"))
-    return pl_app
-
-def exec_ccncatchunks(slicename, port, hostname):
-    print "Starting Vlc streamming ..."
-
-    command = 'PATH=$PATH:$(ls | egrep nepi-ccnd- | head -1)/bin;'
-    if port:
-        command += "CCN_LOCAL_PORT=%d " % port
-    command += ' ccncatchunks2 ccnx:/VIDEO'
-
-    login = "%s@%s" % (slicename, hostname)
-    proc1 = subprocess.Popen(['ssh', login, command], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell = False)
-    proc2 = subprocess.Popen(['vlc', 
-        '--sub-filter', 'marq', 
-        '--marq-marquee', 
-        '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org', 
-        '--marq-position=8', 
-        '--no-video-title-show',  '-'], 
-        stdin=proc1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    return proc2
-
-def create_ed(hostnames, vsys_vnet, slicename, plc_host, pl_user, pl_pwd, pl_ssh_key, 
-        port_base, root_dir, port):
-
-    # Create the experiment description object
-    exp_desc = ExperimentDescription()
-
-    # Create the slice description object
-    slice_desc = create_slice_desc(slicename, plc_host, pl_user, pl_pwd, pl_ssh_key, 
-        port_base, root_dir, exp_desc)
-    
-    # Create the Internet box object
-    pl_inet = slice_desc.create("Internet")
-    
-    # Create the Node boxes
-    pl_nodes = dict()
-    ccn_routes = dict()
-    prev_hostname = None
-    mport = port
-    for hostname in hostnames:
-        pl_node = create_node(hostname, pl_inet, slice_desc)
-        pl_nodes[hostname] = pl_node
-
-        ccn_routes[hostname] = list()
-        if prev_hostname:
-            ccn_routes[hostname].append((prev_hostname, mport))
-            ccn_routes[prev_hostname].append((hostname, mport))
-            mport = port if mport != port else port + 1
-        prev_hostname = hostname
-
-    # Get the base network segment (slice vsys_vnet) to assign all the IP addresses
-    # to the virtual interfaces
-    base = ipaddr.IPNetwork(vsys_vnet)
-
-    # Calculate the number of virtual networks required to connect all the nodes 
-    # with all other nodes as the binomial coeficient C(n, 2), with n = #nodes
-    n = len(hostnames)
-    c = n * (n-1) / 2
-
-    # Validate that we can get 'c' /30 subnetworks
-    if c > math.pow(2, (30 - base.prefixlen)):
-        raise RuntimeError("Insufficient address segment %s for experiment", vsys_vnet)
-            
-    # Create the subnetwors iterator 
-    iter_sub = base.iter_subnets(new_prefix=30)
-
-    # Create tunnels between nodes
-    for i, node in enumerate(hostnames):
-        peers = hostnames[i+1:]
-        for peer in peers:
-            subnet = iter_sub.next()
-            create_tunnel(node, peer, pl_nodes, slice_desc, subnet)
-
-    # Create ccnd daemons in all nodes
-    for hostname, pl_node in pl_nodes.iteritems():
-        routes = ccn_routes[hostname]
-        create_ccnd(pl_node, port, hostname, routes, slice_desc)
-
-    # Create a ccnsendchunks application box in the first node
-    hostname = hostnames[0]
-    pl_node = pl_nodes[hostname]
-    pl_app = create_ccnsendchunks(pl_node, port, slice_desc)
-
-    return exp_desc, pl_nodes, hostname, pl_app
-
-def run(hostnames, vsys_vnet, slicename, plc_host, pl_user, pl_pwd, pl_ssh_key, 
-        port_base, root_dir, port):
-
-    exp_desc, pl_nodes, hostname, pl_app = create_ed(hostnames, vsys_vnet, 
-            slicename, plc_host, pl_user, pl_pwd, pl_ssh_key, port_base, 
-            root_dir, port)
-
-    xml = exp_desc.to_xml()
-    controller = ExperimentController(xml, root_dir)
-    controller.start()
-    
-    while not TERMINATE and controller.status(pl_app.guid) == AS.STATUS_NOT_STARTED:
-        time.sleep(0.5)
-
-    proc = None
-    if not TERMINATE:
-        hostname = hostnames[-1]
-        proc = exec_ccncatchunks(slicename, port, hostname)
-
-    while not TERMINATE and proc and proc.poll() is None:
-        time.sleep(0.5)
-    
-    if proc:
-        if proc.poll() < 1:
-           err = proc.stderr.read()
-           print "ERROR ", err
-        else:   
-           out = proc.stdout.read()
-           print "OUTPUT ", out
-
-    controller.stop()
-    controller.shutdown()
-
-if __name__ == '__main__':
-    root_dir = tempfile.mkdtemp()
-    slicename = os.environ.get("PL_SLICE")
-    pl_host = os.environ.get("PL_HOST", "www.planet-lab.eu")
-    port_base = 2000 + (os.getpid() % 1000) * 13
-    pl_ssh_key = os.environ.get(
-        "PL_SSH_KEY",
-        "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'],) )
-    pl_user = os.environ.get('PL_USER')
-    pl_pwd = os.environ.get('PL_PASS')
-    pl_vsys_vnet = os.environ.get('PL_VSYS_NET')
-    pl_hostnames = os.environ.get('PL_HOSTNAMES')
-    default_hostnames = ['openlab02.pl.sophia.inria.fr',
-                 'ple4.ipv6.lip6.fr',
-                 'planetlab2.di.unito.it',
-                 'merkur.planetlab.haw-hamburg.de',
-                 'planetlab1.cs.uit.no',
-                 'planetlab3.cs.st-andrews.ac.uk',
-                 'planetlab2.cs.uoi.gr',
-                 'planetlab3.xeno.cl.cam.ac.uk',
-                 'planet2.inf.tu-dresden.de',
-                 'planetlab2.csg.uzh.ch',
-                 'planetlab2.upm.ro',
-                 'planetlab-um00.di.uminho.pt',
-                 'planetlabpc2.upf.edu',
-                 'planet2.elte.hu',
-                 'planetlab2.esprit-tn.com' ]
-    ccn_local_port = os.environ.get('CCN_LOCAL_PORT', 49695)
-
-    usage = "usage: %prog -s <pl_slice> -H <pl_host> -k <ssh_key> -u <pl_user> -p <pl_password> -v <vsys_vnet> -N <host_names> -c <node_count> -P <ccn-local-port>"
-
-    parser = OptionParser(usage=usage)
-    parser.add_option("-s", "--slicename", dest="slicename", 
-            help="PlanetLab slicename", default=slicename, type="str")
-    parser.add_option("-H", "--pl-host", dest="pl_host", 
-            help="PlanetLab site (e.g. www.planet-lab.eu)", 
-            default=pl_host, type="str")
-    parser.add_option("-k", "--ssh-key", dest="pl_ssh_key", 
-            help="Path to private ssh key used for PlanetLab authentication", 
-            default=pl_ssh_key, type="str")
-    parser.add_option("-u", "--pl-user", dest="pl_user", 
-            help="PlanetLab account user (i.e. Registration email address)", 
-            default=pl_user, type="str")
-    parser.add_option("-p", "--pl-pwd", dest="pl_pwd", 
-            help="PlanetLab account password", default=pl_pwd, type="str")
-    parser.add_option("-v", "--vsys-vnet", dest="vsys_vnet", 
-            help="Value of the vsys_vnet tag addigned to your slice. (e.g. 192.168.3.0/16)", 
-            default=pl_vsys_vnet, type="str")
-    parser.add_option("-N", "--host-names", dest="hostnames", 
-            help="Comma separated list of PlanetLab hostnames to use", 
-            default=pl_hostnames, type="str")
-    parser.add_option("-c", "--node-count", dest="node_count", 
-            help="Number of nodes to use", 
-            default=5, type="str")
-    parser.add_option("-P", "--ccn-local-port", dest="port", 
-            help="Port to bind the CCNx daemon", 
-            default=ccn_local_port, type="int")
-
-    (options, args) = parser.parse_args()
-
-    hostnames = map(string.strip, options.hostnames.split(",")) if options.hostnames else default_hostnames
-    if options.node_count > 0 and options.node_count < len(hostnames):
-       hostnames = hostnames[0:options.node_count]
-    vsys_vnet = options.vsys_vnet
-    slicename = options.slicename
-    pl_host = options.pl_host
-    pl_user= options.pl_user
-    pl_pwd = options.pl_pwd
-    pl_ssh_key = options.pl_ssh_key
-    port = options.port
-
-    run(hostnames, vsys_vnet, slicename, pl_host, pl_user, pl_pwd, pl_ssh_key, 
-            port_base, root_dir, port)
-
diff --git a/examples/ccnx/planetlab_ccnx_unicast.py b/examples/ccnx/planetlab_ccnx_unicast.py
deleted file mode 100644 (file)
index d7f5f83..0000000
+++ /dev/null
@@ -1,308 +0,0 @@
-#!/usr/bin/env python
-
-##
-## Experiment topology:
-## 
-##  ccncatchunks                                ccnsendchunks
-##       |                                            |
-##       .->  node1 -- .. -- nodei -- .. -- nodeN   <-.
-##    
-##
-##  - Nodes are connected through Intenet
-##  - On each node runs a CCNx daemon
-##  - Static entries are added to the CCNx FIB on each node to communicate them in series.
-##    (Nodes only have FIB entries to at most two nodes)
-##
-
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-from nepi.util.constants import ApplicationStatus as AS
-from optparse import OptionParser, SUPPRESS_HELP
-import os
-import signal
-import string
-import subprocess
-import tempfile
-import time
-
-# Trak SIGTERM, and set global termination flag instead of dying
-TERMINATE = []
-def _finalize(sig,frame):
-    global TERMINATE
-    TERMINATE.append(None)
-signal.signal(signal.SIGTERM, _finalize)
-signal.signal(signal.SIGINT, _finalize)
-
-def create_slice_desc(slicename, plc_host, pl_user, pl_pwd, pl_ssh_key, 
-        port_base, root_dir, proxy, exp_desc):
-    pl_provider = FactoriesProvider("planetlab")
-    slice_desc = exp_desc.add_testbed_description(pl_provider)
-    slice_desc.set_attribute_value("homeDirectory", root_dir)
-    slice_desc.set_attribute_value("slice", slicename)
-    slice_desc.set_attribute_value("sliceSSHKey", pl_ssh_key)
-    slice_desc.set_attribute_value("authUser", pl_user)
-    slice_desc.set_attribute_value("authPass", pl_pwd)
-    slice_desc.set_attribute_value("plcHost", plc_host)
-    if proxy:
-        slice_desc.set_attribute_value("proxy", proxy)
-    slice_desc.set_attribute_value("tapPortBase", port_base)
-    slice_desc.set_attribute_value("p2pDeployment", True)
-    # Kills all running processes before starting the experiment
-    slice_desc.set_attribute_value("cleanProc", True)
-    # NOTICE: Setting 'cleanHome' to 'True' will erase all previous
-    # folders in the sliver Home directory, including result files!
-    #slice_desc.set_attribute_value("cleanHome", True)
-    slice_desc.set_attribute_value("plLogLevel", "DEBUG")
-    return slice_desc
-def create_node(hostname, pl_inet, slice_desc):
-    pl_node = slice_desc.create("Node")
-    pl_node.set_attribute_value("hostname", hostname)
-    pl_node.set_attribute_value("label", hostname)
-    pl_iface = slice_desc.create("NodeInterface")
-    pl_iface.set_attribute_value("label", "iface_%s" % hostname)
-    pl_iface.connector("inet").connect(pl_inet.connector("devs"))
-    pl_node.connector("devs").connect(pl_iface.connector("node"))
-    return pl_node
-
-def create_ccnd(pl_node, port, routes, slice_desc):
-    pl_app = slice_desc.create("CCNxDaemon")
-    
-    # We can specify a default ccnx version to be either 0.6.0 or 0.7.1
-    # We can also specify a custom local source and build and install directives
-    path_to_source = os.path.join(os.path.dirname(os.path.abspath(__file__)),
-        "ccnx-0.6.0rc3.tar.gz")
-    pl_app.set_attribute_value("sources", path_to_source)
-    pl_app.set_attribute_value("build", 
-            "tar xzf ${SOURCES}/ccnx-0.6.0rc3.tar.gz && "
-            "cd ./ccnx-0.6.0rc3 && "
-            "./configure && make ")
-    pl_app.set_attribute_value("install", "cp -r ./ccnx-0.6.0rc3/bin ${SOURCES}")
-
-    # We use a wildcard to replace the public IP address of the node during runtime,
-    # once this IP is known
-    routes = "|".join(map(lambda route: "udp {#[iface_%s].addr[0].[Address]#}" % route, routes))
-    
-    # Add unicast ccn routes 
-    pl_app.set_attribute_value("ccnRoutes", routes)
-
-    # Use a specific port to bind the CCNx daemon
-    if port:
-        pl_app.set_attribute_value("ccnLocalPort", port)
-
-    pl_app.enable_trace("stdout")
-    pl_app.enable_trace("stderr")
-    pl_app.connector("node").connect(pl_node.connector("apps"))
-
-def create_ccnsendchunks(pl_node, port, slice_desc):
-    pl_app = slice_desc.create("Application")
-    path_to_video = os.path.join(os.path.dirname(os.path.abspath(__file__)),
-        "../big_buck_bunny_240p_mpeg4_lq.ts")
-    pl_app.set_attribute_value("stdin", path_to_video)
-
-    command = "ccnsendchunks ccnx:/VIDEO"
-    if port:
-        command = "CCN_LOCAL_PORT=%d %s " % (port, command)
-    pl_app.set_attribute_value("command", command)
-
-    pl_app.enable_trace("stdout")
-    pl_app.enable_trace("stderr")
-    pl_app.connector("node").connect(pl_node.connector("apps"))
-    return pl_app
-
-def exec_ccncatchunks(slicename, port, hostname):
-    print "Getting video chunks from %s ..." % hostname
-
-    command = 'PATH=$PATH:$(ls | egrep nepi-ccnd- | head -1)/bin;'
-    if port:
-        command += "CCN_LOCAL_PORT=%d " % port
-    command += ' ccncatchunks2 ccnx:/VIDEO'
-
-    login = "%s@%s" % (slicename, hostname)
-    proc1 = subprocess.Popen(['ssh',
-        '-o', 'StrictHostKeyChecking=no',
-        login, 
-        command], 
-        stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell = False)
-    
-    proc2 = subprocess.Popen(['vlc', 
-        '--ffmpeg-threads=1',
-        '--sub-filter', 'marq', 
-        '--marq-marquee', 
-        '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org', 
-        '--marq-position=8', 
-        '--no-video-title-show', '-'], 
-        stdin=proc1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    return proc2
-
-def create_ed(hostnames, vsys_vnet, slicename, plc_host, pl_user, pl_pwd, pl_ssh_key, 
-        port_base, root_dir, delay, port, proxy):
-
-    # Create the experiment description object
-    exp_desc = ExperimentDescription()
-
-    # Create the slice description object
-    slice_desc = create_slice_desc(slicename, plc_host, pl_user, pl_pwd, pl_ssh_key, 
-        port_base, root_dir, proxy, exp_desc)
-    
-    # Create the Internet box object
-    pl_inet = slice_desc.create("Internet")
-    
-    # Create the Node boxes
-    pl_nodes = dict()
-    ccn_routes = dict()
-    prev_hostname = None
-    for hostname in hostnames:
-        pl_node = create_node(hostname, pl_inet, slice_desc)
-        pl_nodes[hostname] = pl_node
-
-        ccn_routes[hostname] = list()
-        if prev_hostname:
-            ccn_routes[hostname].append(prev_hostname)
-            ccn_routes[prev_hostname].append(hostname)
-        prev_hostname = hostname
-     
-    for hostname in hostnames:
-        pl_node = pl_nodes[hostname] 
-        routes = ccn_routes[hostname]
-        create_ccnd(pl_node, port, routes, slice_desc)
-
-    # Create a ccnsendchunks application box in the first node
-    hostname = hostnames[0]
-    pl_node = pl_nodes[hostname]
-    pl_app = create_ccnsendchunks(pl_node, port, slice_desc)
-
-    return exp_desc, pl_nodes, hostname, pl_app
-
-def run(hostnames, vsys_vnet, slicename, plc_host, pl_user, pl_pwd, pl_ssh_key, 
-        port_base, root_dir, delay, port, proxy):
-
-    exp_desc, pl_nodes, hostname, pl_app = create_ed(hostnames, vsys_vnet, 
-            slicename, plc_host, pl_user, pl_pwd, pl_ssh_key, port_base, 
-            root_dir, delay, port, proxy)
-
-    xml = exp_desc.to_xml()
-    controller = ExperimentController(xml, root_dir)
-    controller.start()
-    
-    while not TERMINATE and controller.status(pl_app.guid) == AS.STATUS_NOT_STARTED:
-        time.sleep(0.5)
-
-    proc1 = None
-    if not TERMINATE:
-        hostname = hostnames[-1]
-        proc1 = exec_ccncatchunks(slicename, port, hostname)
-
-    if not TERMINATE and proc1:
-        time.sleep(delay)
-
-    proc2 = None
-    if not TERMINATE:
-        hostname = hostnames[-2]
-        proc2 = exec_ccncatchunks(slicename, port, hostname)
-
-    while not TERMINATE and proc1 and proc2 and proc2.poll() is None:
-        time.sleep(0.5)
-
-    if proc1:
-        if proc1.poll() < 1:
-           err = proc1.stderr.read()
-           print "Stream 1 ERROR ", err
-        else:   
-           out = proc1.stdout.read()
-           print "Stream 1 OUTPUT ", out
-
-    if proc2:
-        if proc2.poll() < 1:
-           err = proc2.stderr.read()
-           print "Stream 2 ERROR ", err
-        else:   
-           out = proc2.stdout.read()
-           print "Stream 2 OUTPUT ", out
-
-    controller.stop()
-    controller.shutdown()
-
-if __name__ == '__main__':
-    root_dir = tempfile.mkdtemp()
-    slicename = os.environ.get("PL_SLICE")
-    pl_host = os.environ.get("PL_HOST", "www.planet-lab.eu")
-    port_base = 2000 + (os.getpid() % 1000) * 13
-    pl_ssh_key = os.environ.get(
-        "PL_SSH_KEY",
-        "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'],) )
-    pl_user = os.environ.get('PL_USER')
-    pl_pwd = os.environ.get('PL_PASS')
-    pl_vsys_vnet = os.environ.get('PL_VSYS_NET')
-    pl_hostnames = os.environ.get('PL_HOSTNAMES')
-    default_hostnames = ['openlab02.pl.sophia.inria.fr',
-                 'ple4.ipv6.lip6.fr',
-                 'planetlab2.di.unito.it',
-                 'merkur.planetlab.haw-hamburg.de',
-                 'planetlab1.cs.uit.no',
-                 'planetlab3.cs.st-andrews.ac.uk',
-                 'planetlab2.cs.uoi.gr',
-                 'planetlab3.xeno.cl.cam.ac.uk',
-                 'planet2.inf.tu-dresden.de',
-                 'planetlab2.csg.uzh.ch',
-                 'planetlab2.upm.ro',
-                 'planetlab-um00.di.uminho.pt',
-                 'planetlabpc2.upf.edu',
-                 'planet2.elte.hu',
-                 'planetlab2.esprit-tn.com' ]
-
-    ccn_local_port = os.environ.get('CCN_LOCAL_PORT')
-
-    usage = "usage: %prog -s <pl_slice> -H <pl_host> -k <ssh_key> -u <pl_user> -p <pl_password> -v <vsys_vnet> -N <host_names> -c <node_count> -d <delay> -P <ccn-local-port> -x <proxy>"
-
-    parser = OptionParser(usage=usage)
-    parser.add_option("-s", "--slicename", dest="slicename", 
-            help="PlanetLab slicename", default=slicename, type="str")
-    parser.add_option("-H", "--pl-host", dest="pl_host", 
-            help="PlanetLab site (e.g. www.planet-lab.eu)", 
-            default=pl_host, type="str")
-    parser.add_option("-k", "--ssh-key", dest="pl_ssh_key", 
-            help="Path to private ssh key used for PlanetLab authentication", 
-            default=pl_ssh_key, type="str")
-    parser.add_option("-u", "--pl-user", dest="pl_user", 
-            help="PlanetLab account user (i.e. Registration email address)", 
-            default=pl_user, type="str")
-    parser.add_option("-p", "--pl-pwd", dest="pl_pwd", 
-            help="PlanetLab account password", default=pl_pwd, type="str")
-    parser.add_option("-v", "--vsys-vnet", dest="vsys_vnet", 
-            help="Value of the vsys_vnet tag addigned to your slice. (e.g. 192.168.3.0/16)", 
-            default=pl_vsys_vnet, type="str")
-    parser.add_option("-N", "--host-names", dest="hostnames", 
-            help="Comma separated list of PlanetLab hostnames to use", 
-            default=pl_hostnames, type="str")
-    parser.add_option("-c", "--node-count", dest="node_count", 
-            help="Number of nodes to use", 
-            default=9, type="int")
-    parser.add_option("-d", "--delay", dest="delay", 
-            help="Time to wait before retrieveing the second video stream in seconds", 
-            default=40, type="int")
-    parser.add_option("-P", "--ccn-local-port", dest="port", 
-            help="Port to bind the CCNx daemon", 
-            default=ccn_local_port, type="int")
-    parser.add_option("-x", "--proxy", dest="proxy", 
-            help="Https proxy between here and PlanetLab machines", 
-            default=None, type="str")
-    (options, args) = parser.parse_args()
-
-    hostnames = map(string.strip, options.hostnames.split(",")) if options.hostnames else default_hostnames
-    if options.node_count > 0 and options.node_count < len(hostnames):
-       hostnames = hostnames[0:options.node_count]
-
-    vsys_vnet = options.vsys_vnet
-    slicename = options.slicename
-    pl_host = options.pl_host
-    pl_user= options.pl_user
-    pl_pwd = options.pl_pwd
-    pl_ssh_key = options.pl_ssh_key
-    delay = options.delay
-    port = options.port
-    proxy = options.proxy
-
-    run(hostnames, vsys_vnet, slicename, pl_host, pl_user, pl_pwd, pl_ssh_key, 
-            port_base, root_dir, delay, port, proxy)
-
diff --git a/examples/daemonized_testbed_controller.py b/examples/daemonized_testbed_controller.py
deleted file mode 100644 (file)
index 2110d70..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-#
-# Experiment Topology:
-#
-#  n1 --- n2
-#  0.1   0.2 
-#    
-
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-from nepi.util import proxy
-from nepi.util.constants import DeploymentConfiguration as DC
-import getpass
-import tempfile
-import time
-import os
-
-user = getpass.getuser()
-root_dir = tempfile.mkdtemp()
-netns_dir = os.path.join(root_dir, "netns")
-daemon_dir = os.path.join(netns_dir, "daemon")
-os.makedirs(daemon_dir)
-
-
-exp_desc = ExperimentDescription()
-
-netns_provider = FactoriesProvider("netns")
-netns_desc = exp_desc.add_testbed_description(netns_provider)
-netns_desc.set_attribute_value("homeDirectory", netns_dir)
-#netns_desc.set_attribute_value("enableDebug", True)
-netns_desc.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-netns_desc.set_attribute_value(DC.ROOT_DIRECTORY, daemon_dir)
-netns_desc.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-netns_desc.set_attribute_value(DC.USE_SUDO, True)
-
-node1 = netns_desc.create("Node")
-node2 = netns_desc.create("Node")
-
-iface12 = netns_desc.create("P2PNodeInterface")
-iface12.set_attribute_value("up", True)
-node1.connector("devs").connect(iface12.connector("node"))
-
-iface21 = netns_desc.create("P2PNodeInterface")
-iface21.set_attribute_value("up", True)
-node2.connector("devs").connect(iface21.connector("node"))
-
-iface12.connector("p2p").connect(iface21.connector("p2p"))
-
-ip12 = iface12.add_address()
-ip12.set_attribute_value("Address", "192.168.0.1")
-ip12.set_attribute_value("NetPrefix", 30)
-
-ip21 = iface21.add_address()
-ip21.set_attribute_value("Address", "192.168.0.2")
-ip21.set_attribute_value("NetPrefix", 30)
-
-app1 = netns_desc.create("Application")
-app1.set_attribute_value("command", "ping -qc 10 192.168.0.1")
-app1.set_attribute_value("user", user)
-app1.connector("node").connect(node1.connector("apps"))
-
-app1.enable_trace("stdout")
-
-xml = exp_desc.to_xml()
-
-controller_access_config = proxy.AccessConfiguration()
-controller_access_config.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-controller_access_config.set_attribute_value(DC.ROOT_DIRECTORY, root_dir)
-controller_access_config.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-
-controller = proxy.create_experiment_controller(xml, controller_access_config)
-
-controller.start()
-while not controller.is_finished(app1.guid):
-    time.sleep(0.5)
-
-result = controller.trace(app1.guid, "stdout")
-
-controller.stop()
-controller.shutdown()
-
-print result
-
diff --git a/examples/fd_cross_testbed_experiment.py b/examples/fd_cross_testbed_experiment.py
deleted file mode 100644 (file)
index 15c219f..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-#
-# Experiment Topology:
-#
-#  ns-3        |     NETNS 
-#              |
-
-#             fd   
-#  n1 --- n2 ----- n3 --- n4
-#              
-#     0.0     1.0     2.0 
-
-
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-import getpass
-import tempfile
-import time
-
-user = getpass.getuser()
-root_dir = tempfile.mkdtemp()
-
-def create_ns3_node(ns3_desc):
-    node = ns3_desc.create("ns3::Node")
-    ipv4 = ns3_desc.create("ns3::Ipv4L3Protocol")
-    arp  = ns3_desc.create("ns3::ArpL3Protocol")
-    icmp = ns3_desc.create("ns3::Icmpv4L4Protocol")
-    udp = ns3_desc.create("ns3::UdpL4Protocol")
-    node.connector("protos").connect(ipv4.connector("node"))
-    node.connector("protos").connect(arp.connector("node"))
-    node.connector("protos").connect(icmp.connector("node"))
-    node.connector("protos").connect(udp.connector("node"))
-    return node
-
-exp_desc = ExperimentDescription()
-
-ns3_provider = FactoriesProvider("ns3")
-ns3_desc = exp_desc.add_testbed_description(ns3_provider)
-ns3_desc.set_attribute_value("homeDirectory", root_dir)
-ns3_desc.set_attribute_value("SimulatorImplementationType", "ns3::RealtimeSimulatorImpl")
-ns3_desc.set_attribute_value("ChecksumEnabled", True)
-
-node1 = create_ns3_node(ns3_desc)
-iface12 = ns3_desc.create("ns3::PointToPointNetDevice")
-queue12 = ns3_desc.create("ns3::DropTailQueue")
-node1.connector("devs").connect(iface12.connector("node"))
-iface12.connector("queue").connect(queue12.connector("dev"))
-ip12 = iface12.add_address()
-ip12.set_attribute_value("Address", "10.0.0.1")
-
-node2 = create_ns3_node(ns3_desc)
-iface21 = ns3_desc.create("ns3::PointToPointNetDevice")
-queue21 = ns3_desc.create("ns3::DropTailQueue")
-node2.connector("devs").connect(iface21.connector("node"))
-iface21.connector("queue").connect(queue21.connector("dev"))
-ip21 = iface21.add_address()
-ip21.set_attribute_value("Address", "10.0.0.2")
-
-chan = ns3_desc.create("ns3::PointToPointChannel")
-iface12.connector("chan").connect(chan.connector("dev2"))
-iface21.connector("chan").connect(chan.connector("dev2"))
-
-iface23 = ns3_desc.create("ns3::FdNetDevice")
-node2.connector("devs").connect(iface23.connector("node"))
-ip23 = iface23.add_address()
-ip23.set_attribute_value("Address", "10.0.1.1")
-
-netns_provider = FactoriesProvider("netns")
-netns_desc = exp_desc.add_testbed_description(netns_provider)
-netns_desc.set_attribute_value("homeDirectory", root_dir)
-#netns_desc.set_attribute_value("enableDebug", True)
-
-node3 = netns_desc.create("Node")
-iface32 = netns_desc.create("TapNodeInterface")
-iface32.set_attribute_value("up", True)
-node3.connector("devs").connect(iface32.connector("node"))
-ip32 = iface32.add_address()
-ip32.set_attribute_value("Address", "10.0.1.2")
-
-iface23.connector("->fd").connect(iface32.connector("fd->"))
-
-iface34 = netns_desc.create("P2PNodeInterface")
-iface34.set_attribute_value("up", True)
-node3.connector("devs").connect(iface34.connector("node"))
-ip34 = iface34.add_address()
-ip34.set_attribute_value("Address", "10.0.2.1")
-
-node4 = netns_desc.create("Node")
-node4.set_attribute_value("forward_X11", True)
-iface43 = netns_desc.create("P2PNodeInterface")
-iface43.set_attribute_value("up", True)
-node4.connector("devs").connect(iface43.connector("node"))
-ip43 = iface43.add_address()
-ip43.set_attribute_value("Address", "10.0.2.2")
-
-iface34.connector("p2p").connect(iface43.connector("p2p"))
-
-route = node1.add_route()
-route.set_attribute_value("Destination", "10.0.2.0")
-route.set_attribute_value("NextHop", "10.0.0.2")
-
-route = node2.add_route()
-route.set_attribute_value("Destination", "10.0.2.0")
-route.set_attribute_value("NextHop", "10.0.1.2")
-
-route = node3.add_route()
-route.set_attribute_value("Destination", "10.0.0.0")
-route.set_attribute_value("NextHop", "10.0.1.1")
-
-route = node4.add_route()
-route.set_attribute_value("Destination", "10.0.0.0")
-route.set_attribute_value("NextHop", "10.0.2.1")
-
-app = netns_desc.create("Application")
-app.set_attribute_value("command", "ping -qc 3 10.0.0.1")
-app.set_attribute_value("user", user)
-app.connector("node").connect(node4.connector("apps"))
-app.enable_trace("stdout")
-      
-xml = exp_desc.to_xml()
-
-controller = ExperimentController(xml, root_dir)
-
-controller.start()
-while not controller.is_finished(app.guid):
-    time.sleep(0.5)
-
-result = controller.trace(app.guid, "stdout")
-
-controller.stop()
-controller.shutdown()
-
-print result
-
diff --git a/examples/linux/ccnx/simple_topo.py b/examples/linux/ccnx/simple_topo.py
new file mode 100644 (file)
index 0000000..ee41017
--- /dev/null
@@ -0,0 +1,164 @@
+#!/usr/bin/env python
+from nepi.execution.ec import ExperimentController, ECState 
+from nepi.execution.resource import ResourceState, ResourceAction, \
+        populate_factory
+
+from optparse import OptionParser, SUPPRESS_HELP
+
+import os
+import time
+
+def add_node(ec, host, user):
+    node = ec.register_resource("LinuxNode")
+    ec.set(node, "hostname", host)
+    ec.set(node, "username", user)
+    #ec.set(node, "cleanHome", True)
+    ec.set(node, "cleanProcesses", True)
+    return node
+
+def add_ccnd(ec, os_type, peers):
+    if os_type == "f12":
+        depends = ( " autoconf openssl-devel  expat-devel libpcap-devel "
+                " ecryptfs-utils-devel libxml2-devel automake gawk " 
+                " gcc gcc-c++ git pcre-devel ")
+    elif os_type == "ubuntu":
+        depends = ( " autoconf libssl-dev libexpat-dev libpcap-dev "
+                " libecryptfs0 libxml2-utils automake gawk gcc g++ "
+                " git-core pkg-config libpcre3-dev ")
+
+    sources = "http://www.ccnx.org/releases/ccnx-0.7.1.tar.gz"
+
+    build = (
+        # Evaluate if ccnx binaries are already installed
+        " ( "
+            "  test -d ${EXP_HOME}/ccnx/bin"
+        " ) || ( "
+        # If not, untar and build
+            " ( "
+                " mkdir -p ${SOURCES}/ccnx && "
+                " tar xf ${SOURCES}/ccnx-0.7.1.tar.gz --strip-components=1 -C ${SOURCES}/ccnx "
+             " ) && "
+                "cd ${SOURCES}/ccnx && "
+                # Just execute and silence warnings...
+                "(  ( ./configure && make )  2>&1 )"
+         " )") 
+
+    install = (
+        # Evaluate if ccnx binaries are already installed
+        " ( "
+            "  test -d ${EXP_HOME}/ccnx/bin "
+        " ) || ( "
+            "  mkdir -p ${EXP_HOME}/ccnx/bin && "
+            "  cp -r ${SOURCES}/ccnx ${EXP_HOME}"
+        " )"
+    )
+
+    env = "PATH=$PATH:${EXP_HOME}/ccnx/bin"
+
+    # BASH command -> ' ccndstart 2>&1 ; ccndc add ccnx:/ udp  host ;  ccnr 2>&1 '
+    command = "ccndstart 2>&1 ; "
+    peers = map(lambda peer: "ccndc add ccnx:/ udp  %s" % peer, peers)
+    command += " ; ".join(peers) + " ; "
+    command += " ccnr 2>&1 "
+
+    app = ec.register_resource("LinuxApplication")
+    ec.set(app, "depends", depends)
+    ec.set(app, "sources", sources)
+    ec.set(app, "install", install)
+    ec.set(app, "build", build)
+    ec.set(app, "env", env)
+    ec.set(app, "command", command)
+
+    return app
+
+def add_publish(ec, movie):
+    env = "PATH=$PATH:${EXP_HOME}/ccnx/bin"
+    command = "ccnseqwriter -r ccnx:/VIDEO"
+
+    app = ec.register_resource("LinuxApplication")
+    ec.set(app, "stdin", movie)
+    ec.set(app, "env", env)
+    ec.set(app, "command", command)
+
+    return app
+
+def add_stream(ec):
+    env = "PATH=$PATH:${EXP_HOME}/ccnx/bin"
+    command = "sudo -S dbus-uuidgen --ensure ; ( ccncat ccnx:/VIDEO | vlc - ) 2>&1"
+
+    app = ec.register_resource("LinuxApplication")
+    ec.set(app, "depends", "vlc")
+    ec.set(app, "forwardX11", True)
+    ec.set(app, "env", env)
+    ec.set(app, "command", command)
+
+    return app
+
+def get_options():
+    slicename = os.environ.get("PL_SLICE")
+
+    usage = "usage: %prog -s <pl-slice> -u <user-2> -m <movie> -l <exp-id>"
+
+    parser = OptionParser(usage=usage)
+    parser.add_option("-s", "--pl-slice", dest="pl_slice", 
+            help="PlanetLab slicename", default=slicename, type="str")
+    parser.add_option("-u", "--user-2", dest="user2", 
+            help="User for non PlanetLab machine", type="str")
+    parser.add_option("-m", "--movie", dest="movie", 
+            help="Stream movie", type="str")
+    parser.add_option("-l", "--exp-id", dest="exp_id", 
+            help="Label to identify experiment", type="str")
+
+    (options, args) = parser.parse_args()
+
+    if not options.movie:
+        parser.error("movie is a required argument")
+
+    return (options.pl_slice, options.user2, options.movie, options.exp_id)
+
+if __name__ == '__main__':
+    ( pl_slice, user2, movie, exp_id ) = get_options()
+
+    # Search for available RMs
+    populate_factory()
+    
+    #host1 = 'nepi2.pl.sophia.inria.fr'
+    host1 = 'planetlab2.u-strasbg.fr'
+    host2 = 'roseval.pl.sophia.inria.fr'
+
+    ec = ExperimentController(exp_id = exp_id)
+
+    node1 = add_node(ec, host1, pl_slice)
+    
+    peers = [host2]
+    ccnd1 = add_ccnd(ec, "f12", peers)
+
+    ec.register_connection(ccnd1, node1)
+
+    pub = add_publish(ec, movie)
+    ec.register_connection(pub, node1)
+    # The movie can only be published after ccnd is running
+    ec.register_condition(pub, ResourceAction.START, 
+            ccnd1, ResourceState.STARTED)
+    
+    node2 = add_node(ec, host2, user2)
+    peers = [host1]
+    ccnd2 = add_ccnd(ec, "ubuntu", peers)
+    ec.register_connection(ccnd2, node2)
+     
+    stream = add_stream(ec)
+    ec.register_connection(stream, node2)
+    # The stream can only be retrieved after ccnd is running
+    ec.register_condition(stream, ResourceAction.START, 
+            ccnd2, ResourceState.STARTED)
+    # And also, the stream can only be retrieved after it was published
+    ec.register_condition(stream, ResourceAction.START, 
+            pub, ResourceState.STARTED)
+    ec.deploy()
+
+    apps = [ccnd1, pub, ccnd2, stream]
+    ec.wait_finished(apps)
+
+    ec.shutdown()
+
diff --git a/examples/linux/scalability.py b/examples/linux/scalability.py
new file mode 100644 (file)
index 0000000..903e6b9
--- /dev/null
@@ -0,0 +1,146 @@
+#!/usr/bin/env python
+from nepi.execution.ec import ExperimentController, ECState 
+from nepi.execution.resource import ResourceState, ResourceAction, \
+        populate_factory
+
+from optparse import OptionParser, SUPPRESS_HELP
+
+import os
+import time
+
+def add_node(ec, host, user):
+    node = ec.register_resource("LinuxNode")
+    ec.set(node, "hostname", host)
+    ec.set(node, "username", user)
+    ec.set(node, "cleanHome", True)
+    ec.set(node, "cleanProcesses", True)
+    return node
+
+def add_app(ec):
+    app = ec.register_resource("LinuxApplication")
+    ec.set(app, "command", "sleep 30; echo 'HOLA'")
+    return app
+
+def get_options():
+    slicename = os.environ.get("PL_SLICE")
+
+    usage = "usage: %prog -s <pl-slice>"
+
+    parser = OptionParser(usage=usage)
+    parser.add_option("-s", "--pl-slice", dest="pl_slice", 
+            help="PlanetLab slicename", default=slicename, type="str")
+    parser.add_option("-l", "--exp-id", dest="exp_id", 
+            help="Label to identify experiment", type="str")
+
+    (options, args) = parser.parse_args()
+
+    return (options.pl_slice, options.exp_id)
+
+if __name__ == '__main__':
+    ( pl_slice, exp_id ) = get_options()
+
+    # Search for available RMs
+    populate_factory()
+    
+    apps = []
+  
+    hostnames = [
+             "planetlab-2.research.netlab.hut.fi",
+             "planetlab2.willab.fi",
+             "planetlab3.hiit.fi",
+             "planetlab4.hiit.fi",
+             "planetlab1.willab.fi",
+             "planetlab1.s3.kth.se",
+             "itchy.comlab.bth.se",
+             "planetlab-1.ida.liu.se",
+             "planetlab2.s3.kth.se",
+             "planetlab1.sics.se",
+             "planetlab1.tlm.unavarra.es",
+             "planetlab2.uc3m.es",
+             "planetlab1.uc3m.es",
+             "planetlab2.um.es",
+             "planet1.servers.ua.pt",
+             "planetlab2.fct.ualg.pt",
+             "planetlab-1.tagus.ist.utl.pt",
+             "planetlab-2.tagus.ist.utl.pt",
+             "planetlab-um00.di.uminho.pt",
+             "planet2.servers.ua.pt",
+             "planetlab1.mini.pw.edu.pl",
+             "roti.mimuw.edu.pl",
+             "planetlab1.ci.pwr.wroc.pl",
+             "planetlab1.pjwstk.edu.pl",
+             "ple2.tu.koszalin.pl",
+             "planetlab2.ci.pwr.wroc.pl",
+             "planetlab2.cyfronet.pl",
+             "plab2.ple.silweb.pl",
+             "planetlab1.cyfronet.pl",
+             "plab4.ple.silweb.pl",
+             "ple2.dmcs.p.lodz.pl",
+             "planetlab2.pjwstk.edu.pl",
+             "ple1.dmcs.p.lodz.pl",
+             "gschembra3.diit.unict.it",
+             "planetlab1.science.unitn.it",
+             "planetlab-1.ing.unimo.it",
+             "gschembra4.diit.unict.it",
+             "iraplab1.iralab.uni-karlsruhe.de",
+             "planetlab-1.fokus.fraunhofer.de",
+             "iraplab2.iralab.uni-karlsruhe.de",
+             "planet2.zib.de",
+             "pl2.uni-rostock.de",
+             "onelab-1.fhi-fokus.de",
+             "planet2.l3s.uni-hannover.de",
+             "planetlab1.exp-math.uni-essen.de",
+             "planetlab-2.fokus.fraunhofer.de",
+             "planetlab02.tkn.tu-berlin.de",
+             "planetlab1.informatik.uni-goettingen.de",
+             "planetlab1.informatik.uni-erlangen.de",
+             "planetlab2.lkn.ei.tum.de",
+             "planetlab1.wiwi.hu-berlin.de",
+             "planet1.l3s.uni-hannover.de",
+             "planetlab1.informatik.uni-wuerzburg.de",
+             "host3-plb.loria.fr",
+             "inriarennes1.irisa.fr",
+             "inriarennes2.irisa.fr",
+             "peeramide.irisa.fr",
+             "planetlab-1.imag.fr",
+             "planetlab-2.imag.fr",
+             "ple2.ipv6.lip6.fr",
+             "planetlab1.u-strasbg.fr",
+             "planetlab1.ionio.gr",
+             "planetlab2.ionio.gr",
+             "stella.planetlab.ntua.gr",
+             "vicky.planetlab.ntua.gr",
+             "planetlab1.cs.uoi.gr",
+             "pl002.ece.upatras.gr",
+             "planetlab04.cnds.unibe.ch",
+             "lsirextpc01.epfl.ch",
+             "planetlab2.csg.uzh.ch",
+             "planetlab1.csg.uzh.ch",
+             "planetlab-2.cs.unibas.ch",
+             "planetlab-1.cs.unibas.ch",
+             "planetlab4.cs.st-andrews.ac.uk",
+             "planetlab3.xeno.cl.cam.ac.uk",
+             "planetlab1.xeno.cl.cam.ac.uk",
+             "planetlab2.xeno.cl.cam.ac.uk",
+             "planetlab3.cs.st-andrews.ac.uk",
+             "planetlab1.aston.ac.uk",
+             "planetlab1.nrl.eecs.qmul.ac.uk",
+             "chimay.infonet.fundp.ac.be",
+             "orval.infonet.fundp.ac.be",
+             "rochefort.infonet.fundp.ac.be",
+            ]
+    ec = ExperimentController(exp_id = exp_id)
+
+    for host in hostnames:
+        node = add_node(ec, host, pl_slice)
+        for i in xrange(20):
+            app = add_app(ec)
+            ec.register_connection(app, node)
+            apps.append(app)
+
+    ec.deploy()
+
+    ec.wait_finished(apps)
+
+    ec.shutdown()
diff --git a/examples/manual_vlc_experiment_plexus.py b/examples/manual_vlc_experiment_plexus.py
new file mode 100644 (file)
index 0000000..fc449c7
--- /dev/null
@@ -0,0 +1,170 @@
+#!/usr/bin/env python
+from nepi.execution.resource import ResourceFactory
+from nepi.execution.ec import ExperimentController
+
+from nepi.resources.omf.omf_node import OMFNode
+from nepi.resources.omf.omf_application import OMFApplication
+from nepi.resources.omf.omf_interface import OMFWifiInterface
+from nepi.resources.omf.omf_channel import OMFChannel
+
+import logging
+import time
+
+logging.basicConfig()
+
+# Create the EC
+ec = ExperimentController()
+
+# Register the different RM that will be used
+ResourceFactory.register_type(OMFNode)
+ResourceFactory.register_type(OMFWifiInterface)
+ResourceFactory.register_type(OMFChannel)
+ResourceFactory.register_type(OMFApplication)
+
+# Create and Configure the Nodes
+guid = ec.register_resource("OMFNode")
+node1 = ec.get_resource(guid)
+node1.set('hostname', 'omf.plexus.wlab17')
+node1.set('xmppSlice', "nepi")
+node1.set('xmppHost', "xmpp-plexus.onelab.eu")
+node1.set('xmppPort', "5222")
+node1.set('xmppPassword', "1234")
+
+guid = ec.register_resource("OMFNode")
+node2 = ec.get_resource(guid)
+node2.set('hostname', "omf.plexus.wlab37")
+node2.set('xmppSlice', "nepi")
+node2.set('xmppHost', "xmpp-plexus.onelab.eu")
+node2.set('xmppPort', "5222")
+node2.set('xmppPassword', "1234")
+
+# Create and Configure the Interfaces
+guid = ec.register_resource("OMFWifiInterface")
+iface1 = ec.get_resource(guid)
+iface1.set('alias', "w0")
+iface1.set('mode', "adhoc")
+iface1.set('type', "g")
+iface1.set('essid', "helloworld")
+iface1.set('ip', "10.0.0.17")
+iface1.set('xmppSlice', "nepi")
+iface1.set('xmppHost', "xmpp-plexus.onelab.eu")
+iface1.set('xmppPort', "5222")
+iface1.set('xmppPassword', "1234")
+
+guid = ec.register_resource("OMFWifiInterface")
+iface2 = ec.get_resource(guid)
+iface2.set('alias', "w0")
+iface2.set('mode', "adhoc")
+iface2.set('type', 'g')
+iface2.set('essid', "helloworld")
+iface2.set('ip', "10.0.0.37")
+iface2.set('xmppSlice', "nepi")
+iface2.set('xmppHost', "xmpp-plexus.onelab.eu")
+iface2.set('xmppPort', "5222")
+iface2.set('xmppPassword', "1234")
+
+# Create and Configure the Channel
+guid = ec.register_resource("OMFChannel")
+channel = ec.get_resource(guid)
+channel.set('channel', "6")
+channel.set('xmppSlice', "nepi")
+channel.set('xmppHost', "xmpp-plexus.onelab.eu")
+channel.set('xmppPort', "5222")
+channel.set('xmppPassword', "1234")
+
+# Create and Configure the Application
+guid = ec.register_resource("OMFApplication")
+app1 = ec.get_resource(guid)
+app1.set('appid', 'Vlc#1')
+app1.set('path', "/opt/vlc-1.1.13/cvlc")
+app1.set('args', "/opt/10-by-p0d.avi --sout '#rtp{dst=10.0.0.37,port=1234,mux=ts}'")
+app1.set('env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
+app1.set('xmppSlice', "nepi")
+app1.set('xmppHost', "xmpp-plexus.onelab.eu")
+app1.set('xmppPort', "5222")
+app1.set('xmppPassword', "1234")
+
+guid = ec.register_resource("OMFApplication")
+app2 = ec.get_resource(guid)
+app2.set('appid', 'Vlc#2')
+app2.set('path', "/opt/vlc-1.1.13/cvlc")
+app2.set('args', "rtp://10.0.0.37:1234")
+app2.set('env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
+app2.set('xmppSlice', "nepi")
+app2.set('xmppHost', "xmpp-plexus.onelab.eu")
+app2.set('xmppPort', "5222")
+app2.set('xmppPassword', "1234")
+
+guid = ec.register_resource("OMFApplication")
+app3 = ec.get_resource(guid)
+app3.set('appid', 'Kill#2')
+app3.set('path', "/usr/bin/killall")
+app3.set('args', "vlc")
+app3.set('env', " ")
+app3.set('xmppSlice', "nepi")
+app3.set('xmppHost', "xmpp-plexus.onelab.eu")
+app3.set('xmppPort', "5222")
+app3.set('xmppPassword', "1234")
+
+# Connection
+app3.connect(node1.guid)
+node1.connect(app3.guid)
+
+app1.connect(node1.guid)
+node1.connect(app1.guid)
+
+node1.connect(iface1.guid)
+iface1.connect(node1.guid)
+
+iface1.connect(channel.guid)
+channel.connect(iface1.guid)
+
+channel.connect(iface2.guid)
+iface2.connect(channel.guid)
+
+iface2.connect(node2.guid)
+node2.connect(iface2.guid)
+
+node2.connect(app2.guid)
+app2.connect(node2.guid)
+
+# Local Deploy
+node1.deploy()
+node2.deploy()
+iface1.deploy()
+iface2.deploy()
+channel.deploy()
+app1.deploy()
+app2.deploy()
+app3.deploy()
+
+# Start the Nodes
+node1.start()
+node2.start()
+time.sleep(2)
+
+# Start the Interfaces
+iface1.start()
+iface2.start()
+
+# Start the Channel
+time.sleep(2)
+channel.start()
+time.sleep(2)
+
+# Start the Application
+app1.start()
+time.sleep(2)
+app2.start()
+
+time.sleep(20)
+
+# Stop the Application
+app1.stop()
+app2.stop()
+time.sleep(1)
+app3.start()
+time.sleep(2)
+
+# Stop Experiment
+ec.shutdown()
diff --git a/examples/netns_routing_experiment.py b/examples/netns_routing_experiment.py
deleted file mode 100644 (file)
index f9f15ff..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-#
-# Experiment Topology:
-#
-#  n1 --- n2 --- n3
-# 0.1  0.2  1.1  1.2
-#    
-
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-import getpass
-import tempfile
-import time
-
-user = getpass.getuser()
-root_dir = tempfile.mkdtemp()
-
-exp_desc = ExperimentDescription()
-
-testbed_id = "netns"
-netns_provider = FactoriesProvider(testbed_id)
-netns_desc = exp_desc.add_testbed_description(netns_provider)
-netns_desc.set_attribute_value("homeDirectory", root_dir)
-#netns_desc.set_attribute_value("enableDebug", True)
-
-node1 = netns_desc.create("Node")
-node2 = netns_desc.create("Node")
-node3 = netns_desc.create("Node")
-
-iface12 = netns_desc.create("P2PNodeInterface")
-iface12.set_attribute_value("up", True)
-node1.connector("devs").connect(iface12.connector("node"))
-
-iface21 = netns_desc.create("P2PNodeInterface")
-iface21.set_attribute_value("up", True)
-node2.connector("devs").connect(iface21.connector("node"))
-
-iface23 = netns_desc.create("P2PNodeInterface")
-iface23.set_attribute_value("up", True)
-node2.connector("devs").connect(iface23.connector("node"))
-
-iface32 = netns_desc.create("P2PNodeInterface")
-iface32.set_attribute_value("up", True)
-node3.connector("devs").connect(iface32.connector("node"))
-
-iface12.connector("p2p").connect(iface21.connector("p2p"))
-iface23.connector("p2p").connect(iface32.connector("p2p"))
-
-ip12 = iface12.add_address()
-ip12.set_attribute_value("Address", "192.168.0.1")
-ip12.set_attribute_value("NetPrefix", 30)
-
-ip21 = iface21.add_address()
-ip21.set_attribute_value("Address", "192.168.0.2")
-ip21.set_attribute_value("NetPrefix", 30)
-
-ip23 = iface23.add_address()
-ip23.set_attribute_value("Address", "192.168.1.1")
-ip23.set_attribute_value("NetPrefix", 30)
-
-ip32 = iface32.add_address()
-ip32.set_attribute_value("Address", "192.168.1.2")
-ip32.set_attribute_value("NetPrefix", 30)
-
-route1 = node1.add_route()
-route1.set_attribute_value("Destination", "192.168.1.0")
-route1.set_attribute_value("NetPrefix", 30)
-route1.set_attribute_value("NextHop", "192.168.0.2")
-
-route2 = node3.add_route()
-route2.set_attribute_value("Destination", "192.168.0.0")
-route2.set_attribute_value("NetPrefix", 30)
-route2.set_attribute_value("NextHop", "192.168.1.1")
-
-app1 = netns_desc.create("Application")
-app1.set_attribute_value("command", "ping -qc 3 192.168.1.2")
-app1.set_attribute_value("user", user)
-app1.connector("node").connect(node1.connector("apps"))
-
-app1.enable_trace("stdout")
-
-app2 = netns_desc.create("Application")
-app2.set_attribute_value("command", "ping -qc 3 192.168.0.1")
-app2.set_attribute_value("user", user)
-app2.connector("node").connect(node3.connector("apps"))
-
-app2.enable_trace("stdout")
-
-xml = exp_desc.to_xml()
-
-controller = ExperimentController(xml, root_dir)
-controller.start()
-while not (controller.is_finished(app1.guid) and \
-        controller.is_finished(app2.guid)):
-    time.sleep(0.5)
-
-result1 = controller.trace(app1.guid, "stdout")
-result2 = controller.trace(app2.guid, "stdout")
-
-controller.stop()
-controller.shutdown()
-
-print result1
-print result2
diff --git a/examples/netns_xterm_experiment.py b/examples/netns_xterm_experiment.py
deleted file mode 100644 (file)
index b6f2bad..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-#
-# Experiment Topology:
-#
-#  n1 --- n2
-#  0.1   0.2 
-#    
-
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-import getpass
-import tempfile
-import time
-
-user = getpass.getuser()
-root_dir = tempfile.mkdtemp()
-
-exp_desc = ExperimentDescription()
-
-testbed_id = "netns"
-netns_provider = FactoriesProvider(testbed_id)
-netns_desc = exp_desc.add_testbed_description(netns_provider)
-netns_desc.set_attribute_value("homeDirectory", root_dir)
-#netns_desc.set_attribute_value("enableDebug", True)
-
-node1 = netns_desc.create("Node")
-node1.set_attribute_value("forward_X11", True)
-node2 = netns_desc.create("Node")
-node2.set_attribute_value("forward_X11", True)
-
-iface12 = netns_desc.create("P2PNodeInterface")
-iface12.set_attribute_value("up", True)
-node1.connector("devs").connect(iface12.connector("node"))
-
-iface21 = netns_desc.create("P2PNodeInterface")
-iface21.set_attribute_value("up", True)
-node2.connector("devs").connect(iface21.connector("node"))
-
-iface12.connector("p2p").connect(iface21.connector("p2p"))
-
-ip12 = iface12.add_address()
-ip12.set_attribute_value("Address", "192.168.0.1")
-ip12.set_attribute_value("NetPrefix", 30)
-
-ip21 = iface21.add_address()
-ip21.set_attribute_value("Address", "192.168.0.2")
-ip21.set_attribute_value("NetPrefix", 30)
-
-app1 = netns_desc.create("Application")
-app1.set_attribute_value("command", "xterm")
-app1.set_attribute_value("user", user)
-app1.connector("node").connect(node1.connector("apps"))
-
-app2 = netns_desc.create("Application")
-app2.set_attribute_value("command", "xterm")
-app2.set_attribute_value("user", user)
-app2.connector("node").connect(node2.connector("apps"))
-
-xml = exp_desc.to_xml()
-
-controller = ExperimentController(xml, root_dir)
-controller.start()
-while not (controller.is_finished(app1.guid) and \
-        controller.is_finished(app2.guid)):
-    time.sleep(0.5)
-
-controller.stop()
-controller.shutdown()
diff --git a/examples/ns3_runtime_attribute_change.py b/examples/ns3_runtime_attribute_change.py
deleted file mode 100644 (file)
index 7481888..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-#
-# Experiment Topology:
-#
-#  n1 --- n2
-#  0.1   0.2 
-#    
-
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-import tempfile
-import time
-
-root_dir = tempfile.mkdtemp()
-
-exp_desc = ExperimentDescription()
-
-testbed_id = "ns3"
-ns3_provider = FactoriesProvider(testbed_id)
-ns3_desc = exp_desc.add_testbed_description(ns3_provider)
-ns3_desc.set_attribute_value("homeDirectory", root_dir)
-ns3_desc.set_attribute_value("SimulatorImplementationType", "ns3::RealtimeSimulatorImpl")
-ns3_desc.set_attribute_value("ChecksumEnabled", True)
-
-node1 = ns3_desc.create("ns3::Node")
-ipv41 = ns3_desc.create("ns3::Ipv4L3Protocol")
-arp1  = ns3_desc.create("ns3::ArpL3Protocol")
-icmp1 = ns3_desc.create("ns3::Icmpv4L4Protocol")
-node1.connector("protos").connect(ipv41.connector("node"))
-node1.connector("protos").connect(arp1.connector("node"))
-node1.connector("protos").connect(icmp1.connector("node"))
-iface1 = ns3_desc.create("ns3::PointToPointNetDevice")
-queue1 = ns3_desc.create("ns3::DropTailQueue")
-node1.connector("devs").connect(iface1.connector("node"))
-iface1.connector("queue").connect(queue1.connector("dev"))
-trace1 = iface1.enable_trace("P2PAsciiTrace")
-ip1 = iface1.add_address()
-ip1.set_attribute_value("Address", "10.0.0.1")
-
-node2 = ns3_desc.create("ns3::Node")
-ipv42 = ns3_desc.create("ns3::Ipv4L3Protocol")
-arp2  = ns3_desc.create("ns3::ArpL3Protocol")
-icmp2 = ns3_desc.create("ns3::Icmpv4L4Protocol")
-node2.connector("protos").connect(ipv42.connector("node"))
-node2.connector("protos").connect(arp2.connector("node"))
-node2.connector("protos").connect(icmp2.connector("node"))
-iface2 = ns3_desc.create("ns3::PointToPointNetDevice")
-queue2 = ns3_desc.create("ns3::DropTailQueue")
-node2.connector("devs").connect(iface2.connector("node"))
-iface2.connector("queue").connect(queue2.connector("dev"))
-trace2 = iface2.enable_trace("P2PAsciiTrace")
-ip2 = iface2.add_address()
-ip2.set_attribute_value("Address", "10.0.0.2")
-
-chan = ns3_desc.create("ns3::PointToPointChannel")
-chan.set_attribute_value("Delay", "0ns")
-iface1.connector("chan").connect(chan.connector("dev2"))
-iface2.connector("chan").connect(chan.connector("dev2"))
-
-app = ns3_desc.create("ns3::V4Ping")
-app.set_attribute_value("Remote", "10.0.0.2")
-app.set_attribute_value("Verbose", True)
-app.set_attribute_value("StartTime", "0s")
-app.set_attribute_value("StopTime", "20s")
-
-app.connector("node").connect(node1.connector("apps"))
-
-xml = exp_desc.to_xml()
-
-controller = ExperimentController(xml, root_dir)
-controller.start()
-
-time.sleep(5)
-
-controller.set(chan.guid, "Delay", "10s")
-
-time.sleep(5)
-
-controller.set(chan.guid, "Delay", "0s")
-
-while not controller.is_finished(app.guid):
-    time.sleep(0.5)
-
-controller.stop()
-controller.shutdown()
diff --git a/examples/ns3_wifi_hidden_terminal.py b/examples/ns3_wifi_hidden_terminal.py
deleted file mode 100644 (file)
index 4a1cc39..0000000
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-#
-# Experiment Topology:
-#
-#  n1 --- n2 --- n3
-# 
-#
-# This example is based on the ns-3 wifi-hidden-terminal.cc example.
-#
-
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-import tempfile
-import time
-
-def create_node(ns3_desc):
-    node = ns3_desc.create("ns3::Node")
-    ipv4 = ns3_desc.create("ns3::Ipv4L3Protocol")
-    arp  = ns3_desc.create("ns3::ArpL3Protocol")
-    icmp = ns3_desc.create("ns3::Icmpv4L4Protocol")
-    udp = ns3_desc.create("ns3::UdpL4Protocol")
-    tcp = ns3_desc.create("ns3::TcpL4Protocol")
-    node.connector("protos").connect(ipv4.connector("node"))
-    node.connector("protos").connect(arp.connector("node"))
-    node.connector("protos").connect(icmp.connector("node"))
-    node.connector("protos").connect(udp.connector("node"))
-    node.connector("protos").connect(tcp.connector("node"))
-    mobility = ns3_desc.create("ns3::ConstantPositionMobilityModel")
-    node.connector("mobility").connect(mobility.connector("node"))
-    
-    return (node, mobility)
-
-def create_wifi_device(ns3_desc, node, channel):
-    dev = ns3_desc.create("ns3::WifiNetDevice")
-    node.connector("devs").connect(dev.connector("node"))
-    manager = ns3_desc.create("ns3::ConstantRateWifiManager")
-    manager.set_attribute_value("DataMode", "DsssRate2Mbps")
-    manager.set_attribute_value("ControlMode", "DsssRate1Mbps")
-    dev.connector("manager").connect(manager.connector("dev"))
-
-    mac = ns3_desc.create("ns3::AdhocWifiMac")
-    mac.set_attribute_value("Standard",  "WIFI_PHY_STANDARD_80211b")
-    mac.set_attribute_value("QosSupported", False)
-    dev.connector("mac").connect(mac.connector("dev"))
-
-    phy = ns3_desc.create("ns3::YansWifiPhy")
-    phy.set_attribute_value("Standard",  "WIFI_PHY_STANDARD_80211b")
-    dev.connector("phy").connect(phy.connector("dev"))
-    channel.connector("phys").connect(phy.connector("chan"))
-    error = ns3_desc.create("ns3::NistErrorRateModel")
-    phy.connector("err").connect(error.connector("phy"))
-    return dev
-
-root_dir = tempfile.mkdtemp()
-
-exp_desc = ExperimentDescription()
-
-testbed_id = "ns3"
-ns3_provider = FactoriesProvider(testbed_id)
-ns3_desc = exp_desc.add_testbed_description(ns3_provider)
-ns3_desc.set_attribute_value("homeDirectory", root_dir)
-#ns3_desc.set_attribute_value("SimulatorImplementationType", "ns3::RealtimeSimulatorImpl")
-#ns3_desc.set_attribute_value("ChecksumEnabled", True)
-
-# 0. Enable or disable CTS/RTS
-# ??
-
-# 1 & 2 & 6. Create 3 nodes with their mobility models, and Install TCP/IP stack & assign IP addresses
-(node1, mob1) = create_node(ns3_desc)
-(node2, mob2) = create_node(ns3_desc)
-(node3, mob3) = create_node(ns3_desc)
-
-#  3. Create propagation loss matrix
-matrix = ns3_desc.create("ns3::MatrixPropagationLossModel")
-matrix.set_attribute_value("DefaultLoss", 200.0)
-
-mp1 = ns3_desc.create("ns3::Nepi::MobilityPair")
-mp1.connector("matrix").connect(matrix.connector("mobpair"))
-mp1.set_attribute_value("Loss", 50.0)
-mp1.connector("ma").connect(mob1.connector("mp"))
-mp1.connector("mb").connect(mob2.connector("mp"))
-
-mp2 = ns3_desc.create("ns3::Nepi::MobilityPair")
-mp2.connector("matrix").connect(matrix.connector("mobpair"))
-mp2.set_attribute_value("Loss", 50.0)
-mp2.connector("ma").connect(mob3.connector("mp"))
-mp2.connector("mb").connect(mob2.connector("mp"))
-
-# 4. Create & setup wifi channel
-channel = ns3_desc.create("ns3::YansWifiChannel")
-channel.connector("loss").connect(matrix.connector("chan"))
-delay = ns3_desc.create("ns3::ConstantSpeedPropagationDelayModel")
-channel.connector("delay").connect(delay.connector("chan"))
-
-# 5. Install wireless devices
-
-dev1 = create_wifi_device(ns3_desc, node1, channel)
-ip1 = dev1.add_address()
-ip1.set_attribute_value("Address", "10.0.0.1")
-ip1.set_attribute_value("NetPrefix", 8)
-
-dev2 = create_wifi_device(ns3_desc, node2, channel)
-ip2 = dev2.add_address()
-ip2.set_attribute_value("Address", "10.0.0.2")
-ip2.set_attribute_value("NetPrefix", 8)
-
-dev3 = create_wifi_device(ns3_desc, node3, channel)
-ip3 = dev3.add_address()
-ip3.set_attribute_value("Address", "10.0.0.3")
-ip3.set_attribute_value("NetPrefix", 8)
-
-app = ns3_desc.create("ns3::V4Ping")
-app.set_attribute_value("Remote", "10.0.0.2")
-app.set_attribute_value("Verbose", True)
-app.set_attribute_value("StartTime", "0s")
-app.set_attribute_value("StopTime", "20s")
-
-app.connector("node").connect(node1.connector("apps"))
-
-xml = exp_desc.to_xml()
-
-controller = ExperimentController(xml, root_dir)
-controller.start()
-
-while not controller.is_finished(app.guid):
-    time.sleep(0.5)
-
-controller.stop()
-controller.shutdown()
diff --git a/examples/omf-iminds-vlc.py b/examples/omf-iminds-vlc.py
deleted file mode 100644 (file)
index d3a8ec0..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-#
-# Experiment Topology:
-#
-#  n1 --- n2
-#  0.1   0.2 
-#    
-
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-import getpass
-import logging
-import tempfile
-import time
-
-logging.basicConfig(level=logging.DEBUG)
-
-root_dir = tempfile.mkdtemp()
-
-exp_desc = ExperimentDescription()
-
-testbed_id = "omf"
-omf_provider = FactoriesProvider(testbed_id)
-omf_desc = exp_desc.add_testbed_description(omf_provider)
-omf_desc.set_attribute_value("homeDirectory", root_dir)
-omf_desc.set_attribute_value("enableDebug", True)
-omf_desc.set_attribute_value("xmppSlice", "default_slice_iMinds")
-omf_desc.set_attribute_value("xmppHost", "xmpp.ilabt.iminds.be")
-omf_desc.set_attribute_value("xmppPort", 5222)
-omf_desc.set_attribute_value("xmppPassword", "1234")
-
-# Add node1
-node1 = omf_desc.create("Node")
-node1.set_attribute_value("hostname", "omf.ibbt.open.node3")
-
-# Add configuration for interface 1
-iface1 = omf_desc.create("WifiInterface")
-iface1.set_attribute_value("alias", "w0")
-iface1.set_attribute_value("mode", "adhoc")
-iface1.set_attribute_value("channel", "6")
-iface1.set_attribute_value("type", "g")
-iface1.set_attribute_value("essid", "cvlcmode")
-iface1.set_attribute_value("ip", "192.168.0.3")
-node1.connector("devs").connect(iface1.connector("node"))
-
-# Add multicast route to node 1
-route1 = node1.add_route()
-route1.set_attribute_value("Destination", "224.0.0.0")
-route1.set_attribute_value("NetPrefix", 4)
-route1.set_attribute_value("Device", "wlan0")
-
-# Add node2
-node2 = omf_desc.create("Node")
-node2.set_attribute_value("hostname", "omf.ibbt.open.node4")
-
-# Add configuration for interface 2
-iface2 = omf_desc.create("WifiInterface")
-iface2.set_attribute_value("alias", "w0")
-iface2.set_attribute_value("mode", "adhoc")
-iface2.set_attribute_value("channel", "6")
-iface2.set_attribute_value("type", "g")
-iface2.set_attribute_value("essid", "cvlcmode")
-iface2.set_attribute_value("ip", "192.168.0.4")
-node2.connector("devs").connect(iface2.connector("node"))
-
-# Add multicast route to node 2
-route2 = node2.add_route()
-route2.set_attribute_value("Destination", "224.0.0.0")
-route2.set_attribute_value("NetPrefix", 4)
-route2.set_attribute_value("Device", "wlan0")
-
-# Add a channel
-channel = omf_desc.create("Channel")
-channel.set_attribute_value("mode", "adhoc")
-channel.set_attribute_value("channel", "6")
-channel.set_attribute_value("type", "g")
-channel.set_attribute_value("essid", "cvlcmode")
-channel.connector("devs").connect(iface1.connector("chan"))
-channel.connector("devs").connect(iface2.connector("chan"))
-
-# Add a vlc server to stream a video using multicast
-app1 = omf_desc.create("OmfApplication")
-app1.set_attribute_value("appId", "Vlc#1")
-app1.set_attribute_value("arguments", "/opt/bbb_240p_mpeg4_lq.ts --sout '#rtp{dst=239.255.0.1,port=1234,mux=ts}' vlc://quit")
-app1.set_attribute_value("path", "/opt/vlc-1.1.13/cvlc")
-app1.set_attribute_value("env", "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
-app1.connector("node").connect(node1.connector("apps"))
-
-# Add a vlc client to receive the video stream
-app2 = omf_desc.create("OmfApplication")
-app2.set_attribute_value("appId", "Vlc#2")
-app2.set_attribute_value("arguments", "rtp://239.255.0.1:1234")
-app2.set_attribute_value("path", "/opt/vlc-1.1.13/cvlc")
-# To see the stream to a ssh -X connection, the DISPLAY variable must be set to the value of the node.
-# Also don't forget to execute in 'xhost + localhost' in the node
-app2.set_attribute_value("env", "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
-app2.connector("node").connect(node2.connector("apps"))
-
-xml = exp_desc.to_xml()
-
-controller = ExperimentController(xml, root_dir)
-controller.start()
-#while not (controller.is_finished(app1.guid) and \
-#        controller.is_finished(app2.guid)):
-#    time.sleep(0.5)
-
-time.sleep(10)
-
-#controller.set(iface2.guid, "channel", "1")
-
-#time.sleep(5)
-
-controller.stop()
-controller.shutdown()
-
diff --git a/examples/omf-nitos-vlc.py b/examples/omf-nitos-vlc.py
deleted file mode 100644 (file)
index 33f13d3..0000000
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-#
-# Experiment Topology:
-#
-#  n1 --- n2
-#  0.1   0.2 
-#    
-
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-import getpass
-import logging
-import tempfile
-import time
-
-logging.basicConfig(level=logging.DEBUG)
-
-root_dir = tempfile.mkdtemp()
-
-exp_desc = ExperimentDescription()
-
-testbed_id = "omf"
-omf_provider = FactoriesProvider(testbed_id)
-omf_desc = exp_desc.add_testbed_description(omf_provider)
-omf_desc.set_attribute_value("homeDirectory", root_dir)
-omf_desc.set_attribute_value("enableDebug", True)
-omf_desc.set_attribute_value("xmppSlice", "tribino")
-omf_desc.set_attribute_value("xmppHost", "nitlab.inf.uth.gr")
-omf_desc.set_attribute_value("xmppPort", 5222)
-omf_desc.set_attribute_value("xmppPassword", "1234")
-
-# Add node1
-node1 = omf_desc.create("Node")
-node1.set_attribute_value("hostname", "omf.nitos.node019")
-
-# Add configuration for interface 1
-iface1 = omf_desc.create("WifiInterface")
-iface1.set_attribute_value("alias", "w0")
-iface1.set_attribute_value("mode", "adhoc")
-iface1.set_attribute_value("channel", "6")
-iface1.set_attribute_value("type", "g")
-iface1.set_attribute_value("essid", "cvlcmode")
-iface1.set_attribute_value("ip", "192.168.0.19")
-node1.connector("devs").connect(iface1.connector("node"))
-
-# Add multicast route to node 1
-route1 = node1.add_route()
-route1.set_attribute_value("Destination", "224.0.0.0")
-route1.set_attribute_value("NetPrefix", 4)
-route1.set_attribute_value("Device", "wlan0")
-
-# Add node2
-node2 = omf_desc.create("Node")
-node2.set_attribute_value("hostname", "omf.nitos.node020")
-
-# Add configuration for interface 2
-iface2 = omf_desc.create("WifiInterface")
-iface2.set_attribute_value("alias", "w0")
-iface2.set_attribute_value("mode", "adhoc")
-iface2.set_attribute_value("channel", "6")
-iface2.set_attribute_value("type", "g")
-iface2.set_attribute_value("essid", "cvlcmode")
-iface2.set_attribute_value("ip", "192.168.0.20")
-node2.connector("devs").connect(iface2.connector("node"))
-
-# Add multicast route to node 2
-route2 = node2.add_route()
-route2.set_attribute_value("Destination", "224.0.0.0")
-route2.set_attribute_value("NetPrefix", 4)
-route2.set_attribute_value("Device", "wlan0")
-
-# Add a channel
-channel = omf_desc.create("Channel")
-channel.set_attribute_value("mode", "adhoc")
-channel.set_attribute_value("channel", "6")
-channel.set_attribute_value("type", "g")
-channel.set_attribute_value("essid", "cvlcmode")
-channel.connector("devs").connect(iface1.connector("chan"))
-channel.connector("devs").connect(iface2.connector("chan"))
-
-# Add a vlc server to stream a video using multicast
-app1 = omf_desc.create("OmfApplication")
-app1.set_attribute_value("appId", "Vlc#1")
-#app1.set_attribute_value("arguments", "/opt/bbb_240p_mpeg4_lq.ts --sout '#rtp{dst=239.255.0.1,port=1234,mux=ts}' vlc://quit")
-app1.set_attribute_value("arguments", "/opt/10-by-p0d.avi --sout '#rtp{dst=192.168.0.20,port=1234,mux=ts}' vlc://quit")
-app1.set_attribute_value("path", "/opt/vlc-1.1.13/cvlc")
-app1.set_attribute_value("env", "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
-app1.connector("node").connect(node1.connector("apps"))
-
-# Add a vlc client to receive the video stream
-app2 = omf_desc.create("OmfApplication")
-app2.set_attribute_value("appId", "Vlc#2")
-#app2.set_attribute_value("arguments", "rtp://239.255.0.1:1234")
-app2.set_attribute_value("arguments", "rtp://192.168.0.20:1234")
-app2.set_attribute_value("path", "/opt/vlc-1.1.13/cvlc")
-# To see the stream to a ssh -X connection, the DISPLAY variable must be set to the value of the node.
-# Also don't forget to execute in 'xhost + localhost' in the node
-app2.set_attribute_value("env", "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
-app2.connector("node").connect(node2.connector("apps"))
-
-xml = exp_desc.to_xml()
-
-controller = ExperimentController(xml, root_dir)
-controller.start()
-#while not (controller.is_finished(app1.guid) and \
-#        controller.is_finished(app2.guid)):
-#    time.sleep(0.5)
-
-time.sleep(10)
-
-#controller.set(iface2.guid, "channel", "1")
-
-#time.sleep(5)
-
-controller.stop()
-controller.shutdown()
-
diff --git a/examples/omf-plexuslab-vlc.py b/examples/omf-plexuslab-vlc.py
deleted file mode 100644 (file)
index 7127c1c..0000000
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-#
-# Experiment Topology:
-#
-#  n1 --- n2
-#  0.1   0.2 
-#    
-
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-import getpass
-import logging
-import tempfile
-import time
-
-logging.basicConfig(level=logging.DEBUG)
-
-root_dir = tempfile.mkdtemp()
-
-exp_desc = ExperimentDescription()
-
-testbed_id = "omf"
-omf_provider = FactoriesProvider(testbed_id)
-omf_desc = exp_desc.add_testbed_description(omf_provider)
-omf_desc.set_attribute_value("homeDirectory", root_dir)
-omf_desc.set_attribute_value("enableDebug", True)
-omf_desc.set_attribute_value("xmppSlice", "nepi")
-omf_desc.set_attribute_value("xmppHost", "xmpp-plexus.onelab.eu")
-omf_desc.set_attribute_value("xmppPort", 5222)
-omf_desc.set_attribute_value("xmppPassword", "1234")
-
-# Add node1
-node1 = omf_desc.create("Node")
-node1.set_attribute_value("hostname", "omf.plexus.wlab17")
-
-# Add configuration for interface 1
-iface1 = omf_desc.create("WifiInterface")
-iface1.set_attribute_value("alias", "w0")
-iface1.set_attribute_value("mode", "adhoc")
-iface1.set_attribute_value("channel", "6")
-iface1.set_attribute_value("type", "g")
-iface1.set_attribute_value("essid", "cvlcmode")
-iface1.set_attribute_value("ip", "10.0.0.17")
-node1.connector("devs").connect(iface1.connector("node"))
-
-# Add multicast route to node 1
-route1 = node1.add_route()
-route1.set_attribute_value("Destination", "224.0.0.0")
-route1.set_attribute_value("NetPrefix", 4)
-route1.set_attribute_value("Device", "wlan0")
-
-# Add node2
-node2 = omf_desc.create("Node")
-node2.set_attribute_value("hostname", "omf.plexus.wlab37")
-
-# Add configuration for interface 2
-iface2 = omf_desc.create("WifiInterface")
-iface2.set_attribute_value("alias", "w0")
-iface2.set_attribute_value("mode", "adhoc")
-iface2.set_attribute_value("channel", "6")
-iface2.set_attribute_value("type", "g")
-iface2.set_attribute_value("essid", "cvlcmode")
-iface2.set_attribute_value("ip", "10.0.0.37")
-node2.connector("devs").connect(iface2.connector("node"))
-
-# Add multicast route to node 2
-route2 = node2.add_route()
-route2.set_attribute_value("Destination", "224.0.0.0")
-route2.set_attribute_value("NetPrefix", 4)
-route2.set_attribute_value("Device", "wlan0")
-
-# Add a channel
-channel = omf_desc.create("Channel")
-channel.set_attribute_value("mode", "adhoc")
-channel.set_attribute_value("channel", "6")
-channel.set_attribute_value("type", "g")
-channel.set_attribute_value("essid", "cvlcmode")
-channel.connector("devs").connect(iface1.connector("chan"))
-channel.connector("devs").connect(iface2.connector("chan"))
-
-# Add a vlc server to stream a video using multicast
-app1 = omf_desc.create("OmfApplication")
-app1.set_attribute_value("appId", "Vlc#1")
-#app1.set_attribute_value("arguments", "/opt/bbb_240p_mpeg4_lq.ts --sout '#rtp{dst=239.255.0.1,port=1234,mux=ts}' vlc://quit")
-app1.set_attribute_value("arguments", "/opt/10-by-p0d.avi --sout '#rtp{dst=10.0.0.37,port=1234,mux=ts}' vlc://quit")
-app1.set_attribute_value("path", "/opt/vlc-1.1.13/cvlc")
-app1.set_attribute_value("env", "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
-app1.connector("node").connect(node1.connector("apps"))
-
-# Add a vlc client to receive the video stream
-app2 = omf_desc.create("OmfApplication")
-app2.set_attribute_value("appId", "Vlc#2")
-#app2.set_attribute_value("arguments", "rtp://239.255.0.1:1234")
-app2.set_attribute_value("arguments", "rtp://10.0.0.37:1234")
-app2.set_attribute_value("path", "/opt/vlc-1.1.13/cvlc")
-# To see the stream to a ssh -X connection, the DISPLAY variable must be set to the value of the node.
-# Also don't forget to execute in 'xhost + localhost' in the node
-app2.set_attribute_value("env", "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
-app2.connector("node").connect(node2.connector("apps"))
-
-xml = exp_desc.to_xml()
-
-controller = ExperimentController(xml, root_dir)
-controller.start()
-#while not (controller.is_finished(app1.guid) and \
-#        controller.is_finished(app2.guid)):
-#    time.sleep(0.5)
-
-time.sleep(30)
-
-#controller.set(iface2.guid, "channel", "1")
-
-#time.sleep(5)
-
-controller.stop()
-controller.shutdown()
-
diff --git a/examples/omf-plexuslab-xeyes.py b/examples/omf-plexuslab-xeyes.py
deleted file mode 100644 (file)
index 7e83943..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-#
-# Experiment Topology:
-#
-#  n1 --- n2
-#  0.1   0.2 
-#    
-
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-import getpass
-import logging
-import tempfile
-import time
-
-logging.basicConfig(level=logging.DEBUG)
-
-root_dir = tempfile.mkdtemp()
-
-exp_desc = ExperimentDescription()
-
-testbed_id = "omf"
-omf_provider = FactoriesProvider(testbed_id)
-omf_desc = exp_desc.add_testbed_description(omf_provider)
-omf_desc.set_attribute_value("homeDirectory", root_dir)
-omf_desc.set_attribute_value("enableDebug", True)
-omf_desc.set_attribute_value("xmppSlice", "default_slice")
-omf_desc.set_attribute_value("xmppHost", "xmpp-plexus.onelab.eu")
-omf_desc.set_attribute_value("xmppPort", 5222)
-omf_desc.set_attribute_value("xmppPassword", "1234")
-
-node1 = omf_desc.create("Node")
-node1.set_attribute_value("hostname", "omf.plexus.wlab17")
-
-app1 = omf_desc.create("OmfApplication")
-app1.set_attribute_value("appId", "xeyes#1")
-app1.set_attribute_value("path", "/usr/bin/xeyes")
-app1.set_attribute_value("env", "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
-app1.connector("node").connect(node1.connector("apps"))
-
-xml = exp_desc.to_xml()
-
-controller = ExperimentController(xml, root_dir)
-controller.start()
-
-time.sleep(30)
-
-controller.stop()
-controller.shutdown()
-
diff --git a/examples/planetlab_fix_gpgkeys.py b/examples/planetlab_fix_gpgkeys.py
deleted file mode 100644 (file)
index 5bb2bf0..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import getpass
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-from nepi.util import proxy
-from nepi.util.constants import DeploymentConfiguration as DC
-import os
-import shutil
-import tempfile
-import time
-
-def add_node(pl_desc, pl_inet, hostname):
-    pl_node = pl_desc.create("Node")
-    pl_node.set_attribute_value("hostname", hostname)
-    pl_iface = pl_desc.create("NodeInterface")
-    pl_iface.connector("inet").connect(pl_inet.connector("devs"))
-    pl_node.connector("devs").connect(pl_iface.connector("node"))
-
-    return pl_node
-
-def add_app(pl_desc, pl_node):
-    pl_app = pl_desc.create("Application")
-    pl_app.set_attribute_value("command", "yum reinstall -y --nogpgcheck fedora-release")
-    pl_app.set_attribute_value("sudo", True)
-    pl_app.enable_trace("stderr")
-    pl_app.enable_trace("stdout")
-    pl_app.connector("node").connect(pl_node.connector("apps"))
-    
-    return pl_app
-
-root_dir = tempfile.mkdtemp()
-user = getpass.getuser()
-slicename = os.environ["PL_SLICE"]
-plchost = "www.planet-lab.eu"
-port_base = 2000 + (os.getpid() % 1000) * 13
-pl_ssh_key = os.environ.get(
-    "PL_SSH_KEY",
-    "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'],) )
-pl_user = os.environ.get('PL_USER')
-pl_pwd = os.environ.get('PL_PASS')
-
-exp_desc = ExperimentDescription()
-
-pl_provider = FactoriesProvider("planetlab")
-pl_desc = exp_desc.add_testbed_description(pl_provider)
-pl_desc.set_attribute_value("homeDirectory", root_dir)
-pl_desc.set_attribute_value("slice", slicename)
-pl_desc.set_attribute_value("sliceSSHKey", pl_ssh_key)
-pl_desc.set_attribute_value("authUser", pl_user)
-pl_desc.set_attribute_value("authPass", pl_pwd)
-pl_desc.set_attribute_value("plcHost", plchost)
-pl_desc.set_attribute_value("tapPortBase", port_base)
-pl_desc.set_attribute_value("p2pDeployment", False) # it's interactive, we don't want it in tests
-pl_desc.set_attribute_value("cleanProc", True)
-pl_desc.set_attribute_value("plLogLevel", "DEBUG")
-       
-pl_inet = pl_desc.create("Internet")
-
-hostnames = ["ait21.us.es", "planetlab4.cslab.ece.ntua.gr", "kostis.di.uoa.gr", "pl1.uni-rostock.de" ]
-apps = []
-
-for hostname in hostnames:
-    pl_node = add_node(pl_desc, pl_inet, hostname)
-    pl_app = add_app(pl_desc, pl_node)
-    apps.append(pl_app)
-
-xml = exp_desc.to_xml()
-
-controller = ExperimentController(xml, root_dir)
-controller.start()
-
-stop = False
-
-while not stop:
-    time.sleep(0.5)
-
-    stop = True
-    for pl_app in set(apps):
-        if not controller.is_finished(pl_app.guid):
-            stop = False
-            break
-        else:
-            apps.remove(pl_app)
-
-controller.stop()
-controller.shutdown()
-
diff --git a/examples/planetlab_package_install.py b/examples/planetlab_package_install.py
deleted file mode 100644 (file)
index 650a5df..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import getpass
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-from nepi.util import proxy
-from nepi.util.constants import DeploymentConfiguration as DC
-import os
-import shutil
-import tempfile
-import time
-
-def add_node(pl_desc, pl_inet, hostname):
-    pl_node = pl_desc.create("Node")
-    pl_node.set_attribute_value("hostname", hostname)
-    pl_iface = pl_desc.create("NodeInterface")
-    pl_iface.connector("inet").connect(pl_inet.connector("devs"))
-    pl_node.connector("devs").connect(pl_iface.connector("node"))
-
-    return pl_node
-
-def add_dependency(pl_desc, pl_node):
-    pl_dep = pl_desc.create("Dependency")
-    pl_dep.set_attribute_value("depends", "gcc vim emacs")
-    pl_dep.connector("node").connect(pl_node.connector("deps"))
-    
-    return pl_dep
-
-root_dir = tempfile.mkdtemp()
-user = getpass.getuser()
-slicename = os.environ["PL_SLICE"]
-plchost = "www.planet-lab.eu"
-port_base = 2000 + (os.getpid() % 1000) * 13
-pl_ssh_key = os.environ.get(
-    "PL_SSH_KEY",
-    "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'],) )
-pl_user = os.environ.get('PL_USER')
-pl_pwd = os.environ.get('PL_PASS')
-
-exp_desc = ExperimentDescription()
-
-pl_provider = FactoriesProvider("planetlab")
-pl_desc = exp_desc.add_testbed_description(pl_provider)
-pl_desc.set_attribute_value("homeDirectory", root_dir)
-pl_desc.set_attribute_value("slice", slicename)
-pl_desc.set_attribute_value("sliceSSHKey", pl_ssh_key)
-pl_desc.set_attribute_value("authUser", pl_user)
-pl_desc.set_attribute_value("authPass", pl_pwd)
-pl_desc.set_attribute_value("plcHost", plchost)
-pl_desc.set_attribute_value("tapPortBase", port_base)
-pl_desc.set_attribute_value("p2pDeployment", False) # it's interactive, we don't want it in tests
-pl_desc.set_attribute_value("cleanProc", True)
-pl_desc.set_attribute_value("plLogLevel", "DEBUG")
-       
-pl_inet = pl_desc.create("Internet")
-
-hostnames = ["ait21.us.es", "planetlab4.cslab.ece.ntua.gr", "kostis.di.uoa.gr", "pl1.uni-rostock.de" ]
-deps = []
-
-for hostname in hostnames:
-    pl_node = add_node(pl_desc, pl_inet, hostname)
-    pl_dep = add_dependency(pl_desc, pl_node)
-    deps.append(pl_dep)
-
-xml = exp_desc.to_xml()
-
-controller = ExperimentController(xml, root_dir)
-controller.start()
-
-stop = False
-
-while not stop:
-    stop = True
-    for pl_dep in set(deps):
-        if not controller.is_finished(pl_dep.guid):
-            stop = False
-            break
-        else:
-            deps.remove(pl_dep)
-
-    time.sleep(0.5)
-
-controller.stop()
-controller.shutdown()
-
diff --git a/examples/planetlab_simple_experiment.py b/examples/planetlab_simple_experiment.py
deleted file mode 100644 (file)
index a1be197..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import getpass
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-from nepi.util import proxy
-from nepi.util.constants import DeploymentConfiguration as DC
-import os
-import shutil
-import tempfile
-import time
-
-root_dir = tempfile.mkdtemp()
-user = getpass.getuser()
-slicename = os.environ["PL_SLICE"]
-plchost = "www.planet-lab.eu"
-port_base = 2000 + (os.getpid() % 1000) * 13
-pl_ssh_key = os.environ.get(
-    "PL_SSH_KEY",
-    "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'],) )
-pl_user = os.environ.get('PL_USER')
-pl_pwd = os.environ.get('PL_PASS')
-
-exp_desc = ExperimentDescription()
-
-pl_provider = FactoriesProvider("planetlab")
-pl_desc = exp_desc.add_testbed_description(pl_provider)
-pl_desc.set_attribute_value("homeDirectory", root_dir)
-pl_desc.set_attribute_value("slice", slicename)
-pl_desc.set_attribute_value("sliceSSHKey", pl_ssh_key)
-pl_desc.set_attribute_value("authUser", pl_user)
-pl_desc.set_attribute_value("authPass", pl_pwd)
-pl_desc.set_attribute_value("plcHost", plchost)
-pl_desc.set_attribute_value("tapPortBase", port_base)
-pl_desc.set_attribute_value("p2pDeployment", False) # it's interactive, we don't want it in tests
-pl_desc.set_attribute_value("cleanProc", True)
-pl_desc.set_attribute_value("plLogLevel", "DEBUG")
-       
-pl_inet = pl_desc.create("Internet")
-
-pl_node1 = pl_desc.create("Node")
-pl_iface1 = pl_desc.create("NodeInterface")
-pl_iface1.set_attribute_value("label", "iface1")
-pl_iface1.connector("inet").connect(pl_inet.connector("devs"))
-pl_node1.connector("devs").connect(pl_iface1.connector("node"))
-
-pl_node2 = pl_desc.create("Node")
-pl_iface2 = pl_desc.create("NodeInterface")
-pl_iface2.set_attribute_value("label", "iface2")
-pl_iface2.connector("inet").connect(pl_inet.connector("devs"))
-pl_node2.connector("devs").connect(pl_iface2.connector("node"))
-
-pl_app1 = pl_desc.create("Application")
-pl_app1.set_attribute_value("command", "ping -qc3 {#[iface2].addr[0].[Address]#}")
-pl_app1.enable_trace("stdout")
-pl_app1.connector("node").connect(pl_node1.connector("apps"))
-
-pl_app2 = pl_desc.create("Application")
-pl_app2.set_attribute_value("command", "ping -qc3 {#[iface1].addr[0].[Address]#}")
-pl_app2.enable_trace("stdout")
-pl_app2.connector("node").connect(pl_node2.connector("apps"))
-
-xml = exp_desc.to_xml()
-
-controller = ExperimentController(xml, root_dir)
-controller.start()
-while (not controller.is_finished(pl_app1.guid) or not controller.is_finished(pl_app1.guid)):
-    time.sleep(0.5)
-
-ping_result1 = controller.trace(pl_app1.guid, "stdout")
-print ping_result1
-ping_result2 = controller.trace(pl_app2.guid, "stdout")
-print ping_result2
-
-controller.stop()
-controller.shutdown()
-
diff --git a/examples/streaming/ccn_broadcast.py b/examples/streaming/ccn_broadcast.py
deleted file mode 100644 (file)
index deb306f..0000000
+++ /dev/null
@@ -1,551 +0,0 @@
-#!/usr/bin/env python
-
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-from nepi.util.constants import ApplicationStatus as AS
-from optparse import OptionParser, SUPPRESS_HELP
-import os
-import tempfile
-import time
-import uuid
-
-# Trak SIGTERM, and set global termination flag instead of dying
-import signal
-TERMINATE = []
-def _finalize(sig,frame):
-    global TERMINATE
-    TERMINATE.append(None)
-signal.signal(signal.SIGTERM, _finalize)
-signal.signal(signal.SIGINT, _finalize)
-
-class MonitorInfo(object):
-    TYPE_ROOT = "root"
-    TYPE_MID  = "middle"
-    TYPE_LEAF = "leaf"
-
-    def __init__(self, hostname, type):
-        self.hostname = hostname
-        self.type = type
-        self.cpumem_monitor = None
-        self.net_out_monitor = None
-        self.net_in_monitor = None
-        self.ccnd = None
-        self.ccncat = None
-        self.ccnseqwriter = None
-
-def create_slice(exp_desc, slicename, plc_host, pl_user, pl_pwd, 
-        pl_ssh_key, root_dir):
-    pl_provider = FactoriesProvider("planetlab")
-    slice_desc = exp_desc.add_testbed_description(pl_provider)
-    slice_desc.set_attribute_value("homeDirectory", root_dir)
-    slice_desc.set_attribute_value("slice", slicename)
-    slice_desc.set_attribute_value("sliceSSHKey", pl_ssh_key)
-    slice_desc.set_attribute_value("authUser", pl_user)
-    slice_desc.set_attribute_value("authPass", pl_pwd)
-    slice_desc.set_attribute_value("plcHost", plc_host)
-    # Kills all running processes before starting the experiment
-    slice_desc.set_attribute_value("cleanProc", True)
-    # NOTICE: Setting 'cleanHome' to 'True' will erase all previous
-    # folders in the sliver Home directory, including result files!
-    slice_desc.set_attribute_value("cleanHome", True)
-    slice_desc.set_attribute_value("plLogLevel", "DEBUG")
-    return slice_desc
-def create_node(hostname, pl_inet, slice_desc):
-    pl_node = slice_desc.create("Node")
-    pl_node.set_attribute_value("hostname", hostname)
-    pl_node.set_attribute_value("label", "%d" % pl_node.guid)
-    pl_node.set_attribute_value("operatingSystem", "f12")
-    pl_iface = slice_desc.create("NodeInterface")
-    pl_iface.set_attribute_value("label", "iface_%d" % pl_node.guid)
-    pl_iface.connector("inet").connect(pl_inet.connector("devs"))
-    pl_node.connector("devs").connect(pl_iface.connector("node"))
-    return pl_node, pl_iface
-
-def create_ccnd(pl_node, slice_desc, pl_ifaces, port):
-    pl_app = slice_desc.create("CCNxDaemon")
-    pl_app.set_attribute_value("ccnxVersion", "0.7.1")
-    pl_app.set_attribute_value("repository", True)
-    
-    # We use a wildcard to replace the public IP address of the node during runtime,
-    # once this IP is known
-    routes = "|".join(map(lambda pl_iface: "ccnx:/ udp {#[%s].addr[0].[Address]#}" % 
-        pl_iface.get_attribute_value("label"), pl_ifaces))
-    
-    # Add unicast ccn routes 
-    pl_app.set_attribute_value("ccnRoutes", routes)
-
-    # Use a specific port to bind the CCNx daemon
-    if port:
-        pl_app.set_attribute_value("ccnLocalPort", port)
-
-    pl_app.enable_trace("stdout")
-    pl_app.enable_trace("stderr")
-    pl_app.connector("node").connect(pl_node.connector("apps"))
-    return pl_app
-
-def create_ccnpush(movie, pl_node, slice_desc, port):
-    pl_app = slice_desc.create("Application")
-    pl_app.set_attribute_value("stdin", movie)
-
-    command = "ccnseqwriter -r ccnx:/VIDEO"
-    if port:
-        command = "CCN_LOCAL_PORT=%d %s " % (port, command)
-
-    pl_app.set_attribute_value("command", command)
-
-    pl_app.enable_trace("stdout")
-    pl_app.enable_trace("stderr")
-    pl_app.connector("node").connect(pl_node.connector("apps"))
-    return pl_app
-
-def create_ccnpull(pl_node, slice_desc, port):
-    pl_app = slice_desc.create("Application")
-    pl_app.set_attribute_value("rpmFusion", True)
-    pl_app.set_attribute_value("depends", "vlc")
-
-    #command = " sudo -S dbus-uuidgen --ensure ; ccncat ccnx:/VIDEO"
-    command = " ccncat ccnx:/VIDEO"
-    if port:
-        command = "CCN_LOCAL_PORT=%d %s " % (port, command)
-
-    #command += " | vlc -I dummy - vlc://quit > /dev/null "
-    command += " > /dev/null "
-    pl_app.set_attribute_value("command", command)
-    
-    pl_app.enable_trace("stdout")
-    pl_app.enable_trace("stderr")
-    pl_app.connector("node").connect(pl_node.connector("apps"))
-    return pl_app
-
-def create_cpumem_monitor(pl_node, slice_desc):
-    label = "%d_cpumem" % pl_node.guid
-    pl_app = slice_desc.create("Application")
-    pl_app.set_attribute_value("label", label)
-    pl_app.set_attribute_value("command", 
-            "while true; do echo $(date +%Y%m%d%H%M%S%z) "\
-            " $(top -b -n 1 |  grep 'bash\|python' | sed 's/\s\s*/ /g' | "\
-            " sed 's/^\s//g' | cut -d' ' -f9,10,11 | awk '{ sum1 +=$1; sum2 += $2; } "\
-            " END {printf \"%2.1f %2.1f 0:00.00\", sum1, sum2;}'); sleep 1 ; done ")
-
-    pl_app.enable_trace("stdout")
-    pl_app.enable_trace("stderr")
-    pl_node.connector("apps").connect(pl_app.connector("node"))
-    return pl_app
-
-def create_net_monitor(pl_node, slice_desc, pl_ifaces, lblprefix = "any", pcap=False):
-    label = "%d_%s_net" % (pl_node.guid, lblprefix)
-    hosts = " or ".join(map(lambda pl_iface: " ( host {#[%s].addr[0].[Address]#} ) " % 
-        pl_iface.get_attribute_value("label"), pl_ifaces))
-    pl_app = slice_desc.create("Application")
-    pl_app.set_attribute_value("label", label)
-    pl_app.set_attribute_value("rpmFusion", True)
-    pl_app.set_attribute_value("sudo", True)
-    pl_app.set_attribute_value("depends", "tcpdump pv")
-
-    output = "/dev/null"
-    if pcap:
-        output = "{#[%s].trace[output].[name]#}" % label
-
-    pl_app.set_attribute_value("command", 
-            "tcpdump -l -i eth0 -s 0 -f '(%s)' -w - | pv -fbt >%s 2>>{#[%s].trace[stdout].[name]#}" %
-            (hosts, output, label))
-
-    if pcap:
-        pl_app.enable_trace("output")
-    
-    pl_app.enable_trace("stdout")
-    pl_app.enable_trace("stderr")
-    pl_node.connector("apps").connect(pl_app.connector("node"))
-    return pl_app
-
-def store_results(controller, monitors, results_dir, exp_label):
-    # create results directory for experiment
-    root_path = os.path.join(results_dir, exp_label)
-
-    print "STORING RESULTS in ", root_path
-
-    try:
-        os.makedirs(root_path)
-    except OSError:
-        pass
-
-    # collect information on nodes
-    hosts_info = ""
-
-    for mon in monitors:
-        hosts_info += "%s %s\n" % (mon.hostname, mon.type)
-
-        # create a subdir per hostname
-        node_path = os.path.join(root_path, mon.hostname)
-        try:
-            os.makedirs(node_path)
-        except OSError:
-            pass
-
-        # store monitoring results
-        cpumem_out = controller.trace(mon.cpumem_monitor.guid, "stdout")
-        
-        net_in = None
-        if mon.net_in_monitor:
-            net_in = controller.trace(mon.net_in_monitor.guid, "stdout")
-        
-        net_out = None
-        if mon.net_out_monitor:
-            net_out = controller.trace(mon.net_out_monitor.guid, "stdout")
-
-        ccnd_err = controller.trace(mon.ccnd.guid, "stderr")
-        ccnd_out = controller.trace(mon.ccnd.guid, "stdout")
-        
-        ccncat_err = None
-        if mon.ccncat:
-            ccncat_err = controller.trace(mon.ccncat.guid, "stderr")
-
-        ccnseqwriter_err = None
-        if mon.ccnseqwriter:
-            ccnseqwriter_err = controller.trace(mon.ccnseqwriter.guid, "stderr")
-        
-        results = dict({
-            "cpumem": cpumem_out, 
-            "net_in": net_in, 
-            "net_out": net_out, 
-            "ccnd_err": ccnd_err, 
-            "ccnd_out": ccnd_out, 
-            "ccncat_err": ccncat_err,
-            "ccnseqwriter_err": ccnseqwriter_err })
-
-        for name, result in results.iteritems():
-            if not result:
-                continue
-
-            fpath = os.path.join(node_path, name)
-            f = open(fpath, "w")
-            f.write(result)
-            f.close()
-
-    # store node info file
-    fpath = os.path.join(root_path, "hosts")
-    f = open(fpath, "w")
-    f.write(hosts_info)
-    f.close()
-
-def get_options():
-    slicename = os.environ.get("PL_SLICE")
-    pl_host = os.environ.get("PL_HOST", "www.planet-lab.eu")
-    pl_ssh_key = os.environ.get(
-        "PL_SSH_KEY",
-        "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'],) )
-    pl_user = os.environ.get('PL_USER')
-    pl_pwd = os.environ.get('PL_PASS')
-    exp_label = "%s" % uuid.uuid4()
-
-    usage = "usage: %prog -s <pl_slice> -H <pl_host> -k <ssh_key> -u <pl_user> \
--m <movie> -p <pl_password> -r <results-dir> -l <experiment-label> \
--P <ccnd-port>"
-
-    parser = OptionParser(usage=usage)
-    parser.add_option("-s", "--slicename", dest="slicename", 
-            help="PlanetLab slicename", default=slicename, type="str")
-    parser.add_option("-H", "--pl-host", dest="pl_host", 
-            help="PlanetLab site (e.g. www.planet-lab.eu)", 
-            default=pl_host, type="str")
-    parser.add_option("-k", "--ssh-key", dest="pl_ssh_key", 
-            help="Path to private ssh key used for PlanetLab authentication", 
-            default=pl_ssh_key, type="str")
-    parser.add_option("-u", "--pl-user", dest="pl_user", 
-            help="PlanetLab account user (i.e. Registration email address)", 
-            default=pl_user, type="str")
-    parser.add_option("-p", "--pl-pwd", dest="pl_pwd", 
-            help="PlanetLab account password", default=pl_pwd, type="str")
-    parser.add_option("-m", "--movie", dest="movie", 
-            help="Stream movie", type="str")
-    parser.add_option("-r", "--results", dest="results_dir", default = "/tmp", 
-            help="Path to directory to store results", type="str")
-    parser.add_option("-l", "--label", dest="exp_label", default = exp_label, 
-            help="Label to identify experiment results", type="str")
-    parser.add_option("-t", "--time", dest="time_to_run", default = 20, 
-            help="Time to run the experiment in hours", type="float")
-    parser.add_option("-P", "--port", dest="port", 
-            help="Port to bind the CCNx daemon", type="int")
-
-    (options, args) = parser.parse_args()
-
-    if not options.movie:
-        parser.error("movie is a required argument")
-
-    return (options.slicename, options.pl_host, options.pl_user, 
-            options.pl_pwd, options.pl_ssh_key, options.movie,
-            options.results_dir, options.exp_label, options.time_to_run,
-            options.port)
-
-if __name__ == '__main__':
-    root_dir = tempfile.mkdtemp()
-    (pl_slice, 
-            pl_host, 
-            pl_user, 
-            pl_pwd, 
-            pl_ssh_key, 
-            movie, 
-            results_dir,
-            exp_label,
-            time_to_run,
-            port) = get_options()
-
-    # list to store information on monitoring apps per node
-    monitors = []
-    
-    # Create the experiment description object
-    exp_desc = ExperimentDescription()
-
-    # Create slice
-    slice_desc = create_slice(exp_desc, pl_slice, pl_host, pl_user, pl_pwd,
-        pl_ssh_key, root_dir)
-   
-    # Create the Internet box object
-    pl_inet = slice_desc.create("Internet")
-
-    ### Level 0 - Root node
-    root_hostname = "ple6.ipv6.lip6.fr"
-    (root_node, root_iface) = create_node(root_hostname, pl_inet, slice_desc)
-
-    ### Level 1 - Intermediate nodes
-    l1_hostnames = dict()
-    l1_hostnames["fi"] = "planetlab-1.research.netlab.hut.fi"
-    l1_hostnames["se"] = "planetlab2.sics.se"
-    l1_hostnames["es"] = "planetlab1.um.es"
-    l1_hostnames["pt"] = "planetlab-um10.di.uminho.pt"
-    l1_hostnames["pl"] = "pandora.we.po.opole.pl"
-    l1_hostnames["it"] = "planetlab02.dis.unina.it"
-    l1_hostnames["de"] = "planetlab2.wiwi.hu-berlin.de"
-    l1_hostnames["fr"] = "planetlab2.u-strasbg.fr"
-    l1_hostnames["gr"] = "planetlab1.ics.forth.gr"
-    l1_hostnames["ch"] = "lsirextpc02.epfl.ch"
-    l1_hostnames["uk"] = "planetlab2.aston.ac.uk"
-    l1_hostnames["be"] = "planetlab1.extern.kuleuven.be"
-
-    l1_ifaces = dict()
-    l1_nodes = dict()
-    
-    for country, hostname in l1_hostnames.iteritems():
-        l1_node, l1_iface = create_node(hostname, pl_inet, slice_desc)
-        l1_ifaces[country] = l1_iface
-        l1_nodes[country] = l1_node
-
-    ### Level 0 - CCN & Monitoring
-    
-    # Add CCN Daemon to root node
-    out_ifaces = l1_ifaces.values()
-    root_ccnd = create_ccnd(root_node, slice_desc, out_ifaces, port)
-
-    # Publish video in root node
-    root_ccnseqwriter = create_ccnpush(movie, root_node, slice_desc, port)
-
-    # Create monitor info object for root node
-    root_mon = MonitorInfo(root_hostname, MonitorInfo.TYPE_ROOT)
-    monitors.append(root_mon)
-   
-    # Add memory and cpu monitoring for root node
-    root_mon.cpumem_monitor = create_cpumem_monitor(root_node, slice_desc)
-    root_mon.net_out_monitor = create_net_monitor(root_node, slice_desc, 
-            out_ifaces, lblprefix = "out")
-    root_mon.ccnd = root_ccnd
-    root_mon.ccnseqwriter = root_ccnseqwriter
-
-    ### Level 2 - Leaf nodes
-    l2_hostnames = dict()
-    l2_hostnames["fi"] = ["planetlab1.rd.tut.fi",
-             "planetlab-2.research.netlab.hut.fi",
-             "planetlab2.willab.fi",
-             "planetlab3.hiit.fi",
-             "planetlab4.hiit.fi",
-             "planetlab1.willab.fi",
-    ]
-
-    l2_hostnames["se"] = ["planetlab1.s3.kth.se",
-             "itchy.comlab.bth.se",
-             "planetlab-1.ida.liu.se",
-             "scratchy.comlab.bth.se",
-             "planetlab2.s3.kth.se",
-             "planetlab1.sics.se",
-    ]
-
-    l2_hostnames["es"] = ["planetlab1.tlm.unavarra.es",
-             "planetlab2.uc3m.es",
-             "planetlab2.upc.es",
-             "ait21.us.es",
-             "planetlab3.upc.es",
-             "planetlab1.uc3m.es",
-             "planetlab2.dit.upm.es",
-             "planetlab1.upc.es",
-             "planetlab2.um.es",
-    ]
-
-    l2_hostnames["pt"] = ["planet1.servers.ua.pt",
-             "planetlab2.fct.ualg.pt",
-             "planetlab-1.tagus.ist.utl.pt",
-             "planetlab-2.tagus.ist.utl.pt",
-             "planetlab-um00.di.uminho.pt",
-             "planet2.servers.ua.pt",
-    ]
-
-    l2_hostnames["pl"] = ["planetlab1.mini.pw.edu.pl",
-             "roti.mimuw.edu.pl",
-             "planetlab1.ci.pwr.wroc.pl",
-             "planetlab1.pjwstk.edu.pl",
-             "ple2.tu.koszalin.pl",
-             "planetlab2.ci.pwr.wroc.pl",
-             "planetlab2.cyfronet.pl",
-             "plab2.ple.silweb.pl",
-             "planetlab1.cyfronet.pl",
-             "plab4.ple.silweb.pl",
-             "ple2.dmcs.p.lodz.pl",
-             "planetlab2.pjwstk.edu.pl",
-             "ple1.dmcs.p.lodz.pl",
-    ]
-
-    l2_hostnames["it"] = ["gschembra3.diit.unict.it",
-             "onelab6.iet.unipi.it",
-             "planetlab1.science.unitn.it",
-             "planetlab-1.ing.unimo.it",
-             "gschembra4.diit.unict.it",
-    ]
-
-    l2_hostnames["de"] = ["iraplab1.iralab.uni-karlsruhe.de",
-             "planetlab-1.fokus.fraunhofer.de",
-             "iraplab2.iralab.uni-karlsruhe.de",
-             "planet2.zib.de",
-             "pl2.uni-rostock.de",
-             "onelab-1.fhi-fokus.de",
-             "planet2.l3s.uni-hannover.de",
-             "planetlab1.exp-math.uni-essen.de",
-             "planetlab-2.fokus.fraunhofer.de",
-             "planetlab02.tkn.tu-berlin.de",
-             "planetlab1.informatik.uni-goettingen.de",
-             "planetlab1.informatik.uni-erlangen.de",
-             "planetlab2.exp-math.uni-essen.de",
-             "planetlab2.lkn.ei.tum.de",
-             "planetlab1.wiwi.hu-berlin.de",
-             "planet1.l3s.uni-hannover.de",
-             "planetlab1.informatik.uni-wuerzburg.de",
-              "planet1.zib.de",
-    ]
-
-    l2_hostnames["fr"] = ["host3-plb.loria.fr",
-             "inriarennes1.irisa.fr",
-             "inriarennes2.irisa.fr",
-             "peeramide.irisa.fr",
-             "pl1.bell-labs.fr",
-             "pl2.bell-labs.fr",
-             "host4-plb.loria.fr",
-             "planetlab-1.imag.fr",
-             "planetlab-2.imag.fr",
-             "ple2.ipv6.lip6.fr",
-             "planetlab1.u-strasbg.fr",
-    ]
-
-    l2_hostnames["gr"] = ["kostis.di.uoa.gr",
-             "planetlab1.ionio.gr",
-             "planetlab2.ionio.gr",
-             "planetlab2.cs.uoi.gr",
-             "stella.planetlab.ntua.gr",
-             "vicky.planetlab.ntua.gr",
-             "planetlab1.cs.uoi.gr",
-             "pl002.ece.upatras.gr",
-    ]
-
-    l2_hostnames["ch"] = ["planetlab04.cnds.unibe.ch",
-             "lsirextpc01.epfl.ch",
-             "planetlab2.csg.uzh.ch",
-             "planetlab1.csg.uzh.ch",
-             "planetlab-2.cs.unibas.ch",
-             "planetlab-1.cs.unibas.ch",
-    ]
-
-    l2_hostnames["uk"] = ["planetlab4.cs.st-andrews.ac.uk",
-             "planetlab-1.imperial.ac.uk",
-             "planetlab3.xeno.cl.cam.ac.uk",
-             "planetlab1.xeno.cl.cam.ac.uk",
-             "planetlab2.xeno.cl.cam.ac.uk",
-             "planetlab3.cs.st-andrews.ac.uk",
-             "planetlab1.aston.ac.uk",
-             "planetlab1.nrl.eecs.qmul.ac.uk",
-    ]
-
-    l2_hostnames["be"] = ["chimay.infonet.fundp.ac.be",
-             "orval.infonet.fundp.ac.be",
-             "rochefort.infonet.fundp.ac.be",
-             "planck227ple.test.ibbt.be",
-    ]
-    
-    for country, hostnames in l2_hostnames.iteritems():
-        l2_ifaces = []
-        l1_hostname = l1_hostnames[country]
-        l1_iface = l1_ifaces[country]
-        l1_node = l1_nodes[country]
-        
-        for hostname in hostnames:
-            l2_node, l2_iface = create_node(hostname, pl_inet, slice_desc)
-            l2_ifaces.append(l2_iface)
-
-            in_ifaces = [l1_iface]
-
-            ### Level 2 - CCN & Monitoring
-        
-            # Add CCN Daemon to intermediate nodes
-            ccnd = create_ccnd(l2_node, slice_desc, in_ifaces, port)
-
-            # Retrieve video in leaf node
-            ccncat = create_ccnpull(l2_node, slice_desc, port)
-
-            # Create monitor info object for intermediate nodes
-            mon = MonitorInfo(hostname, MonitorInfo.TYPE_LEAF)
-            monitors.append(mon)
-       
-            # Add memory and cpu monitoring for intermediate nodes
-            mon.cpumem_monitor = create_cpumem_monitor(l2_node, slice_desc)
-            mon.net_in_monitor = create_net_monitor(l2_node, slice_desc, 
-                    in_ifaces, lblprefix = "in")
-            mon.ccnd = ccnd
-            mon.ccncat = ccncat
-
-        ### Level 1 - CCN & Monitoring
-
-        in_ifaces = [root_iface]
-        out_ifaces = l2_ifaces
-        all_ifaces = list(out_ifaces)
-        all_ifaces.extend(in_ifaces)
-
-        # Add CCN Daemon to intermediate nodes
-        ccnd = create_ccnd(l1_node, slice_desc, all_ifaces, port)
-
-        # Create monitor info object for intermediate nodes
-        mon = MonitorInfo(l1_hostname, MonitorInfo.TYPE_MID)
-        monitors.append(mon)
-       
-        # Add memory and cpu monitoring for intermediate nodes
-        mon.cpumem_monitor = create_cpumem_monitor(l1_node, slice_desc)
-        mon.net_in_monitor = create_net_monitor(l1_node, slice_desc, 
-                in_ifaces, lblprefix="in")
-        mon.net_out_monitor = create_net_monitor(l1_node, slice_desc, 
-                out_ifaces, lblprefix="out")
-        mon.ccnd = ccnd
-
-    xml = exp_desc.to_xml()
-   
-    controller = ExperimentController(xml, root_dir)
-    controller.start()
-
-    start_time = time.time()
-    duration = time_to_run * 60 # in seconds
-    while not TERMINATE:
-        time.sleep(1)
-        if (time.time() - start_time) > duration: # elapsed time
-            TERMINATE.append(None)
-
-    controller.stop()
-    # store results in results dir
-    store_results(controller, monitors, results_dir, exp_label)
-   
-    controller.shutdown()
-
diff --git a/examples/streaming/vlc_broadcast.py b/examples/streaming/vlc_broadcast.py
deleted file mode 100644 (file)
index 7d1d19f..0000000
+++ /dev/null
@@ -1,458 +0,0 @@
-#!/usr/bin/env python
-
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-from nepi.util.constants import ApplicationStatus as AS
-from optparse import OptionParser, SUPPRESS_HELP
-import os
-import tempfile
-import time
-import uuid
-
-"""
-This experiment evaluates the consumption of computer resources when using
-VLC for Internet broasdcasting using PlanetLab nodes as both server and clients. 
-A root node (server) streams a broadcast in a loop, while the clients retrieve
-the same video over and over until experiment run time is elapsed.
-
-While the experiment is running cpu and memory usage, and the amount of bytes 
-transmitted per stream are traced to files.
-
-"""
-
-# Trak SIGTERM, and set global termination flag instead of dying
-import signal
-TERMINATE = []
-def _finalize(sig,frame):
-    global TERMINATE
-    TERMINATE.append(None)
-signal.signal(signal.SIGTERM, _finalize)
-signal.signal(signal.SIGINT, _finalize)
-
-class MonitorInfo(object):
-    TYPE_ROOT = "root"
-    TYPE_LEAF = "leaf"
-
-    def __init__(self, hostname, type):
-        self.hostname = hostname
-        self.type = type
-        self.cpumem_monitor = None
-        self.net_in_monitor = None
-        self.net_out_monitor = None
-        self.vlc = None
-
-def create_slice(exp_desc, slicename, plc_host, pl_user, pl_pwd, 
-        pl_ssh_key, root_dir):
-    pl_provider = FactoriesProvider("planetlab")
-    slice_desc = exp_desc.add_testbed_description(pl_provider)
-    slice_desc.set_attribute_value("homeDirectory", root_dir)
-    slice_desc.set_attribute_value("slice", slicename)
-    slice_desc.set_attribute_value("sliceSSHKey", pl_ssh_key)
-    slice_desc.set_attribute_value("authUser", pl_user)
-    slice_desc.set_attribute_value("authPass", pl_pwd)
-    slice_desc.set_attribute_value("plcHost", plc_host)
-    # Kills all running processes before starting the experiment
-    slice_desc.set_attribute_value("cleanProc", True)
-    # NOTICE: Setting 'cleanHome' to 'True' will erase all previous
-    # folders in the sliver Home directory, including result files!
-    slice_desc.set_attribute_value("cleanHome", True)
-    slice_desc.set_attribute_value("plLogLevel", "DEBUG")
-    return slice_desc
-def create_node(hostname, pl_inet, slice_desc):
-    pl_node = slice_desc.create("Node")
-    pl_node.set_attribute_value("hostname", hostname)
-    pl_node.set_attribute_value("label", "%d" % pl_node.guid)
-    pl_node.set_attribute_value("operatingSystem", "f12")
-    pl_iface = slice_desc.create("NodeInterface")
-    pl_iface.set_attribute_value("label", "iface_%d" % pl_node.guid)
-    pl_iface.connector("inet").connect(pl_inet.connector("devs"))
-    pl_node.connector("devs").connect(pl_iface.connector("node"))
-    return pl_node, pl_iface
-
-def create_vlc_server(movie, pl_node, slice_desc):
-    mv = os.path.basename(movie)
-    pl_app = slice_desc.create("Application")
-    pl_app.set_attribute_value("rpmFusion", True)
-    pl_app.set_attribute_value("depends", "vlc")
-    pl_app.set_attribute_value("build", 
-    #    "echo -e 'new TEST vod enabled\\nsetup TEST input %s' > ${SOURCES}/VOD.vlm" % mv)
-       "echo -e 'new TEST broadcast enabled loop\\n"\
-       "setup TEST input %s\\n"\
-       "setup TEST output #rtp{mux=ts,sdp=rtsp://0.0.0.0:8554/TEST}\\n\\n"\
-       "new test_sched schedule enabled\\n"\
-       "setup test_sched append control TEST play' > ${SOURCES}/VOD.vlm" % mv)
-
-    pl_app.set_attribute_value("sources", "%s" % movie)
-    pl_app.set_attribute_value("command",
-        "sudo -S dbus-uuidgen --ensure ; vlc -vvv -I dummy --vlm-conf VOD.vlm")
-    pl_app.enable_trace("stdout")
-    pl_app.enable_trace("stderr")
-    pl_node.connector("apps").connect(pl_app.connector("node"))
-    return pl_app
-
-def create_vlc_client(root_node, pl_node, slice_desc):
-    label = "%d_app" % pl_node.guid
-    hostname = root_node.get_attribute_value("hostname")
-    pl_app = slice_desc.create("Application")
-    pl_app.set_attribute_value("label", label)
-    pl_app.set_attribute_value("rpmFusion", True)
-    pl_app.set_attribute_value("depends", "vlc")
-    pl_app.set_attribute_value("command",
-       "sudo -S dbus-uuidgen --ensure ; sleep 5;" \
-       "vlc -I dummy rtsp://%s:8554/TEST --sout '#std{access=file,mux=ts,dst=/dev/null}'" % (hostname))
-    pl_app.enable_trace("stdout")
-    pl_app.enable_trace("stderr")
-    pl_node.connector("apps").connect(pl_app.connector("node"))
-    return pl_app
-
-def create_cpumem_monitor(pl_node, slice_desc):
-    """ This function creates a monitoring application for the
-    utilization of node resources by the vlc application.
-
-    The format of the stdout trace file is the following:
-    'timestamp cpu(%) mem(%) time'
-    """
-    label = "%d_cpumem" % pl_node.guid
-    pl_app = slice_desc.create("Application")
-    pl_app.set_attribute_value("label", label)
-    pl_app.set_attribute_value("command", 
-            "while true ; do echo $(date +%Y%m%d%H%M%S%z) " \
-            " $(top -b -n 1 | grep 'vlc' | head -1 | sed 's/\s\s*/ /g' | sed 's/^\s//g' | cut -d' ' -f9,10,11)" \
-            "; sleep 1 ; done")
-    pl_app.enable_trace("stdout")
-    pl_app.enable_trace("stderr")
-    pl_node.connector("apps").connect(pl_app.connector("node"))
-    return pl_app
-
-def create_net_monitor(pl_node, slice_desc, pl_ifaces, pcap=False):
-    """ This function creates a monitoring application for the
-    amount of bytes transmitted/received by the vlc application.
-
-    The format of the stdout trace file is the following:
-    'total-Mbytes total-time'
-    """
-    label = "%d_net" % pl_node.guid
-    hosts = " or ".join(map(lambda pl_iface: " ( host {#[%s].addr[0].[Address]#} ) " % 
-        pl_iface.get_attribute_value("label"), pl_ifaces))
-    pl_app = slice_desc.create("Application")
-    pl_app.set_attribute_value("label", label)
-    pl_app.set_attribute_value("rpmFusion", True)
-    pl_app.set_attribute_value("sudo", True)
-    pl_app.set_attribute_value("depends", "tcpdump pv")
-
-    output = "/dev/null"
-    if pcap:
-        output = "{#[%s].trace[output].[name]#}" % label
-
-    pl_app.set_attribute_value("command", 
-            "tcpdump -l -i eth0 -s 0 -f '(%s)' -w - | pv -fbt >%s 2>>{#[%s].trace[stdout].[name]#}" %
-            (hosts, output, label))
-
-    if pcap:
-        pl_app.enable_trace("output")
-    
-    pl_app.enable_trace("stdout")
-    pl_app.enable_trace("stderr")
-    pl_node.connector("apps").connect(pl_app.connector("node"))
-    return pl_app
-
-def store_results(controller, monitors, results_dir, exp_label):
-    # create results directory for experiment
-    root_path = os.path.join(results_dir, exp_label)
-
-    print "STORING RESULTS in ", root_path
-
-    try:
-        os.makedirs(root_path)
-    except OSError:
-        pass
-
-    # collect information on nodes
-    hosts_info = ""
-
-    for mon in monitors:
-        hosts_info += "%s %s\n" % (mon.hostname, mon.type)
-
-        # create a subdir per hostname
-        node_path = os.path.join(root_path, mon.hostname)
-        try:
-            os.makedirs(node_path)
-        except OSError:
-            pass
-
-        # store monitoring results
-   
-        cpumem_out = controller.trace(mon.cpumem_monitor.guid, "stdout")
-
-        net_in = None
-        if mon.net_in_monitor:
-            net_in = controller.trace(mon.net_in_monitor.guid, "stdout")
-        
-        net_out = None
-        if mon.net_out_monitor:
-            net_out = controller.trace(mon.net_out_monitor.guid, "stdout")
-
-        vlc_err = controller.trace(mon.vlc.guid, "stderr")
-        vlc_out = controller.trace(mon.vlc.guid, "stdout")
-
-        results = dict({
-            "cpumem": cpumem_out, 
-            "net_in": net_in, 
-            "net_out": net_out, 
-            "vlc_out": vlc_out,
-            "vlc_err": vlc_err })
-
-        for name, result in results.iteritems():
-            if not result:
-                continue
-
-            fpath = os.path.join(node_path, name)
-            f = open(fpath, "w")
-            f.write(result)
-            f.close()
-
-    # store node info file
-    fpath = os.path.join(root_path, "hosts")
-    f = open(fpath, "w")
-    f.write(hosts_info)
-    f.close()
-
-def get_options():
-    slicename = os.environ.get("PL_SLICE")
-    pl_host = os.environ.get("PL_HOST", "www.planet-lab.eu")
-    pl_ssh_key = os.environ.get(
-        "PL_SSH_KEY",
-        "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'],) )
-    pl_user = os.environ.get('PL_USER')
-    pl_pwd = os.environ.get('PL_PASS')
-    exp_label = "%s" % uuid.uuid4()
-
-    usage = "usage: %prog -s <pl_slice> -H <pl_host> -k <ssh_key> -u <pl_user> \
-            -p <pl_password> -m <movie> -r <results-dir> -l <experiment-label>"
-
-    parser = OptionParser(usage=usage)
-    parser.add_option("-s", "--slicename", dest="slicename", 
-            help="PlanetLab slicename", default=slicename, type="str")
-    parser.add_option("-H", "--pl-host", dest="pl_host", 
-            help="PlanetLab site (e.g. www.planet-lab.eu)", 
-            default=pl_host, type="str")
-    parser.add_option("-k", "--ssh-key", dest="pl_ssh_key", 
-            help="Path to private ssh key used for PlanetLab authentication", 
-            default=pl_ssh_key, type="str")
-    parser.add_option("-u", "--pl-user", dest="pl_user", 
-            help="PlanetLab account user (i.e. Registration email address)", 
-            default=pl_user, type="str")
-    parser.add_option("-p", "--pl-pwd", dest="pl_pwd", 
-            help="PlanetLab account password", default=pl_pwd, type="str")
-    parser.add_option("-m", "--movie", dest="movie", 
-            help="Stream movie", type="str")
-    parser.add_option("-r", "--results", dest="results_dir", default = "/tmp", 
-            help="Path to directory to store results", type="str")
-    parser.add_option("-l", "--label", dest="exp_label", default = exp_label, 
-            help="Label to identify experiment results", type="str")
-    parser.add_option("-t", "--time", dest="time_to_run", default = 20, 
-            help="Time to run the experiment in minutes", type="float")
-
-    (options, args) = parser.parse_args()
-
-    if not options.movie:
-        parser.error("movie is a required argument")
-
-    return (options.slicename, options.pl_host, options.pl_user, 
-            options.pl_pwd, options.pl_ssh_key, options.movie,
-            options.results_dir, options.exp_label, options.time_to_run)
-
-if __name__ == '__main__':
-    root_dir = tempfile.mkdtemp()
-    (pl_slice, 
-            pl_host, 
-            pl_user, 
-            pl_pwd, 
-            pl_ssh_key, 
-            movie, 
-            results_dir,
-            exp_label,
-            time_to_run) = get_options()
-
-    # list to store information on monitoring apps per node
-    monitors = []
-    
-    # Create the experiment description object
-    exp_desc = ExperimentDescription()
-
-    # Create slice
-    slice_desc = create_slice(exp_desc, pl_slice, pl_host, pl_user, pl_pwd,
-        pl_ssh_key, root_dir)
-   
-    # Create the Internet box object
-    pl_inet = slice_desc.create("Internet")
-
-    # Create root node
-    hostname = "ple6.ipv6.lip6.fr"
-    (root_node, root_iface) = create_node(hostname, pl_inet, slice_desc)
-
-    # Create monitor info object for root node
-    root_mon = MonitorInfo(hostname, MonitorInfo.TYPE_ROOT)
-    monitors.append(root_mon)
-
-    # Add VLC service
-    root_vlc = create_vlc_server(movie, root_node, slice_desc)
-    
-    # Add memory and cpu monitoring for root node
-    root_mon.cpumem_monitor = create_cpumem_monitor(root_node, slice_desc)
-
-    # Add reference to vlc app 
-    root_mon.vlc = root_vlc
-
-    # Create leaf nodes
-    cli_apps = []
-    cli_ifaces = []
-
-    hostnames = [#"planetlab1.rd.tut.fi",
-             "planetlab-2.research.netlab.hut.fi",
-             "planetlab2.willab.fi",
-             "planetlab3.hiit.fi",
-             "planetlab4.hiit.fi",
-             "planetlab1.willab.fi",
-             "planetlab1.s3.kth.se",
-             "itchy.comlab.bth.se",
-             "planetlab-1.ida.liu.se",
-             #"scratchy.comlab.bth.se",
-             "planetlab2.s3.kth.se",
-             "planetlab1.sics.se",
-             "planetlab1.tlm.unavarra.es",
-             #"planetlab2.uc3m.es",
-             "planetlab2.upc.es",
-             #"ait21.us.es",
-             "planetlab3.upc.es",
-             #"planetlab1.uc3m.es",
-             "planetlab2.dit.upm.es",
-             "planetlab1.upc.es",
-             "planetlab2.um.es",
-             "planet1.servers.ua.pt",
-             "planetlab2.fct.ualg.pt",
-             "planetlab-1.tagus.ist.utl.pt",
-             "planetlab-2.tagus.ist.utl.pt",
-             "planetlab-um00.di.uminho.pt",
-             "planet2.servers.ua.pt",
-             "planetlab1.mini.pw.edu.pl",
-             "roti.mimuw.edu.pl",
-             "planetlab1.ci.pwr.wroc.pl",
-             "planetlab1.pjwstk.edu.pl",
-             "ple2.tu.koszalin.pl",
-             #"planetlab2.ci.pwr.wroc.pl",
-             "planetlab2.cyfronet.pl",
-             "plab2.ple.silweb.pl",
-             "planetlab1.cyfronet.pl",
-             "plab4.ple.silweb.pl",
-             "ple2.dmcs.p.lodz.pl",
-             "planetlab2.pjwstk.edu.pl",
-             "ple1.dmcs.p.lodz.pl",
-             "pandora.we.po.opole.pl",
-             "gschembra3.diit.unict.it",
-             #"onelab6.iet.unipi.it",
-             #"planetlab1.science.unitn.it",
-             "planetlab-1.ing.unimo.it",
-             "gschembra4.diit.unict.it",
-             "iraplab1.iralab.uni-karlsruhe.de",
-             "planetlab-1.fokus.fraunhofer.de",
-             "iraplab2.iralab.uni-karlsruhe.de",
-             "planet2.zib.de",
-             "pl2.uni-rostock.de",
-             "onelab-1.fhi-fokus.de",
-             "planet2.l3s.uni-hannover.de",
-             "planetlab1.exp-math.uni-essen.de",
-             "planetlab-2.fokus.fraunhofer.de",
-             "planetlab02.tkn.tu-berlin.de",
-             "planetlab1.informatik.uni-goettingen.de",
-             "planetlab1.informatik.uni-erlangen.de",
-             "planetlab2.exp-math.uni-essen.de",
-             "planetlab2.lkn.ei.tum.de",
-             "planetlab1.wiwi.hu-berlin.de",
-             "planet1.l3s.uni-hannover.de",
-             "planetlab1.informatik.uni-wuerzburg.de",
-             "host3-plb.loria.fr",
-             "inriarennes1.irisa.fr",
-             "inriarennes2.irisa.fr",
-             "peeramide.irisa.fr",
-             #"pl1.bell-labs.fr",
-             #"pl2.bell-labs.fr",
-             "host4-plb.loria.fr",
-             "planetlab-1.imag.fr",
-             "planetlab-2.imag.fr",
-             "ple2.ipv6.lip6.fr",
-             "planetlab1.u-strasbg.fr",
-             "kostis.di.uoa.gr",
-             "planetlab1.ionio.gr",
-             "planetlab2.ionio.gr",
-             "planetlab2.cs.uoi.gr",
-             "stella.planetlab.ntua.gr",
-             "vicky.planetlab.ntua.gr",
-             "planetlab1.cs.uoi.gr",
-             "pl002.ece.upatras.gr",
-             "planetlab04.cnds.unibe.ch",
-             "lsirextpc01.epfl.ch",
-             "planetlab2.csg.uzh.ch",
-             "planetlab1.csg.uzh.ch",
-             "planetlab-2.cs.unibas.ch",
-             "planetlab-1.cs.unibas.ch",
-             "planetlab4.cs.st-andrews.ac.uk",
-             "planetlab-1.imperial.ac.uk",
-             "planetlab3.xeno.cl.cam.ac.uk",
-             "planetlab1.xeno.cl.cam.ac.uk",
-             "planetlab2.xeno.cl.cam.ac.uk",
-             "planetlab3.cs.st-andrews.ac.uk",
-             "planetlab1.aston.ac.uk",
-             "planetlab1.nrl.eecs.qmul.ac.uk",
-             "chimay.infonet.fundp.ac.be",
-             "orval.infonet.fundp.ac.be",
-             "rochefort.infonet.fundp.ac.be",
-             #"planck227ple.test.ibbt.be",
-            ]
-
-
-    for hostname in hostnames:
-        pl_node, pl_iface = create_node(hostname, pl_inet, slice_desc)
-        cli_ifaces.append(pl_iface)
-
-        # Create monitor info object for root node
-        node_mon = MonitorInfo(hostname, MonitorInfo.TYPE_LEAF)
-        monitors.append(node_mon)
-      
-        # Add memory and cpu monitoring for all nodes
-        node_mon.cpumem_monitor = create_cpumem_monitor(pl_node, slice_desc)
-
-        # Add network monitoring for all nodes
-        node_mon.net_out_monitor = create_net_monitor(pl_node, slice_desc, [root_iface])
-
-        # Add VLC clients
-        vlc = create_vlc_client(root_node, pl_node, slice_desc)
-        cli_apps.append(vlc)
-
-        # Add reference to vlc app 
-        node_mon.vlc = vlc
-
-    # Add network monitoring for root node
-    #root_mon.net_monitor = create_net_monitor(root_node, slice_desc, cli_ifaces, pcap=True)
-    root_mon.net_out_monitor = create_net_monitor(root_node, slice_desc, cli_ifaces)
-
-    xml = exp_desc.to_xml()
-   
-    controller = ExperimentController(xml, root_dir)
-    controller.start()
-
-    start_time = time.time()
-    duration = time_to_run * 60 # in seconds
-    while not TERMINATE:
-        time.sleep(1)
-        if (time.time() - start_time) > duration: # elapsed time
-            TERMINATE.append(None)
-
-    controller.stop()
-    # store results in results dir
-    store_results(controller, monitors, results_dir, exp_label)
-   
-    controller.shutdown()
-
diff --git a/examples/tunnel_cross_testbed_experiment.py b/examples/tunnel_cross_testbed_experiment.py
deleted file mode 100644 (file)
index 774bd12..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import getpass
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-from nepi.util import proxy
-from nepi.util.constants import DeploymentConfiguration as DC
-import os
-import shutil
-import tempfile
-import time
-
-root_dir = tempfile.mkdtemp()
-netns_root_dir = os.path.join(root_dir, "netns")
-os.makedirs(netns_root_dir)
-user = getpass.getuser()
-slicename = os.environ["PL_SLICE"]
-plchost = "www.planet-lab.eu"
-port_base = 2000 + (os.getpid() % 1000) * 13
-pl_ssh_key = os.environ.get(
-    "PL_SSH_KEY",
-    "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'],) )
-pl_user = os.environ.get('PL_USER')
-pl_pwd = os.environ.get('PL_PASS')
-
-exp_desc = ExperimentDescription()
-
-pl_provider = FactoriesProvider("planetlab")
-pl_desc = exp_desc.add_testbed_description(pl_provider)
-pl_desc.set_attribute_value("homeDirectory", root_dir)
-pl_desc.set_attribute_value("slice", slicename)
-pl_desc.set_attribute_value("sliceSSHKey", pl_ssh_key)
-pl_desc.set_attribute_value("authUser", pl_user)
-pl_desc.set_attribute_value("authPass", pl_pwd)
-pl_desc.set_attribute_value("plcHost", plchost)
-pl_desc.set_attribute_value("tapPortBase", port_base)
-pl_desc.set_attribute_value("p2pDeployment", False) # it's interactive, we don't want it in tests
-pl_desc.set_attribute_value("cleanProc", True)
-pl_desc.set_attribute_value("plLogLevel", "DEBUG")
-       
-pl_inet = pl_desc.create("Internet")
-
-pl_node = pl_desc.create("Node")
-pl_iface = pl_desc.create("NodeInterface")
-pl_iface.connector("inet").connect(pl_inet.connector("devs"))
-pl_node.connector("devs").connect(pl_iface.connector("node"))
-
-pl_tap = pl_desc.create("TunInterface")
-pl_tap.enable_trace("pcap")
-pl_tap.enable_trace("packets")
-pl_node.connector("devs").connect(pl_tap.connector("node"))
-
-ip1 = pl_tap.add_address()
-ip1.set_attribute_value("Address", "192.168.3.1")
-ip1.set_attribute_value("NetPrefix", 24)
-
-netns_provider = FactoriesProvider("netns")
-netns_desc = exp_desc.add_testbed_description(netns_provider)
-netns_desc.set_attribute_value("homeDirectory", root_dir)
-netns_desc.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-netns_desc.set_attribute_value(DC.ROOT_DIRECTORY, netns_root_dir)
-netns_desc.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-netns_desc.set_attribute_value(DC.USE_SUDO, True)
-
-netns_node = netns_desc.create("Node")
-netns_node.set_attribute_value("forward_X11", True)
-
-netns_tap = netns_desc.create("TunNodeInterface")
-netns_tap.set_attribute_value("up", True)
-netns_tap.set_attribute_value("mtu", 1448)
-netns_node.connector("devs").connect(netns_tap.connector("node"))
-netns_tunchannel = netns_desc.create("TunChannel")
-netns_tunchannel.connector("->fd").connect(netns_tap.connector("fd->"))
-
-pl_tap.connector("tcp").connect(netns_tunchannel.connector("tcp"))
-
-ip2 = netns_tap.add_address()
-ip2.set_attribute_value("Address", "192.168.3.2")
-ip2.set_attribute_value("NetPrefix", 30)
-
-app = netns_desc.create("Application")
-app.set_attribute_value("command", "xterm")
-app.set_attribute_value("user", user)
-app.connector("node").connect(netns_node.connector("apps"))
-
-xml = exp_desc.to_xml()
-
-controller = ExperimentController(xml, root_dir)
-controller.start()
-while not controller.is_finished(app.guid):
-    time.sleep(0.5)
-
-controller.stop()
-controller.shutdown()
-
diff --git a/examples/vlc_cross_testbed_experiment.py b/examples/vlc_cross_testbed_experiment.py
deleted file mode 100644 (file)
index eb81eb1..0000000
+++ /dev/null
@@ -1,218 +0,0 @@
-#!/usr/bin/env python
-
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-import getpass
-import tempfile
-import time
-
-def add_ns3_random_mobility(node, ns3_desc, x, y, z, speed, 
-        bounds_width, bounds_height):
-    position = "%d:%d:%d" % (x, y, z)
-    bounds = "0|%d|0|%d" % (bounds_width, bounds_height) 
-    speed = "Constant:%d" % speed
-    mobility = ns3_desc.create("ns3::RandomDirection2dMobilityModel")
-    mobility.set_attribute_value("Position", position)
-    mobility.set_attribute_value("Bounds", bounds)
-    mobility.set_attribute_value("Speed", speed)
-    mobility.set_attribute_value("Pause",  "Constant:1")
-    node.connector("mobility").connect(mobility.connector("node"))
-    return mobility
-
-def add_ns3_constant_mobility(node, ns3_desc, x, y, z):
-    mobility = ns3_desc.create("ns3::ConstantPositionMobilityModel") 
-    position = "%d:%d:%d" % (x, y, z)
-    mobility.set_attribute_value("Position", position)
-    node.connector("mobility").connect(mobility.connector("node"))
-    return mobility
-
-def add_ns3_wifi_channel(ns3_desc):
-    channel = ns3_desc.create("ns3::YansWifiChannel")
-    delay = ns3_desc.create("ns3::ConstantSpeedPropagationDelayModel")
-    loss  = ns3_desc.create("ns3::LogDistancePropagationLossModel")
-    channel.connector("delay").connect(delay.connector("chan"))
-    channel.connector("loss").connect(loss.connector("prev"))
-    return channel
-
-def add_ip_address(iface, address):
-    ip = iface.add_address()
-    ip.set_attribute_value("Address", address)
-
-def add_route(node, destination, netprefix, nexthop):
-    route = node.add_route()
-    route.set_attribute_value("Destination", destination)
-    route.set_attribute_value("NetPrefix", netprefix)
-    route.set_attribute_value("NextHop", nexthop)
-
-def add_ns3_wifi(node, ns3_desc, access_point = False):
-    wifi = ns3_desc.create("ns3::WifiNetDevice")
-    node.connector("devs").connect(wifi.connector("node"))
-
-    phy = ns3_desc.create("ns3::YansWifiPhy")
-    error = ns3_desc.create("ns3::NistErrorRateModel")
-    manager = ns3_desc.create("ns3::ArfWifiManager")
-    if access_point:
-        mac = ns3_desc.create("ns3::ApWifiMac")
-    else:
-        mac = ns3_desc.create("ns3::StaWifiMac")
-
-    phy.set_attribute_value("Standard", "WIFI_PHY_STANDARD_80211a")
-    mac.set_attribute_value("Standard", "WIFI_PHY_STANDARD_80211a")
-    phy.connector("err").connect(error.connector("phy"))
-    wifi.connector("phy").connect(phy.connector("dev"))
-    wifi.connector("mac").connect(mac.connector("dev"))
-    wifi.connector("manager").connect(manager.connector("dev"))
-    #phy.enable_trace("YansWifiPhyPcapTrace")
-    return wifi, phy
-
-def add_netns_tap(node, netns_desc):
-    tap = netns_desc.create("TapNodeInterface")
-    tap.set_attribute_value("up", True)
-    node.connector("devs").connect(tap.connector("node"))
-    return tap
-
-def add_ns3_fdnd(node, ns3_desc):
-    fdnd = ns3_desc.create("ns3::FdNetDevice")
-    node.connector("devs").connect(fdnd.connector("node"))
-    #fdnd.enable_trace("FileDescriptorPcapTrace")
-    return fdnd
-
-def add_ns3_node(ns3_desc):
-    node = ns3_desc.create("ns3::Node")
-    ipv4 = ns3_desc.create("ns3::Ipv4L3Protocol")
-    arp  = ns3_desc.create("ns3::ArpL3Protocol")
-    icmp = ns3_desc.create("ns3::Icmpv4L4Protocol")
-    udp = ns3_desc.create("ns3::UdpL4Protocol")
-    node.connector("protos").connect(ipv4.connector("node"))
-    node.connector("protos").connect(arp.connector("node"))
-    node.connector("protos").connect(icmp.connector("node"))
-    node.connector("protos").connect(udp.connector("node"))
-    return node
-
-user = getpass.getuser()
-root_dir = tempfile.mkdtemp()
-movie = "/home/alina/repos/nepi/big_buck_bunny_240p_mpeg4_lq.ts" 
-bounds_width = bounds_height = 200
-x = y = 100
-speed = 1
-
-exp_desc = ExperimentDescription()
-
-ns3_provider = FactoriesProvider("ns3")
-ns3_desc = exp_desc.add_testbed_description(ns3_provider)
-ns3_dir = tempfile.mkdtemp()
-ns3_desc.set_attribute_value("rootDirectory", ns3_dir)
-ns3_desc.set_attribute_value("SimulatorImplementationType", "ns3::RealtimeSimulatorImpl")
-ns3_desc.set_attribute_value("ChecksumEnabled", True)
-
-netns_provider = FactoriesProvider("netns")
-netns_desc1 = exp_desc.add_testbed_description(netns_provider)
-netns_dir1 = tempfile.mkdtemp()
-netns_desc1.set_attribute_value("rootDirectory", netns_dir1)
-netns_desc1.set_attribute_value("enableDebug", True)
-netns_desc1.set_attribute_value("useSudo", True)
-netns_desc1.set_attribute_value("deployment_communication", "LOCAL")
-netns_desc1.set_attribute_value("deployment_mode", "DAEMON")
-
-netns_provider = FactoriesProvider("netns")
-netns_desc2 = exp_desc.add_testbed_description(netns_provider)
-netns_dir2 = tempfile.mkdtemp()
-netns_desc2.set_attribute_value("rootDirectory", netns_dir2)
-netns_desc2.set_attribute_value("enableDebug", True)
-netns_desc2.set_attribute_value("useSudo", True)
-netns_desc2.set_attribute_value("deployment_communication", "LOCAL")
-netns_desc2.set_attribute_value("deployment_mode", "DAEMON")
-
-node1 = netns_desc1.create("Node")
-node1.set_attribute_value("forward_X11", True)
-tap1 = netns_desc1.create("TapNodeInterface")
-tap1.set_attribute_value("up", True)
-node1.connector("devs").connect(tap1.connector("node"))
-ip = tap1.add_address()
-ip.set_attribute_value("Address", "10.0.0.1")
-
-node2 = add_ns3_node(ns3_desc)
-fdnd1 = add_ns3_fdnd(node2, ns3_desc)
-add_ip_address(fdnd1, "10.0.0.2")
-
-fdnd1.connector("->fd").connect(tap1.connector("fd->"))
-
-mobility1 = add_ns3_constant_mobility(node2, ns3_desc, x, y, 0)
-wifi1, phy1 = add_ns3_wifi(node2, ns3_desc, access_point = False)
-add_ip_address(wifi1, "10.0.1.1")
-
-node3 = add_ns3_node(ns3_desc)
-mobility2 = add_ns3_random_mobility(node3, ns3_desc, x, y, 0, 
-        speed, bounds_width, bounds_height)
-wifi2, phy2 = add_ns3_wifi(node3, ns3_desc, access_point = True)
-add_ip_address(wifi2, "10.0.1.2")
-
-wifichan = add_ns3_wifi_channel(ns3_desc)
-phy1.connector("chan").connect(wifichan.connector("phys"))
-phy2.connector("chan").connect(wifichan.connector("phys"))
-
-fdnd2 = add_ns3_fdnd(node3, ns3_desc)
-add_ip_address(fdnd2, "10.0.2.1")
-
-node4 = netns_desc2.create("Node")
-node4.set_attribute_value("forward_X11", True)
-tap2 = netns_desc2.create("TapNodeInterface")
-tap2.set_attribute_value("up", True)
-node4.connector("devs").connect(tap2.connector("node"))
-ip = tap2.add_address()
-ip.set_attribute_value("Address", "10.0.2.2")
-
-fdnd2.connector("->fd").connect(tap2.connector("fd->"))
-
-route = node1.add_route()
-route.set_attribute_value("Destination", "10.0.2.0")
-route.set_attribute_value("NextHop", "10.0.0.2")
-
-route = node1.add_route()
-route.set_attribute_value("Destination", "10.0.1.0")
-route.set_attribute_value("NextHop", "10.0.0.2")
-
-route = node2.add_route()
-route.set_attribute_value("Destination", "10.0.2.0")
-route.set_attribute_value("NextHop", "10.0.1.2")
-
-route = node3.add_route()
-route.set_attribute_value("Destination", "10.0.0.0")
-route.set_attribute_value("NextHop", "10.0.1.1")
-
-route = node4.add_route()
-route.set_attribute_value("Destination", "10.0.1.0")
-route.set_attribute_value("NextHop", "10.0.2.1")
-
-route = node4.add_route()
-route.set_attribute_value("Destination", "10.0.0.0")
-route.set_attribute_value("NextHop", "10.0.2.1")
-
-app1 = netns_desc1.create("Application")
-server = "10.0.2.2" 
-command = "xauth -b quit; vlc -I dummy -vvv %s --sout '#rtp{dst=%s,port=5004,mux=ts}' vlc://quit" \
-        % (movie, server)
-#command = "xterm"
-app1.set_attribute_value("command", command)
-app1.set_attribute_value("user", user)
-app1.connector("node").connect(node1.connector("apps"))
-
-app4 = netns_desc2.create("Application")
-command = "xauth -b quit; vlc --ffmpeg-threads=1 rtp://%s:5004/test.ts" % server
-#command = "xterm"
-app4.set_attribute_value("command", command)
-app4.set_attribute_value("user", user)
-app4.connector("node").connect(node4.connector("apps"))
-
-xml = exp_desc.to_xml()
-
-controller = ExperimentController(xml, root_dir)
-
-controller.start()
-while not controller.is_finished(app4.guid):
-    time.sleep(0.5)
-
-controller.stop()
-controller.shutdown()
-
-
index ece0c73..7bae51b 100755 (executable)
--- a/setup.py
+++ b/setup.py
@@ -1,30 +1,25 @@
 #!/usr/bin/env python
-# vim: set fileencoding=utf-8
 from distutils.core import setup
 import sys
 
 setup(
         name        = "nepi",
-        version     = "0.2",
-        description = "High-level abstraction for running network experiments",
-        author      = "Mathieu Lacage, Alina Quereilhac, Martín Ferrari and Claudio Freire",
-        url         = "http://nepi.inria.fr/code/nepi",
+        version     = "3.0",
+        description = "Network Experiment Controller",
+        author      = "Alina Quereilhac",
+        url         = "",
         license     = "GPLv2",
         platforms   = "Linux",
         packages    = [
             "nepi",
-            "nepi.testbeds",
-            "nepi.testbeds.netns",
-            "nepi.testbeds.ns3",
-            "nepi.testbeds.planetlab",
-            "nepi.core",
-            "nepi.util.parser",
-            "nepi.util.settools",
-            "nepi.util.graphtools",
-            "nepi.util" ],
+            "nepi.design",
+            "nepi.execution",
+            "nepi.resources",
+            "nepi.resources.linux",
+            "nepi.resources.netns",
+            "nepi.resources.ns3",
+            "nepi.resources.omf",
+            "nepi.resources.planetlab",
+            "nepi.util"],
         package_dir = {"": "src"},
-        package_data = {"nepi.testbeds.planetlab" : [
-                                "scripts/*.py", "scripts/*.c", "scripts/*.patch", 
-                        ],
-                        "nepi.util" : ["*.tpl"] },
     )
index 944510b..00e54fe 100644 (file)
@@ -1,6 +1,31 @@
-"""
-NEPI (Network Experiment Programming Interface) v 1.0.0 (22 Feb 2011)
-Licensed under #TODO: Check license
+import logging
+import os
+import traceback
+
+LOGLEVEL = os.environ.get("NEPI_LOGLEVEL", "INFO").upper()
+LOGLEVEL = getattr(logging, LOGLEVEL)
+#FORMAT = "%(asctime)s %(name)-12s %(levelname)-8s %(message)s"
+FORMAT = "%(asctime)s %(name)s %(levelname)-4s %(message)s"
+
+# NEPI_LOG variable contains space separated components 
+# on which logging should be enabled
+LOG = os.environ.get("NEPI_LOG", "ALL").upper()
+
+if LOG != 'ALL':
+    # Set by default loglevel to error
+    logging.basicConfig(format = FORMAT, level = logging.ERROR)
+
+    # Set logging level to that defined by the user
+    # only for the enabled components
+    for component in LOG.split(" "):
+        try:
+           log = logging.getLogger(component)
+           log.setLevel(LOGLEVEL)
+        except:
+            err = traceback.format_exc()
+            print "ERROR ", err
+else:
+    # Set the logging level defined by the user for all
+    # components
+    logging.basicConfig(format = FORMAT, level = LOGLEVEL)
 
-Provides a uniform API to describe, deploy and control network experiments for heterogeneous experimentation platforms.
-"""
diff --git a/src/nepi/core/attributes.py b/src/nepi/core/attributes.py
deleted file mode 100644 (file)
index 063aeac..0000000
+++ /dev/null
@@ -1,256 +0,0 @@
-# -*- coding: utf-8 -*-
-
-class Attribute(object):
-    ### Attribute types
-    STRING  = "STRING"
-    BOOL    = "BOOL"
-    ENUM    = "ENUM"
-    DOUBLE  = "DOUBLE"
-    INTEGER = "INTEGER"
-
-    types = [
-        STRING, 
-        BOOL, 
-        ENUM, 
-        DOUBLE, 
-        INTEGER
-    ]
-    
-    type_parsers = {
-        STRING : str,
-        BOOL : lambda x : str(x).lower() in ("1","on","yes","true"),
-        ENUM : str,
-        DOUBLE : float,
-        INTEGER : int,
-    }
-
-    ### Attribute Flags
-    NoFlags          = 0x00
-    # Read-only attribute at design time
-    DesignReadOnly   = 0x01
-    # Invisible attribute at design time
-    DesignInvisible  = 0x02
-    # Read-only attribute at execution time
-    ExecReadOnly     = 0x04
-    # Invisible attribute at execution time
-    ExecInvisible    = 0x08
-    # Attribute doesn't change value during execution time
-    ExecImmutable    = 0x10
-    # Attribute has no default value in the testbed
-    NoDefaultValue   = 0x20
-    # Metadata attribute (is not directly reflected by a real object attribute)
-    Metadata         = 0x40
-
-    def __init__(self, name, help, type, value = None, range = None,
-        allowed = None, flags = None, validation_function = None, 
-        category = None):
-        if not type in Attribute.types:
-            raise AttributeError("invalid type %s " % type)
-        self._name = name
-        self._type = type
-        self._help = help
-        self._value = value
-        self._flags = flags if flags != None else Attribute.NoFlags
-        # range: max and min possible values
-        self._range = range
-        # list of possible values
-        self._allowed = allowed
-        self._validation_function = validation_function
-        self._modified = False
-        self._category = category
-
-    @property
-    def name(self):
-        return self._name
-
-    @property
-    def type(self):
-        return self._type
-
-    @property
-    def help(self):
-        return self._help
-
-    @property
-    def flags(self):
-        return self._flags
-
-    @property
-    def is_design_invisible(self):
-        return self.has_flag(Attribute.DesignInvisible)
-
-    @property
-    def is_design_read_only(self):
-        return self.has_flag(Attribute.DesignReadOnly)
-
-    @property
-    def is_exec_invisible(self):
-        return self.has_flag(Attribute.ExecInvisible)
-
-    @property
-    def is_exec_read_only(self):
-        return self.has_flag(Attribute.ExecReadOnly)
-
-    @property
-    def is_exec_immutable(self):
-        return self.has_flag(Attribute.ExecImmutable)
-
-    @property
-    def is_metadata(self):
-        return self.has_flag(Attribute.Metadata)
-
-    @property
-    def has_no_default_value(self):
-        return self.has_flag(Attribute.NoDefaultValue)
-
-    @property
-    def modified(self):
-        return self._modified
-
-    @property
-    def category(self):
-        return self._category
-
-    @property
-    def range(self):
-        return self._range
-
-    @property
-    def allowed(self):
-        return self._allowed
-
-    @property
-    def validation_function(self):
-        return self._validation_function
-
-    def has_flag(self, flag):
-        return (self._flags & flag) == flag
-
-    def get_value(self):
-        return self._value
-
-    def set_value(self, value):
-        if self.is_valid_value(value):
-            self._value = value
-            self._modified = True
-        else:
-            raise ValueError("Invalid value %s for attribute %s" %
-                    (str(value), self.name))
-
-    value = property(get_value, set_value)
-
-    def is_valid_value(self, value):
-        return self._is_in_range(value) and \
-            self._is_in_allowed_values(value) and \
-                self._is_valid(value)    
-
-    def _is_in_range(self, value):
-        return not self.range or \
-                (value >= self.range[0] and value <= self.range[1])
-
-    def _is_in_allowed_values(self, value):
-        return not self._allowed or value in self._allowed
-
-    def _is_valid(self, value):
-        return not self._validation_function or \
-                self._validation_function(self, value)
-
-class AttributesMap(object):
-    """AttributesMap is the base class for every object whose attributes 
-    are going to be manipulated by the end-user in a script or GUI.
-    """
-    def __init__(self):
-        super(AttributesMap, self).__init__()
-        self._attributes = dict()
-
-    @property
-    def attributes(self):
-        return self._attributes.values()
-
-    def get_attribute_list(self, filter_flags = None, exclude = False):
-        """
-        Returns the list of attributes.
-        
-        Params:
-            filter_flags: if given, only attributes with (all) the specified
-                flags will be returned.
-            
-            exclude: if True, only attributes without (any of) the specified
-                flags will be returned.
-        """
-        attributes = self._attributes
-        if filter_flags != None:
-            def filter_attrs(attr_data):
-                (attr_id, attr) = attr_data
-                return attr.has_flag(filter_flags) == (not exclude)
-            attributes = dict(filter(filter_attrs, attributes.iteritems()))
-        return attributes.keys()
-
-    def set_attribute_value(self, name, value):
-        self._attributes[name].value = value
-
-    def get_attribute_value(self, name):
-        return self._attributes[name].value
-
-    def get_attribute_help(self, name):
-        return self._attributes[name].help
-
-    def get_attribute_type(self, name):
-        return self._attributes[name].type
-
-    def get_attribute_range(self, name):
-        if not self._attributes[name].range:
-            return (None, None)
-        return self._attributes[name].range
-
-    def get_attribute_allowed(self, name):
-        return self._attributes[name].allowed
-
-    def get_attribute_category(self, name):
-        return self._attributes[name].category
-
-    def is_attribute_design_invisible(self, name):
-        return self._attributes[name].is_design_invisible
-
-    def is_attribute_design_read_only(self, name):
-        return self._attributes[name].is_design_read_only
-
-    def is_attribute_exec_invisible(self, name):
-        return self._attributes[name].is_exec_invisible
-
-    def is_attribute_exec_read_only(self, name):
-        return self._attributes[name].is_exec_read_only
-
-    def is_attribute_exec_immutable(self, name):
-        return self._attributes[name].is_exec_immutable
-
-    def is_attribute_metadata(self, name):
-        return self._attributes[name].is_metadata
-
-    def has_attribute_no_default_value(self, name):
-        return self._attributes[name].has_no_default_value
-
-    def is_attribute_modified(self, name):
-        return self._attributes[name].modified
-
-    def is_attribute_value_valid(self, name, value):
-        return self._attributes[name].is_valid_value(value)
-
-    def add_attribute(self, name, help, type, value = None, range = None,
-        allowed = None, flags = Attribute.NoFlags, validation_function = None,
-        category = None):
-        if name in self._attributes:
-            raise AttributeError("Attribute %s already exists" % name)
-        attribute = Attribute(name, help, type, value, range, allowed, flags,
-                validation_function, category)
-        self._attributes[name] = attribute
-
-    def del_attribute(self, name):
-        del self._attributes[name]
-
-    def has_attribute(self, name):
-        return name in self._attributes    
-    
-    def destroy(self):
-        self._attributes = dict()
-
diff --git a/src/nepi/core/connector.py b/src/nepi/core/connector.py
deleted file mode 100644 (file)
index 0d1888f..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-# -*- coding: utf-8 -*-
-
-"""
-Common connector class
-"""
-
-import sys
-
-class ConnectorType(object):
-    def __init__(self, testbed_id, factory_id, name, help, max = -1, min = 0):
-        super(ConnectorType, self).__init__()
-
-        if max == -1:
-            max = sys.maxint
-        elif max <= 0:
-            raise RuntimeError, "The maximum number of connections allowed need to be more than 0"
-        if min < 0:
-            raise RuntimeError, "The minimum number of connections allowed needs to be at least 0"
-        # max -- maximum amount of connections that this type support, 
-        # -1 for no limit
-        self._max = max
-        # min -- minimum amount of connections required by this type of connector
-        self._min = min
-       
-        # connector_type_id -- univoquely identifies a connector type 
-        # across testbeds
-        self._connector_type_id = self.make_connector_type_id(
-            testbed_id, factory_id, name)
-        
-        # name -- display name for the connector type
-        self._name = name
-
-        # help -- help text
-        self._help = help
-
-        # from_connections -- connections where the other connector is the "From"
-        # to_connections -- connections where the other connector is the "To"
-        # keys in the dictionary correspond to the 
-        # connector_type_id for possible connections. The value is a tuple:
-        # (can_cross, connect)
-        # can_cross: indicates if the connection is allowed accros different
-        #    testbed instances
-        # code: is the connection function to be invoked when the elements
-        #    are connected
-        self._from_connections = dict()
-        self._to_connections = dict()
-
-    def __str__(self):
-        return "ConnectorType%r" % (self._connector_type_id,)
-
-    @property
-    def connector_type_id(self):
-        return self._connector_type_id
-
-    @property
-    def name(self):
-        return self._name
-
-    @property
-    def help(self):
-        return self._help
-
-    @property
-    def max(self):
-        return self._max
-
-    @property
-    def min(self):
-        return self._min
-    
-    @staticmethod
-    def make_connector_type_id(testbed_id, factory_id, name):
-        testbed_id = testbed_id.lower() if testbed_id else None
-        factory_id = factory_id.lower() if factory_id else None
-        name = name.lower() if name else None
-        return (testbed_id, factory_id, name)
-    
-    @staticmethod
-    def _type_resolution_order(connector_type_id):
-        testbed_id, factory_id, name = connector_type_id
-        
-        # the key is always a candidate
-        yield connector_type_id
-        
-        # Try wildcard combinations
-        if (testbed_id, None, name) != connector_type_id:
-            yield (testbed_id, None, name)
-        if (None, factory_id, name) != connector_type_id:
-            yield (None, factory_id, name)
-        if (None, None, name) != connector_type_id:
-            yield (None, None, name)
-
-    def add_from_connection(self, testbed_id, factory_id, name, can_cross, 
-            init_code, compl_code):
-        type_id = self.make_connector_type_id(testbed_id, factory_id, name)
-        self._from_connections[type_id] = (can_cross, init_code, compl_code)
-
-    def add_to_connection(self, testbed_id, factory_id, name, can_cross, 
-            init_code, compl_code):
-        type_id = self.make_connector_type_id(testbed_id, factory_id, name)
-        self._to_connections[type_id] = (can_cross, init_code, compl_code)
-
-    def connect_to_init_code(self, testbed_id, factory_id, name, must_cross):
-        return self._connect_to_code(testbed_id, factory_id, name, must_cross)[0]
-
-    def connect_to_compl_code(self, testbed_id, factory_id, name, must_cross):
-        return self._connect_to_code(testbed_id, factory_id, name, must_cross)[1]
-
-    def _connect_to_code(self, testbed_id, factory_id, name,
-            must_cross):
-        connector_type_id = self.make_connector_type_id(testbed_id, factory_id, name)
-        for lookup_type_id in self._type_resolution_order(connector_type_id):
-            if lookup_type_id in self._to_connections:
-                (can_cross, init_code, compl_code) = self._to_connections[lookup_type_id]
-                if must_cross == can_cross:
-                    return (init_code, compl_code)
-        else:
-            return (False, False)
-    def can_connect(self, testbed_id, factory_id, name, must_cross):
-        connector_type_id = self.make_connector_type_id(testbed_id, factory_id, name)
-        for lookup_type_id in self._type_resolution_order(connector_type_id):
-            if lookup_type_id in self._from_connections:
-                (can_cross, init_code, compl_code) = self._from_connections[lookup_type_id]
-            elif lookup_type_id in self._to_connections:
-                (can_cross, init_code, compl_code) = self._to_connections[lookup_type_id]
-            else:
-                # keep trying
-                continue
-            if must_cross == can_cross:
-                return True
-        else:
-            return False
-
diff --git a/src/nepi/core/design.py b/src/nepi/core/design.py
deleted file mode 100644 (file)
index 7ad5509..0000000
+++ /dev/null
@@ -1,403 +0,0 @@
-# -*- coding: utf-8 -*-
-
-"""
-Experiment design API
-"""
-
-from nepi.core.attributes import AttributesMap, Attribute
-from nepi.core.metadata import Metadata
-from nepi.util import validation
-from nepi.util.guid import GuidGenerator
-from nepi.util.graphical_info import GraphicalInfo
-from nepi.util.parser._xml import XmlExperimentParser
-from nepi.util.tags import Taggable
-import sys
-
-class Connector(object):
-    """A Connector sepcifies the connection points in an Object"""
-    def __init__(self, box, connector_type):
-        super(Connector, self).__init__()
-        self._box = box
-        self._connector_type = connector_type
-        self._connections = list()
-
-    def __str__(self):
-        return "Connector(%s, %s)" % (self.box, self.connector_type)
-
-    @property
-    def box(self):
-        return self._box
-
-    @property
-    def connector_type(self):
-        return self._connector_type
-
-    @property
-    def connections(self):
-        return self._connections
-
-    def is_full(self):
-        """Return True if the connector has the maximum number of connections
-        """
-        return len(self.connections) == self.connector_type.max
-
-    def is_complete(self):
-        """Return True if the connector has the minimum number of connections
-        """
-        return len(self.connections) >= self.connector_type.min
-
-    def is_connected(self, connector):
-        return connector in self._connections
-
-    def connect(self, connector):
-        if not self.can_connect(connector) or not connector.can_connect(self):
-            raise RuntimeError("Could not connect. %s to %s" % (self, connector))
-        self._connections.append(connector)
-        connector._connections.append(self)
-
-    def get_connected_box(self, idx = 0):
-        if len(self._connections) == 0:
-            return None
-        return self._connections[idx].box
-
-    def disconnect(self, connector):
-        if connector not in self._connections or\
-                self not in connector._connections:
-                raise RuntimeError("Could not disconnect.")
-        self._connections.remove(connector)
-        connector._connections.remove(self)
-
-    def can_connect(self, connector):
-        # can't connect with self
-        if self.box.guid == connector.box.guid:
-            return False
-        if self.is_full() or connector.is_full():
-            return False
-        if self.is_connected(connector):
-            return False
-        (testbed_id, factory_id, name) = connector.connector_type.connector_type_id
-        testbed_guid1 = self.box.testbed_guid
-        testbed_guid2 = connector.box.testbed_guid
-        must_cross = (testbed_guid1 != testbed_guid2)
-        return self.connector_type.can_connect(testbed_id, factory_id, name,
-                must_cross)
-
-    def destroy(self):
-        for connector in self.connections:
-            self.disconnect(connector)
-        self._box = self._connectors = None
-
-class Trace(AttributesMap):
-    def __init__(self, name, help, enabled = False):
-        super(Trace, self).__init__()
-        self._name = name
-        self._help = help       
-        self._enabled = enabled
-    
-    @property
-    def name(self):
-        return self._name
-
-    @property
-    def help(self):
-        return self._help
-
-    @property
-    def enabled(self):
-        return self._enabled
-
-    def enable(self):
-        self._enabled = True
-
-    def disable(self):
-        self._enabled = False
-
-class Address(AttributesMap):
-    def __init__(self):
-        super(Address, self).__init__()
-        self.add_attribute(name = "Address",
-                help = "Address number", 
-                type = Attribute.STRING,
-                flags = Attribute.NoDefaultValue,
-                validation_function = validation.is_ip_address)
-        self.add_attribute(name = "NetPrefix",
-                help = "Network prefix for the address", 
-                type = Attribute.INTEGER, 
-                range = (0, 128),
-                value = 24,
-                flags = Attribute.NoDefaultValue,
-                validation_function = validation.is_integer)
-        self.add_attribute(name = "Broadcast",
-                help = "Broadcast address", 
-                type = Attribute.STRING,
-                validation_function = validation.is_ip4_address)
-                
-class Route(AttributesMap):
-    def __init__(self):
-        super(Route, self).__init__()
-        self.add_attribute(name = "Destination", 
-                help = "Network destintation",
-                type = Attribute.STRING, 
-                validation_function = validation.is_ref_address)
-        self.add_attribute(name = "NetPrefix",
-                help = "Network destination prefix", 
-                type = Attribute.INTEGER, 
-                range = (0, 128),
-                value = 24,
-                flags = Attribute.NoDefaultValue,
-                validation_function = validation.is_integer)
-        self.add_attribute(name = "NextHop",
-                help = "Address for the next hop", 
-                type = Attribute.STRING,
-                flags = Attribute.NoDefaultValue,
-                validation_function = validation.is_ref_address)
-        self.add_attribute(name = "Metric",
-                help = "Routing metric", 
-                type = Attribute.INTEGER,
-                value = 0,
-                flags = Attribute.NoDefaultValue,
-                validation_function = validation.is_integer)
-        self.add_attribute(name = "Device",
-                help = "Device name", 
-                type = Attribute.STRING,
-                value = None,
-                flags = Attribute.NoDefaultValue,
-                validation_function = validation.is_string)
-
-class Box(AttributesMap, Taggable):
-    def __init__(self, guid, factory, testbed_guid, container = None):
-        super(Box, self).__init__()
-        # guid -- global unique identifier
-        self._guid = guid
-        # factory_id -- factory identifier or name
-        self._factory_id = factory.factory_id
-        # testbed_guid -- parent testbed guid
-        self._testbed_guid = testbed_guid
-        # container -- boxes can be nested inside other 'container' boxes
-        self._container = container
-        # traces -- list of available traces for the box
-        self._traces = dict()
-        # connectors -- list of available connectors for the box
-        self._connectors = dict()
-        # factory_attributes -- factory attributes for box construction
-        self._factory_attributes = dict()
-        # graphical_info -- GUI position information
-        self.graphical_info = GraphicalInfo()
-
-        for connector_type in factory.connector_types:
-            connector = Connector(self, connector_type)
-            self._connectors[connector_type.name] = connector
-        for (name, help, enabled) in factory.traces:
-            trace = Trace(name, help, enabled)
-            self._traces[name] = trace
-        for tag_id in factory.tags:
-            self.add_tag(tag_id)
-        for attr in factory.box_attributes.attributes:
-            self.add_attribute(attr.name, attr.help, attr.type, attr.value, 
-                    attr.range, attr.allowed, attr.flags, 
-                    attr.validation_function, attr.category)
-        for attr in factory.attributes:
-            if attr.modified or attr.is_metadata:
-                self._factory_attributes[attr.name] = attr.value
-
-    def __str__(self):
-        return "Box(%s, %s, %s)" % (self.guid, self.factory_id, self.testbed_guid)
-
-    @property
-    def guid(self):
-        return self._guid
-
-    @property
-    def factory_id(self):
-        return self._factory_id
-
-    @property
-    def testbed_guid(self):
-        return self._testbed_guid
-
-    @property
-    def container(self):
-        return self._container
-
-    @property
-    def connectors(self):
-        return self._connectors.values()
-
-    @property
-    def traces(self):
-        return self._traces.values()
-
-    @property
-    def traces_list(self):
-        return self._traces.keys()
-
-    @property
-    def factory_attributes(self):
-        return self._factory_attributes
-
-    def trace_help(self, trace_id):
-        return self._traces[trace_id].help
-
-    def enable_trace(self, trace_id):
-        self._traces[trace_id].enable()
-
-    def disable_trace(self, trace_id):
-        self._traces[trace_id].disable()
-
-    def is_trace_enabled(self, trace_id):
-        return self._traces[trace_id].enabled
-
-    def connector(self, name):
-        return self._connectors[name]
-
-    def destroy(self):
-        super(Box, self).destroy()
-        for c in self.connectors:
-            c.destroy()         
-        for t in self.traces:
-            t.destroy()
-        self._connectors = self._traces = self._factory_attributes = None
-
-class FactoriesProvider(object):
-    def __init__(self, testbed_id):
-        super(FactoriesProvider, self).__init__()
-        self._testbed_id = testbed_id
-        self._factories = dict()
-
-        metadata = Metadata(testbed_id) 
-        for factory in metadata.build_factories():
-            self.add_factory(factory)
-
-        self._testbed_version = metadata.testbed_version
-
-    @property
-    def testbed_id(self):
-        return self._testbed_id
-
-    @property
-    def testbed_version(self):
-        return self._testbed_version
-
-    @property
-    def factories(self):
-        return self._factories.values()
-
-    def factory(self, factory_id):
-        return self._factories[factory_id]
-
-    def add_factory(self, factory):
-        self._factories[factory.factory_id] = factory
-
-    def remove_factory(self, factory_id):
-        del self._factories[factory_id]
-
-class TestbedDescription(AttributesMap):
-    def __init__(self, guid_generator, provider, guid = None):
-        super(TestbedDescription, self).__init__()
-        self._guid_generator = guid_generator
-        self._guid = guid_generator.next(guid)
-        self._provider = provider
-        self._boxes = dict()
-        self.graphical_info = GraphicalInfo()
-
-        metadata = Metadata(provider.testbed_id)
-        for attr in metadata.testbed_attributes().attributes:
-            self.add_attribute(attr.name, attr.help, attr.type, attr.value, 
-                    attr.range, attr.allowed, attr.flags, 
-                    attr.validation_function, attr.category)
-
-    @property
-    def guid(self):
-        return self._guid
-
-    @property
-    def provider(self):
-        return self._provider
-
-    @property
-    def boxes(self):
-        return self._boxes.values()
-
-    def box(self, guid):
-        return self._boxes[guid] if guid in self._boxes else None
-
-    def create(self, factory_id, guid = None):
-        guid = self._guid_generator.next(guid)
-        factory = self._provider.factory(factory_id)
-        box = factory.create(guid, self)
-        self._boxes[guid] = box
-        return box
-
-    def delete(self, guid):
-        box = self._boxes[guid]
-        del self._boxes[guid]
-        box.destroy()
-
-    def destroy(self):
-        for guid, box in self._boxes.iteritems():
-            box.destroy()
-        self._boxes = None
-
-class ExperimentDescription(object):
-    def __init__(self):
-        self._guid_generator = GuidGenerator()
-        self._testbed_descriptions = dict()
-
-    @property
-    def testbed_descriptions(self):
-        return self._testbed_descriptions.values()
-
-    def to_xml(self):
-        parser = XmlExperimentParser()
-        return parser.to_xml(self)
-
-    def from_xml(self, xml):
-        parser = XmlExperimentParser()
-        parser.from_xml(self, xml)
-
-    def testbed_description(self, guid):
-        return self._testbed_descriptions[guid] \
-                if guid in self._testbed_descriptions else None
-
-    def box(self, guid):
-        for testbed_description in self._testbed_descriptions.values():
-            box = testbed_description.box(guid)
-            if box: return box
-        return None
-
-    def get_element(self, guid):
-        if guid in self._testbed_descriptions:
-            return self._testbed_descriptions[guid]
-        for testbed_description in self._testbed_descriptions.values():
-            box = testbed_description.box(guid)
-            if box: return box
-        return None
-
-    def get_element_by_label(self, label):
-        for tbd_desc in self._testbed_descriptions.values():
-            l = tbd_desc.get_attribute_value("label")
-            if label == l:
-                return tbd_desc
-            for box in tbd_desc.boxes:
-                l = box.get_attribute_value("label")
-                if label == l:
-                    return box
-        return None
-    
-    def add_testbed_description(self, provider, guid = None):
-        testbed_description = TestbedDescription(self._guid_generator, 
-                provider, guid)
-        guid = testbed_description.guid
-        self._testbed_descriptions[guid] = testbed_description
-        return testbed_description
-
-    def remove_testbed_description(self, guid):
-        testbed_description = self._testbed_descriptions[guid]
-        del self._testbed_descriptions[guid]
-        testbed_description.destroy()
-
-    def destroy(self):
-        for testbed_description in self.testbed_descriptions:
-            testbed_description.destroy()
-
-
diff --git a/src/nepi/core/execute.py b/src/nepi/core/execute.py
deleted file mode 100644 (file)
index 5e0c5ea..0000000
+++ /dev/null
@@ -1,1180 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from nepi.core.attributes import Attribute, AttributesMap
-from nepi.util import validation
-from nepi.util.constants import ApplicationStatus as AS, TestbedStatus as TS, TIME_NOW, DeploymentConfiguration as DC
-from nepi.util.parser._xml import XmlExperimentParser
-import sys
-import re
-import threading
-import ConfigParser
-import os
-import collections
-import functools
-import time
-import logging
-logging.basicConfig()
-
-ATTRIBUTE_PATTERN_BASE = re.compile(r"\{#\[(?P<label>[-a-zA-Z0-9._]*)\](?P<expr>(?P<component>\.addr\[[0-9]+\]|\.route\[[0-9]+\]|\.trace\[[-a-zA-Z0-9._]+\])?.\[(?P<attribute>[-a-zA-Z0-9._]*)\])#}")
-ATTRIBUTE_PATTERN_GUID_SUB = r"{#[%(guid)s]%(expr)s#}"
-COMPONENT_PATTERN = re.compile(r"(?P<kind>[a-z]*)\[(?P<index>.*)\]")
-
-def _undefer(deferred):
-    if hasattr(deferred, '_get'):
-        return deferred._get()
-    else:
-        return deferred
-
-
-class TestbedController(object):
-    def __init__(self, testbed_id, testbed_version):
-        self._testbed_id = testbed_id
-        self._testbed_version = testbed_version
-
-    @property
-    def testbed_id(self):
-        return self._testbed_id
-
-    @property
-    def testbed_version(self):
-        return self._testbed_version
-
-    @property
-    def guids(self):
-        raise NotImplementedError
-
-    def defer_configure(self, name, value):
-        """Instructs setting a configuartion attribute for the testbed instance"""
-        raise NotImplementedError
-
-    def defer_create(self, guid, factory_id):
-        """Instructs creation of element """
-        raise NotImplementedError
-
-    def defer_create_set(self, guid, name, value):
-        """Instructs setting an initial attribute on an element"""
-        raise NotImplementedError
-
-    def defer_factory_set(self, guid, name, value):
-        """Instructs setting an attribute on a factory"""
-        raise NotImplementedError
-
-    def defer_connect(self, guid1, connector_type_name1, guid2, 
-            connector_type_name2): 
-        """Instructs creation of a connection between the given connectors"""
-        raise NotImplementedError
-
-    def defer_cross_connect(self, 
-            guid, connector_type_name,
-            cross_guid, cross_testbed_guid,
-            cross_testbed_id, cross_factory_id,
-            cross_connector_type_name):
-        """
-        Instructs creation of a connection between the given connectors 
-        of different testbed instances
-        """
-        raise NotImplementedError
-
-    def defer_add_trace(self, guid, trace_id):
-        """Instructs the addition of a trace"""
-        raise NotImplementedError
-
-    def defer_add_address(self, guid, address, netprefix, broadcast): 
-        """Instructs the addition of an address"""
-        raise NotImplementedError
-
-    def defer_add_route(self, guid, destination, netprefix, nexthop, 
-            metric = 0, device = None):
-        """Instructs the addition of a route"""
-        raise NotImplementedError
-
-    def do_setup(self):
-        """After do_setup the testbed initial configuration is done"""
-        raise NotImplementedError
-
-    def do_create(self):
-        """
-        After do_create all instructed elements are created and 
-        attributes setted
-        """
-        raise NotImplementedError
-
-    def do_connect_init(self):
-        """
-        After do_connect_init all internal connections between testbed elements
-        are initiated
-        """
-        raise NotImplementedError
-
-    def do_connect_compl(self):
-        """
-        After do_connect all internal connections between testbed elements
-        are completed
-        """
-        raise NotImplementedError
-
-    def do_preconfigure(self):
-        """
-        Done just before resolving netrefs, after connection, before cross connections,
-        useful for early stages of configuration, for setting up stuff that might be
-        required for netref resolution.
-        """
-        raise NotImplementedError
-
-    def do_configure(self):
-        """After do_configure elements are configured"""
-        raise NotImplementedError
-
-    def do_prestart(self):
-        """Before do_start elements are prestart-configured"""
-        raise NotImplementedError
-
-    def do_cross_connect_init(self, cross_data):
-        """
-        After do_cross_connect_init initiation of all external connections 
-        between different testbed elements is performed
-        """
-        raise NotImplementedError
-
-    def do_cross_connect_compl(self, cross_data):
-        """
-        After do_cross_connect_compl completion of all external connections 
-        between different testbed elements is performed
-        """
-        raise NotImplementedError
-
-    def start(self):
-        raise NotImplementedError
-
-    def stop(self):
-        raise NotImplementedError
-
-    def recover(self):
-        """
-        On testbed recovery (if recovery is a supported policy), the controller
-        instance will be re-created and the following sequence invoked:
-        
-            do_setup
-            defer_X - programming the testbed with persisted execution values
-                (not design values). Execution values (ExecImmutable attributes)
-                should be enough to recreate the testbed's state.
-            *recover*
-            <cross-connection methods>
-            
-        Start will not be called, and after cross connection invocations,
-        the testbed is supposed to be fully functional again.
-        """
-        raise NotImplementedError
-
-    def set(self, guid, name, value, time = TIME_NOW):
-        raise NotImplementedError
-
-    def get(self, guid, name, time = TIME_NOW):
-        raise NotImplementedError
-    
-    def get_route(self, guid, index, attribute):
-        """
-        Params:
-            
-            guid: guid of box to query
-            index: number of routing entry to fetch
-            attribute: one of Destination, NextHop, NetPrefix
-        """
-        raise NotImplementedError
-
-    def get_address(self, guid, index, attribute='Address'):
-        """
-        Params:
-            
-            guid: guid of box to query
-            index: number of inteface to select
-            attribute: one of Address, NetPrefix, Broadcast
-        """
-        raise NotImplementedError
-
-    def get_attribute_list(self, guid, filter_flags = None, exclude = False):
-        raise NotImplementedError
-
-    def get_factory_id(self, guid):
-        raise NotImplementedError
-
-    def action(self, time, guid, action):
-        raise NotImplementedError
-
-    def status(self, guid):
-        raise NotImplementedError
-    
-    def testbed_status(self):
-        raise NotImplementedError
-
-    def trace(self, guid, trace_id, attribute='value'):
-        raise NotImplementedError
-
-    def traces_info(self):
-        """ dictionary of dictionaries:
-            traces_info = dict({
-                guid = dict({
-                    trace_id = dict({
-                            host = host,
-                            filepath = filepath,
-                            filesize = size in bytes,
-                        })
-                })
-            })"""
-        raise NotImplementedError
-
-    def shutdown(self):
-        raise NotImplementedError
-
-class ExperimentController(object):
-    def __init__(self, experiment_xml, root_dir):
-        self._experiment_design_xml = experiment_xml
-        self._experiment_execute_xml = None
-        self._testbeds = dict()
-        self._deployment_config = dict()
-        self._netrefs = collections.defaultdict(set)
-        self._testbed_netrefs = collections.defaultdict(set)
-        self._cross_data = dict()
-        self._root_dir = root_dir
-        self._netreffed_testbeds = set()
-        self._guids_in_testbed_cache = dict()
-        self._failed_testbeds = set()
-        self._started_time = None
-        self._stopped_time = None
-        self._testbed_order = []
-      
-        self._logger = logging.getLogger('nepi.core.execute')
-        level = logging.ERROR
-        if os.environ.get("NEPI_CONTROLLER_LOGLEVEL", 
-                DC.ERROR_LEVEL) == DC.DEBUG_LEVEL:
-            level = logging.DEBUG
-        self._logger.setLevel(level)
-        if experiment_xml is None and root_dir is not None:
-            # Recover
-            self.load_experiment_xml()
-            self.load_execute_xml()
-        else:
-            self.persist_experiment_xml()
-
-    @property
-    def experiment_design_xml(self):
-        return self._experiment_design_xml
-
-    @property
-    def experiment_execute_xml(self):
-        return self._experiment_execute_xml
-
-    @property
-    def started_time(self):
-        return self._started_time
-
-    @property
-    def stopped_time(self):
-        return self._stopped_time
-
-    @property
-    def guids(self):
-        guids = list()
-        for testbed_guid in self._testbeds.keys():
-            _guids = self._guids_in_testbed(testbed_guid)
-            if _guids:
-                guids.extend(_guids)
-        return guids
-
-    def persist_experiment_xml(self):
-        xml_path = os.path.join(self._root_dir, "experiment-design.xml")
-        f = open(xml_path, "w")
-        f.write(self._experiment_design_xml)
-        f.close()
-
-    def persist_execute_xml(self):
-        xml_path = os.path.join(self._root_dir, "experiment-execute.xml")
-        f = open(xml_path, "w")
-        f.write(self._experiment_execute_xml)
-        f.close()
-
-    def load_experiment_xml(self):
-        xml_path = os.path.join(self._root_dir, "experiment-design.xml")
-        f = open(xml_path, "r")
-        self._experiment_design_xml = f.read()
-        f.close()
-
-    def load_execute_xml(self):
-        xml_path = os.path.join(self._root_dir, "experiment-execute.xml")
-        f = open(xml_path, "r")
-        self._experiment_execute_xml = f.read()
-        f.close()
-
-    def trace(self, guid, trace_id, attribute='value'):
-        testbed = self._testbed_for_guid(guid)
-        if testbed != None:
-            return testbed.trace(guid, trace_id, attribute)
-        raise RuntimeError("No element exists with guid %d" % guid)    
-
-    def traces_info(self):
-        traces_info = dict()
-        for guid, testbed in self._testbeds.iteritems():
-            tinfo = testbed.traces_info()
-            if tinfo:
-                traces_info[guid] = testbed.traces_info()
-        return traces_info
-
-    @staticmethod
-    def _parallel(callables):
-        excs = []
-        def wrap(callable):
-            def wrapped(*p, **kw):
-                try:
-                    callable(*p, **kw)
-                except:
-                    logging.exception("Exception occurred in asynchronous thread:")
-                    excs.append(sys.exc_info())
-            try:
-                wrapped = functools.wraps(callable)(wrapped)
-            except:
-                # functools.partial not wrappable
-                pass
-            return wrapped
-        threads = [ threading.Thread(target=wrap(callable)) for callable in callables ]
-        for thread in threads:
-            thread.start()
-        for thread in threads:
-            thread.join()
-        for exc in excs:
-            eTyp, eVal, eLoc = exc
-            raise eTyp, eVal, eLoc
-
-    def start(self):
-        self._started_time = time.time() 
-        self._start()
-
-    def _start(self, recover = False):
-        parser = XmlExperimentParser()
-        
-        if recover:
-            xml = self._experiment_execute_xml
-        else:
-            xml = self._experiment_design_xml
-        data = parser.from_xml_to_data(xml)
-
-        # instantiate testbed controllers
-        to_recover, to_restart = self._init_testbed_controllers(data, recover)
-        all_restart = set(to_restart)
-        
-        if not recover:
-            # persist testbed connection data, for potential recovery
-            self._persist_testbed_proxies()
-        else:
-            # recover recoverable controllers
-            for guid in to_recover:
-                try:
-                    self._testbeds[guid].do_setup()
-                    self._testbeds[guid].recover()
-                except:
-                    self._logger.exception("During recovery of testbed %s", guid)
-                    
-                    # Mark failed
-                    self._failed_testbeds.add(guid)
-    
-        def steps_to_configure(self, allowed_guids):
-            # perform setup in parallel for all test beds,
-            # wait for all threads to finish
-
-            self._logger.debug("ExperimentController: Starting parallel do_setup")
-            self._parallel([testbed.do_setup 
-                            for guid,testbed in self._testbeds.iteritems()
-                            if guid in allowed_guids])
-       
-            # perform create-connect in parallel, wait
-            # (internal connections only)
-            self._logger.debug("ExperimentController: Starting parallel do_create")
-            self._parallel([testbed.do_create
-                            for guid,testbed in self._testbeds.iteritems()
-                            if guid in allowed_guids])
-
-            self._logger.debug("ExperimentController: Starting parallel do_connect_init")
-            self._parallel([testbed.do_connect_init
-                            for guid,testbed in self._testbeds.iteritems()
-                            if guid in allowed_guids])
-
-            self._logger.debug("ExperimentController: Starting parallel do_connect_fin")
-            self._parallel([testbed.do_connect_compl
-                            for guid,testbed in self._testbeds.iteritems()
-                            if guid in allowed_guids])
-
-            self._logger.debug("ExperimentController: Starting parallel do_preconfigure")
-            self._parallel([testbed.do_preconfigure
-                            for guid,testbed in self._testbeds.iteritems()
-                            if guid in allowed_guids])
-            self._clear_caches()
-            
-            # Store testbed order
-            self._testbed_order.append(allowed_guids)
-
-        steps_to_configure(self, to_restart)
-
-        if self._netreffed_testbeds:
-            self._logger.debug("ExperimentController: Resolving netreffed testbeds")
-            # initally resolve netrefs
-            self.do_netrefs(data, fail_if_undefined=False)
-            
-            # rinse and repeat, for netreffed testbeds
-            netreffed_testbeds = set(self._netreffed_testbeds)
-
-            to_recover, to_restart = self._init_testbed_controllers(data, recover)
-            all_restart.update(to_restart)
-            
-            if not recover:
-                # persist testbed connection data, for potential recovery
-                self._persist_testbed_proxies()
-            else:
-                # recover recoverable controllers
-                for guid in to_recover:
-                    try:
-                        self._testbeds[guid].do_setup()
-                        self._testbeds[guid].recover()
-                    except:
-                        self._logger.exception("During recovery of testbed %s", guid)
-
-                        # Mark failed
-                        self._failed_testbeds.add(guid)
-
-            # configure dependant testbeds
-            steps_to_configure(self, to_restart)
-        
-        all_restart = [ self._testbeds[guid] for guid in all_restart ]
-            
-        # final netref step, fail if anything's left unresolved
-        self._logger.debug("ExperimentController: Resolving do_netrefs")
-        self.do_netrefs(data, fail_if_undefined=False)
-       
-        # Only now, that netref dependencies have been solve, it is safe to
-        # program cross_connections
-        self._logger.debug("ExperimentController: Programming testbed cross-connections")
-        self._program_testbed_cross_connections(data)
-        # perform do_configure in parallel for al testbeds
-        # (it's internal configuration for each)
-        self._logger.debug("ExperimentController: Starting parallel do_configure")
-        self._parallel([testbed.do_configure
-                        for testbed in all_restart])
-
-        self._clear_caches()
-
-        #print >>sys.stderr, "DO IT"
-        #import time
-        #time.sleep(60)
-        
-        # cross-connect (cannot be done in parallel)
-        self._logger.debug("ExperimentController: Starting cross-connect")
-        for guid, testbed in self._testbeds.iteritems():
-            cross_data = self._get_cross_data(guid)
-            testbed.do_cross_connect_init(cross_data)
-        for guid, testbed in self._testbeds.iteritems():
-            cross_data = self._get_cross_data(guid)
-            testbed.do_cross_connect_compl(cross_data)
-       
-        self._clear_caches()
-
-        # Last chance to configure (parallel on all testbeds)
-        self._logger.debug("ExperimentController: Starting parallel do_prestart")
-        self._parallel([testbed.do_prestart
-                        for testbed in all_restart])
-
-        # final netref step, fail if anything's left unresolved
-        self.do_netrefs(data, fail_if_undefined=True)
-        self._clear_caches()
-        
-        if not recover:
-            # update execution xml with execution-specific values
-            # TODO: BUG! BUggy code! cant stand all serializing all attribute values (ej: tun_key which is non ascci)"
-            self._update_execute_xml()
-            self.persist_execute_xml()
-
-        # start experiment (parallel start on all testbeds)
-        self._logger.debug("ExperimentController: Starting parallel do_start")
-        self._parallel([testbed.start
-                        for testbed in all_restart])
-
-        self._clear_caches()
-
-    def _clear_caches(self):
-        # Cleaning cache for safety.
-        self._guids_in_testbed_cache = dict()
-
-    def _persist_testbed_proxies(self):
-        TRANSIENT = (DC.RECOVER,)
-        
-        # persist access configuration for all testbeds, so that
-        # recovery mode can reconnect to them if it becomes necessary
-        conf = ConfigParser.RawConfigParser()
-        for testbed_guid, testbed_config in self._deployment_config.iteritems():
-            testbed_guid = str(testbed_guid)
-            conf.add_section(testbed_guid)
-            for attr in testbed_config.get_attribute_list():
-                if attr not in TRANSIENT:
-                    value = testbed_config.get_attribute_value(attr)
-                    if value is not None:
-                        conf.set(testbed_guid, attr, value)
-        
-        f = open(os.path.join(self._root_dir, 'deployment_config.ini'), 'w')
-        conf.write(f)
-        f.close()
-    
-    def _load_testbed_proxies(self):
-        TYPEMAP = {
-            Attribute.STRING : 'get',
-            Attribute.BOOL : 'getboolean',
-            Attribute.ENUM : 'get',
-            Attribute.DOUBLE : 'getfloat',
-            Attribute.INTEGER : 'getint',
-        }
-        
-        TRANSIENT = (DC.RECOVER,)
-        
-        # deferred import because proxy needs
-        # our class definitions to define proxies
-        import nepi.util.proxy as proxy
-        
-        conf = ConfigParser.RawConfigParser()
-        conf.read(os.path.join(self._root_dir, 'deployment_config.ini'))
-        for testbed_guid in conf.sections():
-            testbed_config = proxy.AccessConfiguration()
-            testbed_guid = str(testbed_guid)
-            for attr in testbed_config.get_attribute_list():
-                if attr not in TRANSIENT:
-                    getter = getattr(conf, TYPEMAP.get(
-                        testbed_config.get_attribute_type(attr),
-                        'get') )
-                    try:
-                        value = getter(testbed_guid, attr)
-                        testbed_config.set_attribute_value(attr, value)
-                    except ConfigParser.NoOptionError:
-                        # Leave default
-                        pass
-    
-    def _unpersist_testbed_proxies(self):
-        try:
-            os.remove(os.path.join(self._root_dir, 'deployment_config.ini'))
-        except:
-            # Just print exceptions, this is just cleanup
-            self._logger.exception("Loading testbed configuration")
-
-    def _update_execute_xml(self):
-        # For all testbeds,
-        #   For all elements in testbed,
-        #       - gather immutable execute-readable attribuets lists
-        #         asynchronously
-        # Generate new design description from design xml
-        # (Wait for attributes lists - implicit syncpoint)
-        # For all testbeds,
-        #   For all elements in testbed,
-        #       - gather all immutable execute-readable attribute
-        #         values, asynchronously
-        # (Wait for attribute values - implicit syncpoint)
-        # For all testbeds,
-        #   For all elements in testbed,
-        #       - inject non-None values into new design
-        # Generate execute xml from new design
-
-        attribute_lists = dict(
-            (testbed_guid, collections.defaultdict(dict))
-            for testbed_guid in self._testbeds
-        )
-        
-        for testbed_guid, testbed in self._testbeds.iteritems():
-            guids = self._guids_in_testbed(testbed_guid)
-            for guid in guids:
-                attribute_lists[testbed_guid][guid] = \
-                    testbed.get_attribute_list_deferred(guid, Attribute.ExecImmutable)
-        
-        parser = XmlExperimentParser()
-        execute_data = parser.from_xml_to_data(self._experiment_design_xml)
-
-        attribute_values = dict(
-            (testbed_guid, collections.defaultdict(dict))
-            for testbed_guid in self._testbeds
-        )
-        
-        for testbed_guid, testbed_attribute_lists in attribute_lists.iteritems():
-            testbed = self._testbeds[testbed_guid]
-            for guid, attribute_list in testbed_attribute_lists.iteritems():
-                attribute_list = _undefer(attribute_list)
-                attribute_values[testbed_guid][guid] = dict(
-                    (attribute, testbed.get_deferred(guid, attribute))
-                    for attribute in attribute_list
-                )
-        
-        for testbed_guid, testbed_attribute_values in attribute_values.iteritems():
-            for guid, attribute_values in testbed_attribute_values.iteritems():
-                for attribute, value in attribute_values.iteritems():
-                    value = _undefer(value)
-                    if value is not None:
-                        execute_data.add_attribute_data(guid, attribute, value)
-        
-        self._experiment_execute_xml = parser.to_xml(data=execute_data)
-
-    def stop(self):
-       for testbed in self._testbeds.values():
-           testbed.stop()
-       self._unpersist_testbed_proxies()
-       self._stopped_time = time.time() 
-   
-    def recover(self):
-        # reload perviously persisted testbed access configurations
-        self._failed_testbeds.clear()
-        self._load_testbed_proxies()
-
-        # re-program testbeds that need recovery
-        self._start(recover = True)
-
-    def is_finished(self, guid):
-        testbed = self._testbed_for_guid(guid)
-        if testbed != None:
-            return testbed.status(guid) == AS.STATUS_FINISHED
-        raise RuntimeError("No element exists with guid %d" % guid)    
-    
-    def _testbed_recovery_policy(self, guid, data = None):
-        if data is None:
-            parser = XmlExperimentParser()
-            data = parser.from_xml_to_data(self._experiment_design_xml)
-        
-        return data.get_attribute_data(guid, DC.RECOVERY_POLICY)
-
-    def status(self, guid):
-        if guid in self._testbeds:
-            # guid is a testbed
-            # report testbed status
-            if guid in self._failed_testbeds:
-                return TS.STATUS_FAILED
-            else:
-                try:
-                    return self._testbeds[guid].status()
-                except:
-                    return TS.STATUS_UNRESPONSIVE
-        else:
-            # guid is an element
-            testbed = self._testbed_for_guid(guid)
-            if testbed is not None:
-                return testbed.status(guid)
-            else:
-                return AS.STATUS_UNDETERMINED
-
-    def set(self, guid, name, value, time = TIME_NOW):
-        testbed = self._testbed_for_guid(guid)
-        if testbed != None:
-            testbed.set(guid, name, value, time)
-        else:
-            raise RuntimeError("No element exists with guid %d" % guid)    
-
-    def get(self, guid, name, time = TIME_NOW):
-        testbed = self._testbed_for_guid(guid)
-        if testbed != None:
-            return testbed.get(guid, name, time)
-        raise RuntimeError("No element exists with guid %d" % guid)    
-
-    def get_deferred(self, guid, name, time = TIME_NOW):
-        testbed = self._testbed_for_guid(guid)
-        if testbed != None:
-            return testbed.get_deferred(guid, name, time)
-        raise RuntimeError("No element exists with guid %d" % guid)    
-
-    def get_factory_id(self, guid):
-        testbed = self._testbed_for_guid(guid)
-        if testbed != None:
-            return testbed.get_factory_id(guid)
-        raise RuntimeError("No element exists with guid %d" % guid)    
-
-    def get_testbed_id(self, guid):
-        testbed = self._testbed_for_guid(guid)
-        if testbed != None:
-            return testbed.testbed_id
-        raise RuntimeError("No element exists with guid %d" % guid)    
-
-    def get_testbed_version(self, guid):
-        testbed = self._testbed_for_guid(guid)
-        if testbed != None:
-            return testbed.testbed_version
-        raise RuntimeError("No element exists with guid %d" % guid)    
-
-    def shutdown(self):
-        exceptions = list()
-        ordered_testbeds = set()
-
-        def shutdown_testbed(guid):
-            try:
-                testbed = self._testbeds[guid]
-                ordered_testbeds.add(guid)
-                testbed.shutdown()
-            except:
-                exceptions.append(sys.exc_info())
-                
-        self._logger.debug("ExperimentController: Starting parallel shutdown")
-        
-        for testbed_guids in reversed(self._testbed_order):
-            testbed_guids = set(testbed_guids) - ordered_testbeds
-            self._logger.debug("ExperimentController: Shutting down %r", testbed_guids)
-            self._parallel([functools.partial(shutdown_testbed, guid)
-                            for guid in testbed_guids])
-        remaining_guids = set(self._testbeds) - ordered_testbeds
-        if remaining_guids:
-            self._logger.debug("ExperimentController: Shutted down %r", ordered_testbeds)
-            self._logger.debug("ExperimentController: Shutting down %r", remaining_guids)
-            self._parallel([functools.partial(shutdown_testbed, guid)
-                            for guid in remaining_guids])
-            
-        for exc_info in exceptions:
-            raise exc_info[0], exc_info[1], exc_info[2]
-
-    def _testbed_for_guid(self, guid):
-        for testbed_guid in self._testbeds.keys():
-            if guid in self._guids_in_testbed(testbed_guid):
-                if testbed_guid in self._failed_testbeds:
-                    return None
-                return self._testbeds[testbed_guid]
-        return None
-
-    def _guids_in_testbed(self, testbed_guid):
-        if testbed_guid not in self._testbeds:
-            return set()
-        if testbed_guid not in self._guids_in_testbed_cache:
-            self._guids_in_testbed_cache[testbed_guid] = \
-                set(self._testbeds[testbed_guid].guids)
-        return self._guids_in_testbed_cache[testbed_guid]
-
-    @staticmethod
-    def _netref_component_split(component):
-        match = COMPONENT_PATTERN.match(component)
-        if match:
-            return match.group("kind"), match.group("index")
-        else:
-            return component, None
-
-    _NETREF_COMPONENT_GETTERS = {
-        'addr':
-            lambda testbed, guid, index, name: 
-                testbed.get_address(guid, int(index), name),
-        'route' :
-            lambda testbed, guid, index, name: 
-                testbed.get_route(guid, int(index), name),
-        'trace' :
-            lambda testbed, guid, index, name: 
-                testbed.trace(guid, index, attribute = name),
-        '' : 
-            lambda testbed, guid, index, name: 
-                testbed.get(guid, name),
-    }
-    
-    def resolve_netref_value(self, value, failval = None):
-        rv = failval
-        while True:
-            for match in ATTRIBUTE_PATTERN_BASE.finditer(value):
-                label = match.group("label")
-                if label.startswith('GUID-'):
-                    ref_guid = int(label[5:])
-                    if ref_guid:
-                        expr = match.group("expr")
-                        component = (match.group("component") or "")[1:] # skip the dot
-                        attribute = match.group("attribute")
-                        
-                        # split compound components into component kind and index
-                        # eg: 'addr[0]' -> ('addr', '0')
-                        component, component_index = self._netref_component_split(component)
-
-                        # find object and resolve expression
-                        for ref_testbed_guid, ref_testbed in self._testbeds.iteritems():
-                            if component not in self._NETREF_COMPONENT_GETTERS:
-                                raise ValueError, "Malformed netref: %r - unknown component" % (expr,)
-                            elif ref_guid not in self._guids_in_testbed(ref_testbed_guid):
-                                pass
-                            else:
-                                ref_value = self._NETREF_COMPONENT_GETTERS[component](
-                                    ref_testbed, ref_guid, component_index, attribute)
-                                if ref_value:
-                                    value = rv = value.replace(match.group(), ref_value)
-                                    break
-                        else:
-                            # unresolvable netref
-                            return failval
-                        break
-            else:
-                break
-        return rv
-    
-    def do_netrefs(self, data, fail_if_undefined = False):
-        # element netrefs
-        for (testbed_guid, guid), attrs in self._netrefs.items():
-            testbed = self._testbeds.get(testbed_guid)
-            if testbed is not None:
-                for name in set(attrs):
-                    value = testbed.get(guid, name)
-                    if isinstance(value, basestring):
-                        ref_value = self.resolve_netref_value(value)
-                        if ref_value is not None:
-                            testbed.set(guid, name, ref_value)
-                            attrs.remove(name)
-                        elif fail_if_undefined:
-                            raise ValueError, "Unresolvable netref in: %r=%r" % (name,value,)
-                if not attrs:
-                    del self._netrefs[(testbed_guid, guid)]
-        
-        # testbed netrefs
-        for testbed_guid, attrs in self._testbed_netrefs.items():
-            tb_data = dict(data.get_attribute_data(testbed_guid))
-            if data:
-                for name in set(attrs):
-                    value = tb_data.get(name)
-                    if isinstance(value, basestring):
-                        ref_value = self.resolve_netref_value(value)
-                        if ref_value is not None:
-                            data.set_attribute_data(testbed_guid, name, ref_value)
-                            attrs.remove(name)
-                        elif fail_if_undefined:
-                            raise ValueError, "Unresolvable netref in: %r" % (value,)
-                if not attrs:
-                    del self._testbed_netrefs[testbed_guid]
-        
-
-    def _init_testbed_controllers(self, data, recover = False):
-        blacklist_testbeds = set(self._testbeds)
-        element_guids = list()
-        label_guids = dict()
-        data_guids = data.guids
-        to_recover = set()
-        to_restart = set()
-
-        # gather label associations
-        for guid in data_guids:
-            if not data.is_testbed_data(guid):
-                (testbed_guid, factory_id) = data.get_box_data(guid)
-                label = data.get_attribute_data(guid, "label")
-                if label is not None:
-                    if label in label_guids:
-                        raise RuntimeError, "Label %r is not unique" % (label,)
-                    label_guids[label] = guid
-
-        # create testbed controllers
-        for guid in data_guids:
-            if data.is_testbed_data(guid):
-                if guid not in self._testbeds:
-                    try:
-                        self._create_testbed_controller(
-                            guid, data, element_guids, recover)
-                        if recover:
-                            # Already programmed
-                            blacklist_testbeds.add(guid)
-                        else:
-                            to_restart.add(guid)
-                    except:
-                        if recover:
-                            policy = self._testbed_recovery_policy(guid, data=data)
-                            if policy == DC.POLICY_RECOVER:
-                                self._create_testbed_controller(
-                                    guid, data, element_guids, False)
-                                to_recover.add(guid)
-                            elif policy == DC.POLICY_RESTART:
-                                self._create_testbed_controller(
-                                    guid, data, element_guids, False)
-                                to_restart.add(guid)
-                            else:
-                                # Mark failed
-                                self._failed_testbeds.add(guid)
-                        else:
-                            raise
-        
-        # queue programmable elements
-        #  - that have not been programmed already (blacklist_testbeds)
-        #  - including recovered or restarted testbeds
-        #  - but those that have no unresolved netrefs
-        for guid in data_guids:
-            if not data.is_testbed_data(guid):
-                (testbed_guid, factory_id) = data.get_box_data(guid)
-                if testbed_guid not in blacklist_testbeds:
-                    element_guids.append(guid)
-
-        # replace references to elements labels for its guid
-        self._resolve_labels(data, data_guids, label_guids)
-    
-        # program testbed controllers
-        if element_guids:
-            self._program_testbed_controllers(element_guids, data)
-        
-        return to_recover, to_restart
-
-    def _resolve_labels(self, data, data_guids, label_guids):
-        netrefs = self._netrefs
-        testbed_netrefs = self._testbed_netrefs
-        for guid in data_guids:
-            for name, value in data.get_attribute_data(guid):
-                if isinstance(value, basestring):
-                    while True:
-                        for match in ATTRIBUTE_PATTERN_BASE.finditer(value):
-                            label = match.group("label")
-                            if not label.startswith('GUID-'):
-                                ref_guid = label_guids.get(label)
-                                if ref_guid is not None:
-                                    value = value.replace(
-                                        match.group(),
-                                        ATTRIBUTE_PATTERN_GUID_SUB % dict(
-                                            guid = 'GUID-%d' % (ref_guid,),
-                                            expr = match.group("expr"),
-                                            label = label)
-                                    )
-                                    data.set_attribute_data(guid, name, value)
-                                    
-                                    # memorize which guid-attribute pairs require
-                                    # postprocessing, to avoid excessive controller-testbed
-                                    # communication at configuration time
-                                    # (which could require high-latency network I/O)
-                                    if not data.is_testbed_data(guid):
-                                        (testbed_guid, factory_id) = data.get_box_data(guid)
-                                        netrefs[(testbed_guid, guid)].add(name)
-                                    else:
-                                        testbed_netrefs[guid].add(name)
-                                    
-                                    break
-                        else:
-                            break
-
-    def _create_testbed_controller(self, guid, data, element_guids, recover):
-        (testbed_id, testbed_version) = data.get_testbed_data(guid)
-        deployment_config = self._deployment_config.get(guid)
-        
-        # deferred import because proxy needs
-        # our class definitions to define proxies
-        import nepi.util.proxy as proxy
-        
-        if deployment_config is None:
-            # need to create one
-            deployment_config = proxy.AccessConfiguration()
-            
-            for (name, value) in data.get_attribute_data(guid):
-                if value is not None and deployment_config.has_attribute(name):
-                    # if any deployment config attribute has a netref, we can't
-                    # create this controller yet
-                    if isinstance(value, basestring) and ATTRIBUTE_PATTERN_BASE.search(value):
-                        # remember to re-issue this one
-                        self._netreffed_testbeds.add(guid)
-                        return
-                    
-                    # copy deployment config attribute
-                    deployment_config.set_attribute_value(name, value)
-            
-            # commit config
-            self._deployment_config[guid] = deployment_config
-        
-        if deployment_config is not None:
-            # force recovery mode 
-            deployment_config.set_attribute_value("recover",recover)
-        
-        testbed = proxy.create_testbed_controller(testbed_id, testbed_version,
-                deployment_config)
-        for (name, value) in data.get_attribute_data(guid):
-            testbed.defer_configure(name, value)
-        self._testbeds[guid] = testbed
-        if guid in self._netreffed_testbeds:
-            self._netreffed_testbeds.remove(guid)
-
-    def _program_testbed_controllers(self, element_guids, data):
-        def resolve_create_netref(data, guid, name, value): 
-            # Try to resolve create-time netrefs, if possible
-            if isinstance(value, basestring) and ATTRIBUTE_PATTERN_BASE.search(value):
-                try:
-                    nuvalue = self.resolve_netref_value(value)
-                except:
-                    # Any trouble means we're not in shape to resolve the netref yet
-                    nuvalue = None
-                if nuvalue is not None:
-                    # Only if we succeed we remove the netref deferral entry
-                    value = nuvalue
-                    data.set_attribute_data(guid, name, value)
-                    if (testbed_guid, guid) in self._netrefs:
-                        self._netrefs[(testbed_guid, guid)].discard(name)
-            return value
-
-        for guid in element_guids:
-            (testbed_guid, factory_id) = data.get_box_data(guid)
-            testbed = self._testbeds.get(testbed_guid)
-            if testbed is not None:
-                # create
-                testbed.defer_create(guid, factory_id)
-                # set attributes
-                for (name, value) in data.get_attribute_data(guid):
-                    value = resolve_create_netref(data, guid, name, value)
-                    testbed.defer_create_set(guid, name, value)
-
-        for guid in element_guids:
-            (testbed_guid, factory_id) = data.get_box_data(guid)
-            testbed = self._testbeds.get(testbed_guid)
-            if testbed is not None:
-                # traces
-                for trace_id in data.get_trace_data(guid):
-                    testbed.defer_add_trace(guid, trace_id)
-                # addresses
-                for (address, netprefix, broadcast) in data.get_address_data(guid):
-                    if address != None:
-                        testbed.defer_add_address(guid, address, netprefix, 
-                                broadcast)
-                # routes
-                for (destination, netprefix, nexthop, metric, device) in \
-                        data.get_route_data(guid):
-                    testbed.defer_add_route(guid, destination, netprefix, nexthop, 
-                            metric, device)
-                # store connections data
-                for (connector_type_name, other_guid, other_connector_type_name) \
-                        in data.get_connection_data(guid):
-                    (other_testbed_guid, other_factory_id) = data.get_box_data(
-                            other_guid)
-                    if testbed_guid == other_testbed_guid:
-                        # each testbed should take care of enforcing internal
-                        # connection simmetry, so each connection is only
-                        # added in one direction
-                        testbed.defer_connect(guid, connector_type_name, 
-                                other_guid, other_connector_type_name)
-
-    def _program_testbed_cross_connections(self, data):
-        data_guids = data.guids
-        for guid in data_guids: 
-            if not data.is_testbed_data(guid):
-                (testbed_guid, factory_id) = data.get_box_data(guid)
-                testbed = self._testbeds.get(testbed_guid)
-                if testbed is not None:
-                    for (connector_type_name, cross_guid, cross_connector_type_name) \
-                            in data.get_connection_data(guid):
-                        (testbed_guid, factory_id) = data.get_box_data(guid)
-                        (cross_testbed_guid, cross_factory_id) = data.get_box_data(
-                                cross_guid)
-                        if testbed_guid != cross_testbed_guid:
-                            cross_testbed = self._testbeds[cross_testbed_guid]
-                            cross_testbed_id = cross_testbed.testbed_id
-                            testbed.defer_cross_connect(guid, connector_type_name, cross_guid, 
-                                    cross_testbed_guid, cross_testbed_id, cross_factory_id, 
-                                    cross_connector_type_name)
-                            # save cross data for later
-                            self._logger.debug("ExperimentController: adding cross_connection data tbd=%d:guid=%d - tbd=%d:guid=%d" % \
-                                    (testbed_guid, guid, cross_testbed_guid, cross_guid))
-                            self._add_crossdata(testbed_guid, guid, cross_testbed_guid,
-                                    cross_guid)
-
-    def _add_crossdata(self, testbed_guid, guid, cross_testbed_guid, cross_guid):
-        if testbed_guid not in self._cross_data:
-            self._cross_data[testbed_guid] = dict()
-        if cross_testbed_guid not in self._cross_data[testbed_guid]:
-            self._cross_data[testbed_guid][cross_testbed_guid] = set()
-        self._cross_data[testbed_guid][cross_testbed_guid].add(cross_guid)
-
-    def _get_cross_data(self, testbed_guid):
-        cross_data = dict()
-        if not testbed_guid in self._cross_data:
-            return cross_data
-
-        # fetch attribute lists in one batch
-        attribute_lists = dict()
-        for cross_testbed_guid, guid_list in \
-                self._cross_data[testbed_guid].iteritems():
-            cross_testbed = self._testbeds[cross_testbed_guid]
-            for cross_guid in guid_list:
-                attribute_lists[(cross_testbed_guid, cross_guid)] = \
-                    cross_testbed.get_attribute_list_deferred(cross_guid)
-
-        # fetch attribute values in another batch
-        for cross_testbed_guid, guid_list in \
-                self._cross_data[testbed_guid].iteritems():
-            cross_data[cross_testbed_guid] = dict()
-            cross_testbed = self._testbeds[cross_testbed_guid]
-            for cross_guid in guid_list:
-                elem_cross_data = dict(
-                    _guid = cross_guid,
-                    _testbed_guid = cross_testbed_guid,
-                    _testbed_id = cross_testbed.testbed_id,
-                    _testbed_version = cross_testbed.testbed_version)
-                cross_data[cross_testbed_guid][cross_guid] = elem_cross_data
-                attribute_list = attribute_lists[(cross_testbed_guid,cross_guid)]
-                for attr_name in attribute_list:
-                    attr_value = cross_testbed.get_deferred(cross_guid, attr_name)
-                    elem_cross_data[attr_name] = attr_value
-        
-        # undefer all values - we'll have to serialize them probably later
-        for cross_testbed_guid, testbed_cross_data in cross_data.iteritems():
-            for cross_guid, elem_cross_data in testbed_cross_data.iteritems():
-                for attr_name, attr_value in elem_cross_data.iteritems():
-                    elem_cross_data[attr_name] = _undefer(attr_value)
-        
-        return cross_data
-
-class ExperimentSuite(object):
-    def __init__(self, experiment_xml, access_config, repetitions = None,
-            duration = None, wait_guids = None):
-        self._experiment_xml = experiment_xml
-        self._access_config = access_config
-        self._controllers = dict()
-        self._access_configs = dict()
-        self._repetitions = 1 if not repetitions else repetitions
-        self._duration = duration
-        self._wait_guids = wait_guids
-        self._current = None
-        self._status = TS.STATUS_ZERO
-        self._thread = None
-
-    def current(self):
-        return self._current
-
-    def status(self):
-        return self._status
-
-    def is_finished(self):
-        return self._status == TS.STATUS_STOPPED
-
-    def get_access_configurations(self):
-        return self._access_configs.values()
-
-    def start(self):
-        self._status  = TS.STATUS_STARTED
-        self._thread = threading.Thread(target = self._run_experiment_suite)
-        self._thread.start()
-
-    def shutdown(self):
-        if self._thread:
-            self._thread.join()
-            self._thread = None
-        for controller in self._controllers.values():
-            controller.shutdown()
-
-    def get_current_access_config(self):
-        return self._access_configs[self._current]
-
-    def _run_experiment_suite(self):
-        for i in xrange(1, self._repetitions):
-            self._current = i
-            self._run_one_experiment()
-        self._status = TS.STATUS_STOPPED
-
-    def _run_one_experiment(self):
-        from nepi.util import proxy
-        access_config = proxy.AccessConfiguration()
-        for attr in self._access_config.attributes:
-            if attr.value:
-                access_config.set_attribute_value(attr.name, attr.value)
-        access_config.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        root_dir = "%s_%d" % (
-                access_config.get_attribute_value(DC.ROOT_DIRECTORY), 
-                self._current)
-        access_config.set_attribute_value(DC.ROOT_DIRECTORY, root_dir)
-        controller = proxy.create_experiment_controller(self._experiment_xml,
-                access_config)
-        self._access_configs[self._current] = access_config
-        self._controllers[self._current] = controller
-        controller.start()
-        started_at = time.time()
-        # wait until all specified guids have finished execution
-        if self._wait_guids:
-            while all(itertools.imap(controller.is_finished, self._wait_guids)):
-                time.sleep(0.5)
-        # wait until the minimum experiment duration time has elapsed 
-        if self._duration:
-            while (time.time() - started_at) < self._duration:
-                time.sleep(0.5)
-        controller.stop()
-
diff --git a/src/nepi/core/factory.py b/src/nepi/core/factory.py
deleted file mode 100644 (file)
index 50243e4..0000000
+++ /dev/null
@@ -1,262 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from nepi.core.attributes import AttributesMap, Attribute
-from nepi.util import tags
-from nepi.util.tags import Taggable
-
-class AddressableMixin(object):
-    def __init__(self, guid, factory, testbed_guid, container = None):
-        super(AddressableMixin, self).__init__(guid, factory, testbed_guid, 
-                container)
-        max_addr = self._factory_attributes["maxAddresses"]
-        self.set_attribute_value("maxAddresses", max_addr)
-        self._addresses = list()
-
-    @property
-    def addresses(self):
-        return self._addresses
-
-    @property
-    def max_addresses(self):
-        return self.get_attribute_value("maxAddresses")
-
-class UserAddressableMixin(AddressableMixin):
-    def __init__(self, guid, factory, testbed_guid, container = None):
-        super(UserAddressableMixin, self).__init__(guid, factory, testbed_guid, 
-                container)
-
-    def add_address(self):
-        if len(self._addresses) == self.max_addresses:
-            raise RuntimeError("Maximun number of addresses for this box reached.")
-        from nepi.core.design import Address
-        address = Address()
-        self._addresses.append(address)
-        return address
-
-    def delete_address(self, address):
-        self._addresses.remove(address)
-        del address
-
-    def destroy(self):
-        super(UserAddressableMixin, self).destroy()
-        for address in list(self.addresses):
-            self.delete_address(address)
-        self._addresses = None
-
-class RoutableMixin(object):
-    def __init__(self, guid, factory, testbed_guid, container = None):
-        super(RoutableMixin, self).__init__(guid, factory, testbed_guid, 
-            container)
-        self._routes = list()
-
-    @property
-    def routes(self):
-        return self._routes
-
-class UserRoutableMixin(RoutableMixin):
-    def __init__(self, guid, factory, testbed_guid, container = None):
-        super(UserRoutableMixin, self).__init__(guid, factory, testbed_guid, 
-            container)
-
-    def add_route(self):
-        from nepi.core.design import Route
-        route = Route()
-        self._routes.append(route)
-        return route
-
-    def delete_route(self, route):
-        self._routes.remove(route)
-        del route
-
-    def destroy(self):
-        super(UserRoutableMixin, self).destroy()
-        for route in list(self.routes):
-            self.delete_route(route)
-        self._route = None
-
-def MixIn(MyClass, MixIn):
-    # Mixins are installed BEFORE "Box" because
-    # Box inherits from non-cooperative classes,
-    # so the MRO chain gets broken when it gets
-    # to Box.
-
-    # Install mixin
-    MyClass.__bases__ = (MixIn,) + MyClass.__bases__
-    
-    # Add properties
-    # Somehow it doesn't work automatically
-    for name in dir(MixIn):
-        prop = getattr(MixIn,name,None)
-        if isinstance(prop, property):
-            setattr(MyClass, name, prop)
-    
-    # Update name
-    MyClass.__name__ = MyClass.__name__.replace(
-        'Box',
-        MixIn.__name__.replace('MixIn','')+'Box',
-        1)
-
-class Factory(AttributesMap, Taggable):
-    _box_class_cache = {}
-
-    def __init__(self, factory_id,
-            create_function, 
-            start_function, 
-            stop_function, 
-            status_function, 
-            configure_function, 
-            preconfigure_function,
-            prestart_function,
-            help = None,
-            category = None):
-
-        super(Factory, self).__init__()
-
-        self._factory_id = factory_id
-        self._create_function = create_function
-        self._start_function = start_function
-        self._stop_function = stop_function
-        self._status_function = status_function
-        self._configure_function = configure_function
-        self._preconfigure_function = preconfigure_function
-        self._prestart_function = prestart_function
-        self._help = help
-        self._category = category
-        self._connector_types = dict()
-        self._traces = dict()
-        self._box_attributes = AttributesMap()
-        self._factory = None
-
-    @property
-    def factory(self):
-        if self._factory:
-            return self._factory
-
-        from nepi.core.design import Box
-
-        if not self.has_addresses and not self.has_routes:
-            self._factory = Box
-        else:
-            addresses = 'w' if self.allow_addresses else ('r' if self.has_addresses else '-')
-            routes    = 'w' if self.allow_routes else ('r' if self.has_routes else '-')
-            key = addresses+routes
-            
-            if key in self._box_class_cache:
-                self._factory = self._box_class_cache[key]
-            else:
-                # Create base class
-                class _factory(Box):
-                    def __init__(self, guid, factory, testbed_guid, container = None):
-                        super(_factory, self).__init__(guid, factory, testbed_guid, container)
-                
-                # Add mixins, one by one
-                if self.allow_addresses:
-                    MixIn(_factory, UserAddressableMixin)
-                elif self.has_addresses:
-                    MixIn(_factory, AddressableMixin)
-                    
-                if self.allow_routes:
-                    MixIn(_factory, UserRoutableMixin)
-                elif self.has_routes:
-                    MixIn(_factory, RoutableMixin)
-                
-                # Put into cache
-                self._box_class_cache[key] = self._factory = _factory
-        return self._factory
-
-    @property
-    def factory_id(self):
-        return self._factory_id
-
-    @property
-    def allow_addresses(self):
-        return self.has_tag(tags.ALLOW_ADDRESSES)
-
-    @property
-    def allow_routes(self):
-        return self.has_tag(tags.ALLOW_ROUTES)
-
-    @property
-    def has_addresses(self):
-        return self.has_tag(tags.HAS_ADDRESSES) or \
-                self.allow_addresses
-
-    @property
-    def has_routes(self):
-        return self.has_tag(tags.HAS_ROUTES) or \
-                self.allow_routes
-
-    @property
-    def help(self):
-        return self._help
-
-    @property
-    def category(self):
-        return self._category
-
-    @property
-    def connector_types(self):
-        return self._connector_types.values()
-
-    @property
-    def traces(self):
-        return self._traces.values()
-
-    @property
-    def traces_list(self):
-        return self._traces.keys()
-
-    @property
-    def box_attributes(self):
-        return self._box_attributes
-
-    @property
-    def create_function(self):
-        return self._create_function
-
-    @property
-    def prestart_function(self):
-        return self._prestart_function
-
-    @property
-    def start_function(self):
-        return self._start_function
-
-    @property
-    def stop_function(self):
-        return self._stop_function
-
-    @property
-    def status_function(self):
-        return self._status_function
-
-    @property
-    def configure_function(self):
-        return self._configure_function
-
-    @property
-    def preconfigure_function(self):
-        return self._preconfigure_function
-
-    def connector_type(self, name):
-        return self._connector_types[name]
-
-    def add_connector_type(self, connector_type):
-        self._connector_types[connector_type.name] = connector_type
-
-    def add_trace(self, name, help, enabled = False):
-        self._traces[name] = (name, help, enabled)
-
-    def add_box_attribute(self, name, help, type, value = None, range = None,
-        allowed = None, flags = Attribute.NoFlags, validation_function = None,
-        category = None):
-        self._box_attributes.add_attribute(name, help, type, value, range,
-                allowed, flags, validation_function, category)
-
-    def create(self, guid, testbed_description):
-        return self.factory(guid, self, testbed_description.guid)
-
-    def destroy(self):
-        super(Factory, self).destroy()
-        self._connector_types = None
-
diff --git a/src/nepi/core/metadata.py b/src/nepi/core/metadata.py
deleted file mode 100644 (file)
index 0dbf39e..0000000
+++ /dev/null
@@ -1,719 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from nepi.core.attributes import Attribute, AttributesMap
-from nepi.core.connector import ConnectorType
-from nepi.core.factory import Factory
-import sys
-import getpass
-import nepi.util.environ
-from nepi.util import tags, validation
-from nepi.util.constants import ATTR_NEPI_TESTBED_ENVIRONMENT_SETUP, \
-        DeploymentConfiguration as DC, \
-        AttributeCategories as AC
-
-class Parallel(object):
-    def __init__(self, factory, maxthreads = 64):
-        self.factory = factory
-        self.maxthreads = maxthreads
-
-class MetadataInfo(object):
-    @property
-    def connector_types(self):
-        """ dictionary of dictionaries with allowed connection information.
-            connector_id: dict({
-                "help": help text, 
-                "name": connector type name,
-                "max": maximum number of connections allowed (-1 for no limit),
-                "min": minimum number of connections allowed
-            }),
-        """
-        raise NotImplementedError
-
-    @property
-    def connections(self):
-        """ array of dictionaries with allowed connection information.
-        dict({
-            "from": (testbed_id1, factory_id1, connector_type_name1),
-            "to": (testbed_id2, factory_id2, connector_type_name2),
-            "init_code": connection function to invoke for connection initiation
-            "compl_code": connection function to invoke for connection 
-                completion
-            "can_cross": whether the connection can be done across testbed 
-                            instances
-         }),
-        """
-        raise NotImplementedError
-
-    @property
-    def attributes(self):
-        """ dictionary of dictionaries of all available attributes.
-            attribute_id: dict({
-                "name": attribute name,
-                "help": help text,
-                "type": attribute type, 
-                "value": default attribute value,
-                "range": (maximum, minimun) values else None if not defined,
-                "allowed": array of posible values,
-                "flags": attributes flags,
-                "validation_function": validation function for the attribute
-                "category": category for the attribute
-            })
-        """
-        raise NotImplementedError
-
-    @property
-    def traces(self):
-        """ dictionary of dictionaries of all available traces.
-            trace_id: dict({
-                "name": trace name,
-                "help": help text
-            })
-        """
-        raise NotImplementedError
-
-    @property
-    def create_order(self):
-        """ list of factory ids that indicates the order in which the elements
-        should be instantiated. If wrapped within a Parallel instance, they
-        will be instantiated in parallel.
-        """
-        raise NotImplementedError
-
-    @property
-    def configure_order(self):
-        """ list of factory ids that indicates the order in which the elements
-        should be configured. If wrapped within a Parallel instance, they
-        will be configured in parallel.
-        """
-        raise NotImplementedError
-
-    @property
-    def preconfigure_order(self):
-        """ list of factory ids that indicates the order in which the elements
-        should be preconfigured. If wrapped within a Parallel instance, they
-        will be configured in parallel.
-        
-        Default: same as configure_order
-        """
-        return self.configure_order
-
-    @property
-    def prestart_order(self):
-        """ list of factory ids that indicates the order in which the elements
-        should be prestart-configured. If wrapped within a Parallel instance, they
-        will be configured in parallel.
-        
-        Default: same as configure_order
-        """
-        return self.configure_order
-
-    @property
-    def start_order(self):
-        """ list of factory ids that indicates the order in which the elements
-        should be started. If wrapped within a Parallel instance, they
-        will be started in parallel.
-        
-        Default: same as configure_order
-        """
-        return self.configure_order
-
-    @property
-    def factories_info(self):
-        """ dictionary of dictionaries of factory specific information
-            factory_id: dict({
-                "help": help text,
-                "category": category the element belongs to,
-                "create_function": function for element instantiation,
-                "start_function": function for element starting,
-                "stop_function": function for element stoping,
-                "status_function": function for retrieving element status,
-                "preconfigure_function": function for element preconfiguration,
-                    (just after connections are made, 
-                    just before netrefs are resolved)
-                "configure_function": function for element configuration,
-                "prestart_function": function for pre-start
-                    element configuration (just before starting applications),
-                    useful for synchronization of background setup tasks or
-                    lazy instantiation or configuration of attributes
-                    that require connection/cross-connection state before
-                    being created.
-                    After this point, all applications should be able to run.
-                "factory_attributes": list of references to attribute_ids,
-                "box_attributes": list of regerences to attribute_ids,
-                "traces": list of references to trace_id
-                "tags": list of references to tag_id
-                "connector_types": list of references to connector_types
-           })
-        """
-        raise NotImplementedError
-
-    @property
-    def testbed_attributes(self):
-        """ dictionary of attributes for testbed instance configuration
-            attributes_id = dict({
-                "name": attribute name,
-                "help": help text,
-                "type": attribute type, 
-                "value": default attribute value,
-                "range": (maximum, minimun) values else None if not defined,
-                "allowed": array of posible values,
-                "flags": attributes flags,
-                "validation_function": validation function for the attribute
-                "category": category for the attribute
-             })
-            ]
-        """
-        raise NotImplementedError
-
-    @property
-    def testbed_id(self):
-        """ ID for the testbed """
-        raise NotImplementedError
-
-    @property
-    def testbed_version(self):
-        """ version for the testbed """
-        raise NotImplementedError
-
-class Metadata(object):
-    # These attributes should be added to all boxes
-    STANDARD_BOX_ATTRIBUTES = dict({
-        "label" : dict({
-            "name" : "label",
-            "validation_function" : validation.is_string,
-            "type" : Attribute.STRING,
-            "flags" : Attribute.ExecReadOnly |\
-                    Attribute.ExecImmutable |\
-                    Attribute.Metadata,
-            "help" : "A unique identifier for referring to this box",
-        }),
-     })
-
-    # These are the attribute definitions for tagged attributes
-    STANDARD_TAGGED_ATTRIBUTES_DEFINITIONS = dict({
-        "maxAddresses" : dict({
-            "name" : "maxAddresses",
-            "validation_function" : validation.is_integer,
-            "type" : Attribute.INTEGER,
-            "value" : 1,
-            "flags" : Attribute.DesignReadOnly |\
-                    Attribute.ExecInvisible |\
-                    Attribute.Metadata,
-            "help" : "The maximum allowed number of addresses",
-            }),
-        })
-
-    # Attributes to be added to all boxes with specific tags
-    STANDARD_TAGGED_BOX_ATTRIBUTES = dict({
-        tags.ALLOW_ADDRESSES : ["maxAddresses"],
-        tags.HAS_ADDRESSES : ["maxAddresses"],
-    })
-
-    # These attributes should be added to all testbeds
-    STANDARD_TESTBED_ATTRIBUTES = dict({
-        "home_directory" : dict({
-            "name" : "homeDirectory",
-            "validation_function" : validation.is_string,
-            "help" : "Path to the directory where traces and other files will be stored",
-            "type" : Attribute.STRING,
-            "value" : "",
-            "flags" : Attribute.ExecReadOnly |\
-                    Attribute.ExecImmutable |\
-                    Attribute.Metadata,
-            }),
-        "label" : dict({
-            "name" : "label",
-            "validation_function" : validation.is_string,
-            "type" : Attribute.STRING,
-            "flags" : Attribute.ExecReadOnly |\
-                    Attribute.ExecImmutable |\
-                    Attribute.Metadata,
-            "help" : "A unique identifier for referring to this testbed",
-            }),
-        })
-    
-    # These attributes should be added to all testbeds
-    DEPLOYMENT_ATTRIBUTES = dict({
-        # TESTBED DEPLOYMENT ATTRIBUTES
-        DC.DEPLOYMENT_ENVIRONMENT_SETUP : dict({
-            "name" : DC.DEPLOYMENT_ENVIRONMENT_SETUP,
-            "validation_function" : validation.is_string,
-            "help" : "Shell commands to run before spawning TestbedController processes",
-            "type" : Attribute.STRING,
-            "flags" : Attribute.ExecReadOnly |\
-                    Attribute.ExecImmutable |\
-                    Attribute.Metadata,
-            "category" : AC.CATEGORY_DEPLOYMENT,
-        }),
-        DC.DEPLOYMENT_MODE: dict({
-            "name" : DC.DEPLOYMENT_MODE,
-            "help" : "Instance execution mode",
-            "type" : Attribute.ENUM,
-            "value" : DC.MODE_SINGLE_PROCESS,
-            "allowed" : [
-                    DC.MODE_DAEMON,
-                    DC.MODE_SINGLE_PROCESS
-                ],
-           "flags" : Attribute.ExecReadOnly |\
-                    Attribute.ExecImmutable |\
-                    Attribute.Metadata,
-            "validation_function" : validation.is_enum,
-            "category" : AC.CATEGORY_DEPLOYMENT,
-            }),
-        DC.DEPLOYMENT_COMMUNICATION : dict({
-            "name" : DC.DEPLOYMENT_COMMUNICATION,
-            "help" : "Instance communication mode",
-            "type" : Attribute.ENUM,
-            "value" : DC.ACCESS_LOCAL,
-            "allowed" : [
-                    DC.ACCESS_LOCAL,
-                    DC.ACCESS_SSH
-                ],
-            "flags" : Attribute.ExecReadOnly |\
-                    Attribute.ExecImmutable |\
-                    Attribute.Metadata,
-            "validation_function" : validation.is_enum,
-            "category" : AC.CATEGORY_DEPLOYMENT,
-            }),
-        DC.DEPLOYMENT_HOST : dict({
-            "name" : DC.DEPLOYMENT_HOST,
-            "help" : "Host where the testbed will be executed",
-            "type" : Attribute.STRING,
-            "value" : "localhost",
-            "flags" : Attribute.ExecReadOnly |\
-                    Attribute.ExecImmutable |\
-                    Attribute.Metadata,
-            "validation_function" : validation.is_string,
-            "category" : AC.CATEGORY_DEPLOYMENT,
-            }),
-        DC.DEPLOYMENT_USER : dict({
-            "name" : DC.DEPLOYMENT_USER,
-            "help" : "User on the Host to execute the testbed",
-            "type" : Attribute.STRING,
-            "value" : getpass.getuser(),
-            "flags" : Attribute.ExecReadOnly |\
-                    Attribute.ExecImmutable |\
-                    Attribute.Metadata,
-            "validation_function" : validation.is_string,
-            "category" : AC.CATEGORY_DEPLOYMENT,
-            }),
-        DC.DEPLOYMENT_KEY : dict({
-            "name" : DC.DEPLOYMENT_KEY,
-            "help" : "Path to SSH key to use for connecting",
-            "type" : Attribute.STRING,
-            "flags" : Attribute.ExecReadOnly |\
-                    Attribute.ExecImmutable |\
-                    Attribute.Metadata,
-            "validation_function" : validation.is_string,
-            "category" : AC.CATEGORY_DEPLOYMENT,
-            }),
-        DC.DEPLOYMENT_PORT : dict({
-            "name" : DC.DEPLOYMENT_PORT,
-            "help" : "Port on the Host",
-            "type" : Attribute.INTEGER,
-            "value" : 22,
-            "flags" : Attribute.ExecReadOnly |\
-                    Attribute.ExecImmutable |\
-                    Attribute.Metadata,
-            "validation_function" : validation.is_integer,
-            "category" : AC.CATEGORY_DEPLOYMENT,
-            }),
-        DC.ROOT_DIRECTORY : dict({
-            "name" : DC.ROOT_DIRECTORY,
-            "help" : "Root directory for storing process files",
-            "type" : Attribute.STRING,
-            "value" : ".",
-            "flags" : Attribute.ExecReadOnly |\
-                    Attribute.ExecImmutable |\
-                    Attribute.Metadata,
-            "validation_function" : validation.is_string, # TODO: validation.is_path
-            "category" : AC.CATEGORY_DEPLOYMENT,
-            }),
-        DC.USE_AGENT : dict({
-            "name" : DC.USE_AGENT,
-            "help" : "Use -A option for forwarding of the authentication agent, if ssh access is used", 
-            "type" : Attribute.BOOL,
-            "value" : False,
-            "flags" : Attribute.ExecReadOnly |\
-                    Attribute.ExecImmutable |\
-                    Attribute.Metadata,
-            "validation_function" : validation.is_bool,
-            "category" : AC.CATEGORY_DEPLOYMENT,
-            }),
-        DC.USE_SUDO : dict({
-            "name" : DC.USE_SUDO,
-            "help" : "Use sudo to run the deamon process. This option only take flace when the server runs in daemon mode.", 
-            "type" : Attribute.BOOL,
-            "value" : False,
-            "flags" : Attribute.ExecReadOnly |\
-                    Attribute.ExecImmutable |\
-                    Attribute.Metadata,
-            "validation_function" : validation.is_bool,
-            "category" : AC.CATEGORY_DEPLOYMENT,
-            }),
-        DC.CLEAN_ROOT : dict({
-            "name" : DC.CLEAN_ROOT,
-            "help" : "Clean server root directory (Warning: This will erase previous data).", 
-            "type" : Attribute.BOOL,
-            "value" : False,
-            "flags" : Attribute.ExecReadOnly |\
-                    Attribute.ExecImmutable |\
-                    Attribute.Metadata,
-            "validation_function" : validation.is_bool,
-            "category" : AC.CATEGORY_DEPLOYMENT,
-            }),        
-        DC.LOG_LEVEL : dict({
-            "name" : DC.LOG_LEVEL,
-            "help" : "Log level for instance",
-            "type" : Attribute.ENUM,
-            "value" : DC.ERROR_LEVEL,
-            "allowed" : [
-                    DC.ERROR_LEVEL,
-                    DC.DEBUG_LEVEL
-                ],
-            "flags" : Attribute.ExecReadOnly |\
-                    Attribute.ExecImmutable |\
-                    Attribute.Metadata,
-            "validation_function" : validation.is_enum,
-            "category" : AC.CATEGORY_DEPLOYMENT,
-            }),
-        DC.RECOVERY_POLICY : dict({
-            "name" : DC.RECOVERY_POLICY,
-            "help" : "Specifies what action to take in the event of a failure.", 
-            "type" : Attribute.ENUM,
-            "value" : DC.POLICY_FAIL,
-            "allowed" : [
-                    DC.POLICY_FAIL,
-                    DC.POLICY_RECOVER,
-                    DC.POLICY_RESTART,
-                ],
-            "flags" : Attribute.ExecReadOnly |\
-                    Attribute.ExecImmutable |\
-                    Attribute.Metadata,
-            "validation_function" : validation.is_enum,
-            "category" : AC.CATEGORY_DEPLOYMENT,
-            }),
-        })
-    PROXY_ATTRIBUTES = dict({
-        DC.RECOVER : dict({
-            "name" : DC.RECOVER,
-            "help" : "Do not intantiate testbeds, rather, reconnect to already-running instances. Used to recover from a dead controller.", 
-            "type" : Attribute.BOOL,
-            "value" : False,
-            "flags" : Attribute.ExecReadOnly |\
-                    Attribute.ExecImmutable |\
-                    Attribute.Metadata,
-            "validation_function" : validation.is_bool,
-            "category" : AC.CATEGORY_DEPLOYMENT,
-            }),
-        })
-    PROXY_ATTRIBUTES.update(DEPLOYMENT_ATTRIBUTES)
-  
-    # These attributes could appear in the boxes attribute list
-    STANDARD_BOX_ATTRIBUTE_DEFINITIONS = dict({
-        "tun_proto" : dict({
-            "name" : "tun_proto", 
-            "help" : "TUNneling protocol used",
-            "type" : Attribute.STRING,
-            "flags" : Attribute.DesignInvisible | \
-                    Attribute.ExecInvisible | \
-                    Attribute.ExecImmutable | \
-                    Attribute.Metadata,
-            "validation_function" : validation.is_string,
-            }),
-        "tun_key" : dict({
-            "name" : "tun_key", 
-            "help" : "Randomly selected TUNneling protocol cryptographic key. "
-                     "Endpoints must agree to use the minimum (in lexicographic order) "
-                     "of both the remote and local sides.",
-            "type" : Attribute.STRING,
-            "flags" : Attribute.DesignInvisible | \
-                    Attribute.ExecInvisible | \
-                    Attribute.ExecImmutable | \
-                    Attribute.Metadata,
-            "validation_function" : validation.is_string,
-            }),
-        "tun_addr" : dict({
-            "name": "tun_addr", 
-            "help" : "Address (IP, unix socket, whatever) of the tunnel endpoint",
-            "type" : Attribute.STRING,
-            "flags" : Attribute.DesignInvisible | \
-                    Attribute.ExecInvisible | \
-                    Attribute.ExecImmutable | \
-                    Attribute.Metadata,
-            "validation_function" : validation.is_string,
-            }),
-        "tun_port" : dict({
-            "name" : "tun_port", 
-            "help" : "IP port of the tunnel endpoint",
-            "type" : Attribute.INTEGER,
-            "flags" : Attribute.DesignInvisible | \
-                    Attribute.ExecInvisible | \
-                    Attribute.ExecImmutable | \
-                    Attribute.Metadata,
-            "validation_function" : validation.is_integer,
-            }),
-        "tun_cipher" : dict({
-            "name" : "tun_cipher", 
-            "help" : "Cryptographic cipher used for tunnelling",
-            "type" : Attribute.ENUM,
-            "value" : "AES",
-            "allowed" : [
-                "AES",
-                "Blowfish",
-                "DES3",
-                "DES",
-                "PLAIN",
-            ],
-            "flags" : Attribute.ExecImmutable | \
-                      Attribute.Metadata,
-            "validation_function" : validation.is_enum,
-            }),
-        ATTR_NEPI_TESTBED_ENVIRONMENT_SETUP : dict({
-            "name" : ATTR_NEPI_TESTBED_ENVIRONMENT_SETUP,
-            "help" : "Commands to set up the environment needed to run NEPI testbeds",
-            "type" : Attribute.STRING,
-            "flags" : Attribute.DesignInvisible | \
-                    Attribute.ExecInvisible | \
-                    Attribute.ExecImmutable | \
-                    Attribute.Metadata,
-            "validation_function" : validation.is_string
-            }),
-        })
-    
-    STANDARD_TESTBED_ATTRIBUTES.update(DEPLOYMENT_ATTRIBUTES.copy())
-
-    def __init__(self, testbed_id):
-        self._testbed_id = testbed_id
-        metadata_module = self._load_metadata_module()
-        self._metadata = metadata_module.MetadataInfo()
-        if testbed_id != self._metadata.testbed_id:
-            raise RuntimeError("Bad testbed id. Asked for %s, got %s" % \
-                    (testbed_id, self._metadata.testbed_id ))
-
-    @property
-    def create_order(self):
-        return self._metadata.create_order
-
-    @property
-    def configure_order(self):
-        return self._metadata.configure_order
-
-    @property
-    def preconfigure_order(self):
-        return self._metadata.preconfigure_order
-
-    @property
-    def prestart_order(self):
-        return self._metadata.prestart_order
-
-    @property
-    def start_order(self):
-        return self._metadata.start_order
-
-    @property
-    def testbed_version(self):
-        return self._metadata.testbed_version
-
-    @property
-    def testbed_id(self):
-        return self._testbed_id
-    
-    @property
-    def supported_recovery_policies(self):
-        return self._metadata.supported_recovery_policies
-
-    def testbed_attributes(self):
-        attributes = AttributesMap()
-        testbed_attributes = self._testbed_attributes()
-        self._add_attributes(attributes.add_attribute, testbed_attributes)
-        return attributes
-
-    def build_factories(self):
-        factories = list()
-        for factory_id, info in self._metadata.factories_info.iteritems():
-            create_function = info.get("create_function")
-            start_function = info.get("start_function")
-            stop_function = info.get("stop_function")
-            status_function = info.get("status_function")
-            configure_function = info.get("configure_function")
-            preconfigure_function = info.get("preconfigure_function")
-            prestart_function = info.get("prestart_function")
-            help = info["help"]
-            category = info["category"]
-            factory = Factory(factory_id, 
-                    create_function, 
-                    start_function,
-                    stop_function, 
-                    status_function, 
-                    configure_function, 
-                    preconfigure_function,
-                    prestart_function,
-                    help,
-                    category)
-                    
-            factory_attributes = self._factory_attributes(info)
-            self._add_attributes(factory.add_attribute, factory_attributes)
-            box_attributes = self._box_attributes(info)
-            self._add_attributes(factory.add_box_attribute, box_attributes)
-            
-            self._add_traces(factory, info)
-            self._add_tags(factory, info)
-            self._add_connector_types(factory, info)
-            factories.append(factory)
-        return factories
-
-    def _load_metadata_module(self):
-        mod_name = nepi.util.environ.find_testbed(self._testbed_id) + ".metadata"
-        if not mod_name in sys.modules:
-            __import__(mod_name)
-        return sys.modules[mod_name]
-
-    def _testbed_attributes(self):
-        # standar attributes
-        attributes = self.STANDARD_TESTBED_ATTRIBUTES.copy()
-        # custom attributes
-        attributes.update(self._metadata.testbed_attributes.copy())
-        return attributes
-        
-    def _factory_attributes(self, info):
-        tagged_attributes = self._tagged_attributes(info)
-        if "factory_attributes" in info:
-            definitions = self._metadata.attributes.copy()
-            # filter attributes corresponding to the factory_id
-            factory_attributes = self._filter_attributes(info["factory_attributes"], 
-                definitions)
-        else:
-            factory_attributes = dict()
-        attributes = dict(tagged_attributes.items() + \
-                factory_attributes.items())
-        return attributes
-
-    def _box_attributes(self, info):
-        tagged_attributes = self._tagged_attributes(info)
-        if "box_attributes" in info:
-            definitions = self.STANDARD_BOX_ATTRIBUTE_DEFINITIONS.copy()
-            definitions.update(self._metadata.attributes)
-            box_attributes = self._filter_attributes(info["box_attributes"], 
-                definitions)
-        else:
-            box_attributes = dict()
-        attributes = dict(tagged_attributes.items() + \
-                box_attributes.items())
-        attributes.update(self.STANDARD_BOX_ATTRIBUTES.copy())
-        return attributes
-
-    def _tagged_attributes(self, info):
-        tagged_attributes = dict()
-        for tag_id in info.get("tags", []):
-            if tag_id in self.STANDARD_TAGGED_BOX_ATTRIBUTES:
-                attr_list = self.STANDARD_TAGGED_BOX_ATTRIBUTES[tag_id]
-                attributes = self._filter_attributes(attr_list,
-                    self.STANDARD_TAGGED_ATTRIBUTES_DEFINITIONS)
-                tagged_attributes.update(attributes)
-        return tagged_attributes
-
-    def _filter_attributes(self, attr_list, definitions):
-        # filter attributes not corresponding to the factory
-        attributes = dict((attr_id, definitions[attr_id]) \
-           for attr_id in attr_list)
-        return attributes
-
-    def _add_attributes(self, add_attr_func, attributes):
-        for attr_id, attr_info in attributes.iteritems():
-            name = attr_info["name"]
-            help = attr_info["help"]
-            type = attr_info["type"] 
-            value = attr_info.get("value")
-            range = attr_info.get("range")
-            allowed = attr_info.get("allowed")
-            flags = attr_info.get("flags")
-            validation_function = attr_info["validation_function"]
-            category = attr_info.get("category")
-            add_attr_func(name, help, type, value, range, allowed, flags, 
-                    validation_function, category)
-
-    def _add_traces(self, factory, info):
-        for trace_id in info.get("traces", []):
-            trace_info = self._metadata.traces[trace_id]
-            name = trace_info["name"]
-            help = trace_info["help"]
-            factory.add_trace(name, help)
-
-    def _add_tags(self, factory, info):
-        for tag_id in info.get("tags", []):
-            factory.add_tag(tag_id)
-
-    def _add_connector_types(self, factory, info):
-        if "connector_types" in info:
-            from_connections = dict()
-            to_connections = dict()
-            for connection in self._metadata.connections:
-                froms = connection["from"]
-                tos = connection["to"]
-                can_cross = connection["can_cross"]
-                init_code = connection.get("init_code")
-                compl_code = connection.get("compl_code")
-                
-                for from_ in _expand(froms):
-                    for to in _expand(tos):
-                        if from_ not in from_connections:
-                            from_connections[from_] = list()
-                        if to not in to_connections:
-                            to_connections[to] = list()
-                        from_connections[from_].append((to, can_cross, init_code, 
-                            compl_code))
-                        to_connections[to].append((from_, can_cross, init_code,
-                            compl_code))
-            for connector_id in info["connector_types"]:
-                connector_type_info = self._metadata.connector_types[
-                        connector_id]
-                name = connector_type_info["name"]
-                help = connector_type_info["help"]
-                max = connector_type_info["max"]
-                min = connector_type_info["min"]
-                testbed_id = self._testbed_id
-                factory_id = factory.factory_id
-                connector_type = ConnectorType(testbed_id, factory_id, name, 
-                        help, max, min)
-                connector_key = (testbed_id, factory_id, name)
-                if connector_key in to_connections:
-                    for (from_, can_cross, init_code, compl_code) in \
-                            to_connections[connector_key]:
-                        (testbed_id_from, factory_id_from, name_from) = from_
-                        connector_type.add_from_connection(testbed_id_from, 
-                                factory_id_from, name_from, can_cross, 
-                                init_code, compl_code)
-                if connector_key in from_connections:
-                    for (to, can_cross, init_code, compl_code) in \
-                            from_connections[(testbed_id, factory_id, name)]:
-                        (testbed_id_to, factory_id_to, name_to) = to
-                        connector_type.add_to_connection(testbed_id_to, 
-                                factory_id_to, name_to, can_cross, init_code,
-                                compl_code)
-                factory.add_connector_type(connector_type)
-
-def _expand(val):
-    """
-    Expands multiple values in the "val" tuple to create cross products:
-    
-    >>> list(_expand((1,2,3)))
-    [(1, 2, 3)]
-    >>> list(_expand((1,(2,4,5),3)))
-    [(1, 2, 3), (1, 4, 3), (1, 5, 3)]
-    >>> list(_expand(((1,2),(2,4,5),3)))
-    [(1, 2, 3), (1, 4, 3), (1, 5, 3), (2, 2, 3), (2, 4, 3), (2, 5, 3)]
-    """
-    if not val:
-        yield ()
-    elif isinstance(val[0], (list,set,tuple)):
-        for x in val[0]:
-            x = (x,)
-            for e_val in _expand(val[1:]):
-                yield x + e_val
-    else:
-        x = (val[0],)
-        for e_val in _expand(val[1:]):
-            yield x + e_val
-
diff --git a/src/nepi/core/testbed_impl.py b/src/nepi/core/testbed_impl.py
deleted file mode 100644 (file)
index d27588d..0000000
+++ /dev/null
@@ -1,660 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from nepi.core import execute
-from nepi.core.metadata import Metadata, Parallel
-from nepi.util import validation
-from nepi.util.constants import TIME_NOW, \
-        ApplicationStatus as AS, \
-        TestbedStatus as TS, \
-        CONNECTION_DELAY
-from nepi.util.parallel import ParallelRun
-
-import collections
-import copy
-import logging
-
-class TestbedController(execute.TestbedController):
-    def __init__(self, testbed_id, testbed_version):
-        super(TestbedController, self).__init__(testbed_id, testbed_version)
-        self._status = TS.STATUS_ZERO
-        # testbed attributes for validation
-        self._attributes = None
-        # element factories for validation
-        self._factories = dict()
-
-        # experiment construction instructions
-        self._create = dict()
-        self._create_set = dict()
-        self._factory_set = dict()
-        self._connect = dict()
-        self._cross_connect = dict()
-        self._add_trace = dict()
-        self._add_address = dict()
-        self._add_route = dict()
-        self._configure = dict()
-
-        # log of set operations
-        self._setlog = dict()
-        # last set operations
-        self._set = dict()
-
-        # testbed element instances
-        self._elements = dict()
-
-        self._metadata = Metadata(self._testbed_id)
-        if self._metadata.testbed_version != testbed_version:
-            raise RuntimeError("Bad testbed version on testbed %s. Asked for %s, got %s" % \
-                    (testbed_id, testbed_version, self._metadata.testbed_version))
-        for factory in self._metadata.build_factories():
-            self._factories[factory.factory_id] = factory
-        self._attributes = self._metadata.testbed_attributes()
-        self._root_directory = None
-        
-        # Logging
-        self._logger = logging.getLogger("nepi.core.testbed_impl")
-    
-    @property
-    def root_directory(self):
-        return self._root_directory
-
-    @property
-    def guids(self):
-        return self._create.keys()
-
-    @property
-    def elements(self):
-        return self._elements
-    
-    def defer_configure(self, name, value):
-        self._validate_testbed_attribute(name)
-        self._validate_testbed_value(name, value)
-        self._attributes.set_attribute_value(name, value)
-        self._configure[name] = value
-
-    def defer_create(self, guid, factory_id):
-        self._validate_factory_id(factory_id)
-        self._validate_not_guid(guid)
-        self._create[guid] = factory_id
-
-    def defer_create_set(self, guid, name, value):
-        self._validate_guid(guid)
-        self._validate_box_attribute(guid, name)
-        self._validate_box_value(guid, name, value)
-        if guid not in self._create_set:
-            self._create_set[guid] = dict()
-        self._create_set[guid][name] = value
-
-    def defer_factory_set(self, guid, name, value):
-        self._validate_guid(guid)
-        self._validate_factory_attribute(guid, name)
-        self._validate_factory_value(guid, name, value)
-        if guid not in self._factory_set:
-            self._factory_set[guid] = dict()
-        self._factory_set[guid][name] = value
-
-    def defer_connect(self, guid1, connector_type_name1, guid2, 
-            connector_type_name2):
-        self._validate_guid(guid1)
-        self._validate_guid(guid2)
-        factory1 = self._get_factory(guid1)
-        factory_id2 = self._create[guid2]
-        connector_type = factory1.connector_type(connector_type_name1)
-        connector_type.can_connect(self._testbed_id, factory_id2, 
-                connector_type_name2, False)
-        self._validate_connection(guid1, connector_type_name1, guid2, 
-            connector_type_name2)
-
-        if not guid1 in self._connect:
-            self._connect[guid1] = dict()
-        if not connector_type_name1 in self._connect[guid1]:
-             self._connect[guid1][connector_type_name1] = dict()
-        self._connect[guid1][connector_type_name1][guid2] = \
-               connector_type_name2
-        if not guid2 in self._connect:
-            self._connect[guid2] = dict()
-        if not connector_type_name2 in self._connect[guid2]:
-             self._connect[guid2][connector_type_name2] = dict()
-        self._connect[guid2][connector_type_name2][guid1] = \
-               connector_type_name1
-
-    def defer_cross_connect(self, guid, connector_type_name, cross_guid, 
-            cross_testbed_guid, cross_testbed_id, cross_factory_id, 
-            cross_connector_type_name):
-        self._validate_guid(guid)
-        factory = self._get_factory(guid)
-        connector_type = factory.connector_type(connector_type_name)
-        connector_type.can_connect(cross_testbed_id, cross_factory_id, 
-                cross_connector_type_name, True)
-        self._validate_connection(guid, connector_type_name, cross_guid, 
-            cross_connector_type_name)
-
-        if not guid in self._cross_connect:
-            self._cross_connect[guid] = dict()
-        if not connector_type_name in self._cross_connect[guid]:
-             self._cross_connect[guid][connector_type_name] = dict()
-        self._cross_connect[guid][connector_type_name] = \
-                (cross_guid, cross_testbed_guid, cross_testbed_id, 
-                cross_factory_id, cross_connector_type_name)
-
-    def defer_add_trace(self, guid, trace_name):
-        self._validate_guid(guid)
-        self._validate_trace(guid, trace_name)
-        if not guid in self._add_trace:
-            self._add_trace[guid] = list()
-        self._add_trace[guid].append(trace_name)
-
-    def defer_add_address(self, guid, address, netprefix, broadcast):
-        self._validate_guid(guid)
-        self._validate_allow_addresses(guid)
-        if guid not in self._add_address:
-            self._add_address[guid] = list()
-        self._add_address[guid].append((address, netprefix, broadcast))
-
-    def defer_add_route(self, guid, destination, netprefix, nexthop, 
-            metric = 0, device = None):
-        self._validate_guid(guid)
-        self._validate_allow_routes(guid)
-        if not guid in self._add_route:
-            self._add_route[guid] = list()
-        self._add_route[guid].append((destination, netprefix, nexthop, 
-            metric, device)) 
-
-    def do_setup(self):
-        self._root_directory = self._attributes.\
-            get_attribute_value("rootDirectory")
-        self._status = TS.STATUS_SETUP
-
-    def do_create(self):
-        def set_params(self, guid):
-            parameters = self._get_parameters(guid)
-            for name, value in parameters.iteritems():
-                self.set(guid, name, value)
-            
-        self._do_in_factory_order(
-            'create_function',
-            self._metadata.create_order,
-            postaction = set_params )
-        self._status = TS.STATUS_CREATED
-
-    def _do_connect(self, init = True):
-        unconnected = copy.deepcopy(self._connect)
-        
-        while unconnected:
-            for guid1, connections in unconnected.items():
-                factory1 = self._get_factory(guid1)
-                for connector_type_name1, connections2 in connections.items():
-                    connector_type1 = factory1.connector_type(connector_type_name1)
-                    for guid2, connector_type_name2 in connections2.items():
-                        factory_id2 = self._create[guid2]
-                        # Connections are executed in a "From -> To" direction only
-                        # This explicitly ignores the "To -> From" (mirror) 
-                        # connections of every connection pair.
-                        if init:
-                            connect_code = connector_type1.connect_to_init_code(
-                                    self._testbed_id, factory_id2, 
-                                    connector_type_name2,
-                                    False)
-                        else:
-                            connect_code = connector_type1.connect_to_compl_code(
-                                    self._testbed_id, factory_id2, 
-                                    connector_type_name2,
-                                    False)
-                        delay = None
-                        if connect_code:
-                            delay = connect_code(self, guid1, guid2)
-
-                        if delay is not CONNECTION_DELAY:
-                            del unconnected[guid1][connector_type_name1][guid2]
-                    if not unconnected[guid1][connector_type_name1]:
-                        del unconnected[guid1][connector_type_name1]
-                if not unconnected[guid1]:
-                    del unconnected[guid1]
-
-    def do_connect_init(self):
-        self._do_connect()
-
-    def do_connect_compl(self):
-        self._do_connect(init = False)
-        self._status = TS.STATUS_CONNECTED
-
-    def _do_in_factory_order(self, action, order, postaction = None, poststep = None):
-        logger = self._logger
-        
-        guids = collections.defaultdict(list)
-        # order guids (elements) according to factory_id
-        for guid, factory_id in self._create.iteritems():
-            guids[factory_id].append(guid)
-        
-        # configure elements following the factory_id order
-        for factory_id in order:
-            # Create a parallel runner if we're given a Parallel() wrapper
-            runner = None
-            if isinstance(factory_id, Parallel):
-                runner = ParallelRun(factory_id.maxthreads)
-                factory_id = factory_id.factory
-            
-            # omit the factories that have no element to create
-            if factory_id not in guids:
-                continue
-            
-            # configure action
-            factory = self._factories[factory_id]
-            if isinstance(action, basestring) and not getattr(factory, action):
-                continue
-            def perform_action(guid):
-                if isinstance(action, basestring):
-                    getattr(factory, action)(self, guid)
-                else:
-                    action(self, guid)
-                if postaction:
-                    postaction(self, guid)
-
-            # perform the action on all elements, in parallel if so requested
-            if runner:
-                logger.debug("TestbedController: Starting parallel %s", action)
-                runner.start()
-
-            for guid in guids[factory_id]:
-                if runner:
-                    logger.debug("TestbedController: Scheduling %s on %s", action, guid)
-                    runner.put(perform_action, guid)
-                else:
-                    logger.debug("TestbedController: Performing %s on %s", action, guid)
-                    perform_action(guid)
-
-            # sync
-            if runner:
-                runner.sync()
-            
-            # post hook
-            if poststep:
-                for guid in guids[factory_id]:
-                    if runner:
-                        logger.debug("TestbedController: Scheduling post-%s on %s", action, guid)
-                        runner.put(poststep, self, guid)
-                    else:
-                        logger.debug("TestbedController: Performing post-%s on %s", action, guid)
-                        poststep(self, guid)
-
-            # sync
-            if runner:
-                runner.join()
-                logger.debug("TestbedController: Finished parallel %s", action)
-
-    @staticmethod
-    def do_poststep_preconfigure(self, guid):
-        # dummy hook for implementations interested in
-        # two-phase configuration
-        pass
-
-    def do_preconfigure(self):
-        self._do_in_factory_order(
-            'preconfigure_function',
-            self._metadata.preconfigure_order,
-            poststep = self.do_poststep_preconfigure )
-
-    @staticmethod
-    def do_poststep_configure(self, guid):
-        # dummy hook for implementations interested in
-        # two-phase configuration
-        pass
-
-    def do_configure(self):
-        self._do_in_factory_order(
-            'configure_function',
-            self._metadata.configure_order,
-            poststep = self.do_poststep_configure )
-        self._status = TS.STATUS_CONFIGURED
-
-    def do_prestart(self):
-        self._do_in_factory_order(
-            'prestart_function',
-            self._metadata.prestart_order )
-
-    def _do_cross_connect(self, cross_data, init = True):
-        for guid, cross_connections in self._cross_connect.iteritems():
-            factory = self._get_factory(guid)
-            for connector_type_name, cross_connection in \
-                    cross_connections.iteritems():
-                connector_type = factory.connector_type(connector_type_name)
-                (cross_guid, cross_testbed_guid, cross_testbed_id,
-                    cross_factory_id, cross_connector_type_name) = cross_connection
-                if init:
-                    connect_code = connector_type.connect_to_init_code(
-                        cross_testbed_id, cross_factory_id, 
-                        cross_connector_type_name,
-                        True)
-                else:
-                    connect_code = connector_type.connect_to_compl_code(
-                        cross_testbed_id, cross_factory_id, 
-                        cross_connector_type_name,
-                        True)
-                if connect_code:
-                    if hasattr(connect_code, "func"):
-                        func_name = connect_code.func.__name__
-                    elif hasattr(connect_code, "__name__"):
-                        func_name = connect_code.__name__
-                    else:
-                        func_name = repr(connect_code)
-                    self._logger.debug("Cross-connect - guid: %d, connect_code: %s " % (
-                        guid, func_name))
-                    elem_cross_data = cross_data[cross_testbed_guid][cross_guid]
-                    connect_code(self, guid, elem_cross_data)       
-
-    def do_cross_connect_init(self, cross_data):
-        self._do_cross_connect(cross_data)
-
-    def do_cross_connect_compl(self, cross_data):
-        self._do_cross_connect(cross_data, init = False)
-        self._status = TS.STATUS_CROSS_CONNECTED
-
-    def set(self, guid, name, value, time = TIME_NOW):
-        self._validate_guid(guid)
-        self._validate_box_attribute(guid, name)
-        self._validate_box_value(guid, name, value)
-        self._validate_modify_box_value(guid, name)
-        if guid not in self._set:
-            self._set[guid] = dict()
-            self._setlog[guid] = dict()
-        if time not in self._setlog[guid]:
-            self._setlog[guid][time] = dict()
-        self._setlog[guid][time][name] = value
-        self._set[guid][name] = value
-
-    def get(self, guid, name, time = TIME_NOW):
-        """
-        gets an attribute from box definitions if available. 
-        Throws KeyError if the GUID wasn't created
-        through the defer_create interface, and AttributeError if the
-        attribute isn't available (doesn't exist or is design-only)
-        """
-        self._validate_guid(guid)
-        self._validate_box_attribute(guid, name)
-        if guid in self._set and name in self._set[guid]:
-            return self._set[guid][name]
-        if guid in self._create_set and name in self._create_set[guid]:
-            return self._create_set[guid][name]
-        # if nothing else found, returns the factory default value
-        factory = self._get_factory(guid)
-        return factory.box_attributes.get_attribute_value(name)
-
-    def get_route(self, guid, index, attribute):
-        """
-        returns information given to defer_add_route.
-        
-        Raises AttributeError if an invalid attribute is requested
-            or if the indexed routing rule does not exist.
-        
-        Raises KeyError if the GUID has not been seen by
-            defer_add_route
-        """
-        ATTRIBUTES = ['Destination', 'NetPrefix', 'NextHop']
-        
-        if attribute not in ATTRIBUTES:
-            raise AttributeError, "Attribute %r invalid for addresses of %r" % (attribute, guid)
-        
-        attribute_index = ATTRIBUTES.index(attribute)
-        
-        routes = self._add_route.get(guid)
-        if not routes:
-            raise KeyError, "GUID %r not found in %s" % (guid, self._testbed_id)
-       
-        index = int(index)
-        if not (0 <= index < len(addresses)):
-            raise AttributeError, "GUID %r at %s does not have a routing entry #%s" % (
-                guid, self._testbed_id, index)
-        
-        return routes[index][attribute_index]
-
-    def get_address(self, guid, index, attribute='Address'):
-        """
-        returns information given to defer_add_address
-        
-        Raises AttributeError if an invalid attribute is requested
-            or if the indexed routing rule does not exist.
-        
-        Raises KeyError if the GUID has not been seen by
-            defer_add_address
-        """
-        ATTRIBUTES = ['Address', 'NetPrefix', 'Broadcast']
-        
-        if attribute not in ATTRIBUTES:
-            raise AttributeError, "Attribute %r invalid for addresses of %r" % (attribute, guid)
-        
-        attribute_index = ATTRIBUTES.index(attribute)
-        
-        addresses = self._add_address.get(guid)
-        if not addresses:
-            raise KeyError, "GUID %r not found in %s" % (guid, self._testbed_id)
-        
-        index = int(index)
-        if not (0 <= index < len(addresses)):
-            raise AttributeError, "GUID %r at %s does not have an address #%s" % (
-                guid, self._testbed_id, index)
-        
-        return addresses[index][attribute_index]
-
-    def get_attribute_list(self, guid, filter_flags = None, exclude = False):
-        factory = self._get_factory(guid)
-        attribute_list = list()
-        return factory.box_attributes.get_attribute_list(filter_flags, exclude)
-
-    def get_factory_id(self, guid):
-        factory = self._get_factory(guid)
-        return factory.factory_id
-
-    def start(self, time = TIME_NOW):
-        self._do_in_factory_order(
-            'start_function',
-            self._metadata.start_order )
-        self._status = TS.STATUS_STARTED
-
-    #action: NotImplementedError
-
-    def stop(self, time = TIME_NOW):
-        self._do_in_factory_order(
-            'stop_function',
-            reversed(self._metadata.start_order) )
-        self._status = TS.STATUS_STOPPED
-
-    def status(self, guid = None):
-        if not guid:
-            return self._status
-        self._validate_guid(guid)
-        factory = self._get_factory(guid)
-        status_function = factory.status_function
-        if status_function:
-            return status_function(self, guid)
-        return AS.STATUS_UNDETERMINED
-    
-    def testbed_status(self):
-        return self._status
-
-    def trace(self, guid, trace_id, attribute='value'):
-        if attribute == 'value':
-            fd = open("%s" % self.trace_filepath(guid, trace_id), "r")
-            content = fd.read()
-            fd.close()
-        elif attribute == 'path':
-            content = self.trace_filepath(guid, trace_id)
-        elif attribute == 'filename':
-            content = self.trace_filename(guid, trace_id)
-        else:
-            content = None
-        return content
-
-    def traces_info(self):
-        traces_info = dict()
-        host = self._attributes.get_attribute_value("deployment_host")
-        user = self._attributes.get_attribute_value("deployment_user")
-        for guid, trace_list in self._add_trace.iteritems(): 
-            traces_info[guid] = dict()
-            for trace_id in trace_list:
-                traces_info[guid][trace_id] = dict()
-                filepath = self.trace(guid, trace_id, attribute = "path")
-                traces_info[guid][trace_id]["host"] = host
-                traces_info[guid][trace_id]["user"] = user
-                traces_info[guid][trace_id]["filepath"] = filepath
-        return traces_info
-
-    def trace_filepath(self, guid, trace_id):
-        """
-        Return a trace's file path, for TestbedController's default 
-        implementation of trace()
-        """
-        raise NotImplementedError
-
-    def trace_filename(self, guid, trace_id):
-        """
-        Return a trace's file name, for TestbedController's default 
-        implementation of trace()
-        """
-        raise NotImplementedError
-
-    #shutdown: NotImplementedError
-
-    def get_connected(self, guid, connector_type_name, 
-            other_connector_type_name):
-        """searchs the connected elements for the specific connector_type_name 
-        pair"""
-        if guid not in self._connect:
-            return []
-        # all connections for all connectors for guid
-        all_connections = self._connect[guid]
-        if connector_type_name not in all_connections:
-            return []
-        # all connections for the specific connector
-        connections = all_connections[connector_type_name]
-        specific_connections = [otr_guid for otr_guid, otr_connector_type_name \
-                in connections.iteritems() if \
-                otr_connector_type_name == other_connector_type_name]
-        return specific_connections
-
-    def _get_connection_count(self, guid, connection_type_name):
-        count = 0
-        cross_count = 0
-        if guid in self._connect and connection_type_name in \
-                self._connect[guid]:
-            count = len(self._connect[guid][connection_type_name])
-        if guid in self._cross_connect and connection_type_name in \
-                self._cross_connect[guid]:
-            cross_count = len(self._cross_connect[guid][connection_type_name])
-        return count + cross_count
-
-    def _get_traces(self, guid):
-        return [] if guid not in self._add_trace else self._add_trace[guid]
-
-    def _get_parameters(self, guid):
-        return dict() if guid not in self._create_set else \
-                self._create_set[guid]
-
-    def _get_factory(self, guid):
-        factory_id = self._create[guid]
-        return self._factories[factory_id]
-
-    def _get_factory_id(self, guid):
-        """ Returns the factory ID of the (perhaps not yet) created object """
-        return self._create.get(guid, None)
-
-    def _validate_guid(self, guid):
-        if not guid in self._create:
-            raise RuntimeError("Element guid %d doesn't exist" % guid)
-
-    def _validate_not_guid(self, guid):
-        if guid in self._create:
-            raise AttributeError("Cannot add elements with the same guid: %d" %
-                    guid)
-
-    def _validate_factory_id(self, factory_id):
-        if factory_id not in self._factories:
-            raise AttributeError("Invalid element type %s for testbed version %s" %
-                    (factory_id, self._testbed_version))
-
-    def _validate_testbed_attribute(self, name):
-        if not self._attributes.has_attribute(name):
-            raise AttributeError("Invalid testbed attribute %s for testbed" % \
-                    name)
-
-    def _validate_testbed_value(self, name, value):
-        if not self._attributes.is_attribute_value_valid(name, value):
-            raise AttributeError("Invalid value %r for testbed attribute %s" % \
-                (value, name))
-
-    def _validate_box_attribute(self, guid, name):
-        factory = self._get_factory(guid)
-        if not factory.box_attributes.has_attribute(name):
-            raise AttributeError("Invalid attribute %s for element type %s" %
-                    (name, factory.factory_id))
-
-    def _validate_box_value(self, guid, name, value):
-        factory = self._get_factory(guid)
-        if not factory.box_attributes.is_attribute_value_valid(name, value):
-            raise AttributeError("Invalid value %r for attribute %s" % \
-                (value, name))
-
-    def _validate_factory_attribute(self, guid, name):
-        factory = self._get_factory(guid)
-        if not factory.has_attribute(name):
-            raise AttributeError("Invalid attribute %s for element type %s" %
-                    (name, factory.factory_id))
-
-    def _validate_factory_value(self, guid, name, value):
-        factory = self._get_factory(guid)
-        if not factory.is_attribute_value_valid(name, value):
-            raise AttributeError("Invalid value %r for attribute %s" % \
-                (value, name))
-
-    def _validate_trace(self, guid, trace_name):
-        factory = self._get_factory(guid)
-        if not trace_name in factory.traces_list:
-            raise RuntimeError("Element type '%s' has no trace '%s'" %
-                    (factory.factory_id, trace_name))
-
-    def _validate_allow_addresses(self, guid):
-        factory = self._get_factory(guid)
-        if not factory.allow_addresses:
-            raise RuntimeError("Element type '%s' doesn't support addresses" %
-                    factory.factory_id)
-        attr_name = "maxAddresses"
-        if guid in self._create_set and attr_name in self._create_set[guid]:
-            max_addresses = self._create_set[guid][attr_name]
-        else:
-            factory = self._get_factory(guid)
-            max_addresses = factory.box_attributes.get_attribute_value(attr_name)
-        if guid in self._add_address:
-            count_addresses = len(self._add_address[guid])
-            if max_addresses == count_addresses:
-                raise RuntimeError("Element guid %d of type '%s' can't accept \
-                        more addresses" % (guid, factory.factory_id))
-
-    def _validate_allow_routes(self, guid):
-        factory = self._get_factory(guid)
-        if not factory.allow_routes:
-            raise RuntimeError("Element type '%s' doesn't support routes" %
-                    factory.factory_id)
-
-    def _validate_connection(self, guid1, connector_type_name1, guid2, 
-            connector_type_name2, cross = False):
-        # can't connect with self
-        if guid1 == guid2:
-            raise AttributeError("Can't connect guid %d to self" % \
-                (guid1))
-        # the connection is already done, so ignore
-        connected = self.get_connected(guid1, connector_type_name1, 
-                connector_type_name2)
-        if guid2 in connected:
-            return
-        count1 = self._get_connection_count(guid1, connector_type_name1)
-        factory1 = self._get_factory(guid1)
-        connector_type1 = factory1.connector_type(connector_type_name1)
-        if count1 == connector_type1.max:
-            raise AttributeError("Connector %s is full for guid %d" % \
-                (connector_type_name1, guid1))
-
-    def _validate_modify_box_value(self, guid, name):
-        factory = self._get_factory(guid)
-        if self._status > TS.STATUS_STARTED and \
-                (factory.box_attributes.is_attribute_exec_read_only(name) or \
-                factory.box_attributes.is_attribute_exec_immutable(name)):
-            raise AttributeError("Attribute %s can only be modified during experiment design" % name)
-
diff --git a/src/nepi/design/box.py b/src/nepi/design/box.py
new file mode 100644 (file)
index 0000000..82bb899
--- /dev/null
@@ -0,0 +1,101 @@
+from nepi.util import guid
+
+guid_gen = guid.GuidGenerator()
+
+class Attributes(object):
+    def __init__(self):
+        super(Attributes, self).__init__()
+        self._attributes = dict()
+
+    def __getattr__(self, name):
+        try:
+            return self._attributes[name]
+        except:
+            return super(Attributes, self).__getattribute__(name)
+
+    def __setattr__(self, name, value):
+        try:
+            if value == None:
+                old = self._attributes[name]
+                del self._attributes[name]
+                return old
+
+            self._attributes[name] = value
+            return value
+        except:
+            return super(Attributes, self).__setattr__(name, value)
+
+class Connections(object):
+    def __init__(self):
+        super(Connections, self).__init__()
+        self._connections = set()
+
+    def __getattr__(self, guid_or_label):
+        try:
+            for b in self._connections:
+                if guid_or_label in [b.guid, b.label]:
+                    return b
+        except:
+            return super(Connections, self).__getattribute__(guid_or_label)
+
+class Box(object):
+    def __init__(self, label = None, guid = None):
+        super(Box, self).__init__()
+        self._guid = guid_gen.next(guid)
+        self._a = Attributes()
+        self._c = Connections()
+        self._tags = set()
+        self.label = label or self._guid
+
+        # Graphical information to draw box
+        self.x = 0
+        self.y = 0
+        self.width = 4
+        self.height = 4
+
+    @property
+    def tags(self):
+        return self._tags
+
+    @property
+    def attributes(self):
+        return self._a._attributes.keys()
+
+    @property
+    def a(self):
+        return self._a
+
+    @property
+    def c(self):
+        return self._c
+
+    @property
+    def guid(self):
+        return self._guid
+
+    @property
+    def connections(self):
+        return set(self._c._connections)
+
+    def tadd(self, name):
+        self._tags.add(name)
+
+    def tdel(self, name):
+        self._tags.remove(name)
+
+    def thas(self, name):
+        return name in self._tags
+
+    def connect(self, box, cascade = True):
+        self._c._connections.add(box)
+        if cascade:
+            box.connect(self, cascade = False)
+
+    def disconnect(self, box, cascade = True):
+        self._c._connections.remove(box)
+        if cascade:
+            box.disconnect(self, cascade = False)
+
+    def is_connected(self, box):
+        return box in self.connections
+
diff --git a/src/nepi/execution/attribute.py b/src/nepi/execution/attribute.py
new file mode 100644 (file)
index 0000000..1282dfe
--- /dev/null
@@ -0,0 +1,89 @@
+
+### Attribute Types
+class Types:
+    String  = "STRING"
+    Bool    = "BOOL"
+    Enum    = "ENUM"
+    Double  = "DOUBLE"
+    Integer = "INTEGER"
+
+### Attribute Flags
+class Flags:
+    # Attribute can be modified by the user 
+    NoFlags         = 0x00
+    # Attribute is not modifiable by the user
+    ReadOnly        = 0x01
+    # Attribute is not modifiable by the user during runtime
+    ExecReadOnly        = 0x02
+    # Attribute is an access credential
+    Credential      = 0x04
+
+class Attribute(object):
+    def __init__(self, name, help, type = Types.String,
+            flags = Flags.NoFlags, default = None, allowed = None,
+            set_hook = None):
+        self._name = name
+        self._help = help
+        self._type = type
+        self._flags = flags
+        self._allowed = allowed
+        self._default = self._value = default
+        # callback to be invoked upon changing the 
+        # attribute value
+        self.set_hook = set_hook
+
+    @property
+    def name(self):
+        return self._name
+
+    @property
+    def default(self):
+        return self._default
+
+    @property
+    def type(self):
+        return self._type
+
+    @property
+    def help(self):
+        return self._help
+
+    @property
+    def flags(self):
+        return self._flags
+
+    @property
+    def allowed(self):
+        return self._allowed
+
+    def has_flag(self, flag):
+        return (self._flags & flag) == flag
+
+    def get_value(self):
+        return self._value
+
+    def set_value(self, value):
+        valid = True
+
+        if self.type == Types.Enum:
+            valid = value in self._allowed
+        
+        valid = valid and self.is_valid_value(value)
+
+        if valid: 
+            if self.set_hook:
+                # Hook receives old value, new value
+                value = self.set_hook(self._value, value)
+
+            self._value = value
+        else:
+            raise ValueError("Invalid value %s for attribute %s" %
+                    (str(value), self.name))
+
+    value = property(get_value, set_value)
+
+    def is_valid_value(self, value):
+        """ Attribute subclasses will override this method to add 
+        adequate validation"""
+        return True
+
diff --git a/src/nepi/execution/ec.py b/src/nepi/execution/ec.py
new file mode 100644 (file)
index 0000000..0e76650
--- /dev/null
@@ -0,0 +1,466 @@
+import logging
+import os
+import random
+import sys
+import time
+import threading
+
+from nepi.util import guid
+from nepi.util.parallel import ParallelRun
+from nepi.util.timefuncs import strfnow, strfdiff, strfvalid 
+from nepi.execution.resource import ResourceFactory, ResourceAction, \
+        ResourceState
+from nepi.execution.scheduler import HeapScheduler, Task, TaskStatus
+from nepi.execution.trace import TraceAttr
+
# TODO: use multiprocessing instead of threading
# TODO: When a failure occurs during deployment, scp and ssh processes are left running behind!!
+
class ECState(object):
    """ Possible lifecycle states of the ExperimentController. """
    # The event processing thread is running normally
    RUNNING = 1
    # An unrecoverable error occurred while processing tasks
    FAILED = 2
    # The controller was shut down cleanly
    TERMINATED = 3
+
class ExperimentController(object):
    """ Orchestrates the resource managers (RMs) of an experiment:
    registration, interconnection, attribute handling, deployment,
    conditioned start/stop and release. Time based actions are queued
    in a HeapScheduler and executed by a dedicated event processing
    thread.
    """

    def __init__(self, exp_id = None, root_dir = "/tmp"): 
        """
        :param exp_id: Experiment identifier given by the user. A random
            one is generated when not provided.
        :type exp_id: str

        :param root_dir: Root directory to store experiment files
        :type root_dir: str
        """
        super(ExperimentController, self).__init__()
        # root directory to store files
        self._root_dir = root_dir

        # experiment identifier given by the user
        self._exp_id = exp_id or "nepi-exp-%s" % os.urandom(8).encode('hex')

        # generator of globally unique ids
        self._guid_generator = guid.GuidGenerator()
        
        # Resource managers, indexed by guid
        self._resources = dict()

        # Scheduler for time ordered tasks
        self._scheduler = HeapScheduler()

        # Tracked tasks, indexed by task id
        self._tasks = dict()

        # Event processing thread
        self._cond = threading.Condition()
        self._thread = threading.Thread(target = self._process)
        self._thread.setDaemon(True)
        self._thread.start()

        # EC state
        self._state = ECState.RUNNING

        # Logging
        self._logger = logging.getLogger("ExperimentController")

    @property
    def logger(self):
        """ Logger of the controller """
        return self._logger

    @property
    def ecstate(self):
        """ Current ECState of the controller """
        return self._state

    @property
    def exp_id(self):
        """ Experiment identifier, always prefixed with 'nepi-' """
        exp_id = self._exp_id
        if not exp_id.startswith("nepi-"):
            exp_id = "nepi-" + exp_id
        return exp_id

    @property
    def finished(self):
        """ True once the EC has failed or terminated """
        return self.ecstate in [ECState.FAILED, ECState.TERMINATED]

    def wait_finished(self, guids):
        """ Blocks until every RM in 'guids' reaches state FINISHED or
        the EC itself finishes.

        :param guids: List of guids of RMs to wait for
        :type guids: list
        """
        while not all([self.state(guid) == ResourceState.FINISHED \
                for guid in guids]) and not self.finished:
            # We keep the sleep as large as possible to 
            # decrease the number of RM state requests
            time.sleep(2)
    
    def get_task(self, tid):
        """ Returns the tracked task with id 'tid', or None """
        return self._tasks.get(tid)

    def get_resource(self, guid):
        """ Returns the RM registered under 'guid', or None """
        return self._resources.get(guid)

    @property
    def resources(self):
        """ Guids of all registered RMs """
        return self._resources.keys()

    def register_resource(self, rtype, guid = None):
        """ Creates a new RM of type 'rtype' and returns its guid.

        :param rtype: Resource type name
        :type rtype: str

        :param guid: Optional guid to assign to the new RM
        :type guid: int
        """
        # Get next available guid
        guid = self._guid_generator.next(guid)
        
        # Instantiate RM
        rm = ResourceFactory.create(rtype, self, guid)

        # Store RM
        self._resources[guid] = rm

        return guid

    def get_attributes(self, guid):
        """ Returns the attributes of the RM with guid 'guid' """
        rm = self.get_resource(guid)
        return rm.get_attributes()

    def get_filters(self, guid):
        """ Returns the filters of the RM with guid 'guid' """
        rm = self.get_resource(guid)
        return rm.get_filters()

    def register_connection(self, guid1, guid2):
        """ Registers a bidirectional connection between two RMs """
        rm1 = self.get_resource(guid1)
        rm2 = self.get_resource(guid2)

        rm1.connect(guid2)
        rm2.connect(guid1)

    def register_condition(self, group1, action, group2, state,
            time = None):
        """ Registers an action START or STOP for all RM on group1 to occur 
            time 'time' after all elements in group2 reached state 'state'.

            :param group1: List of guids of RMs subjected to action
            :type group1: list

            :param action: Action to register (either START or STOP)
            :type action: ResourceAction

            :param group2: List of guids of RMs to we waited for
            :type group2: list

            :param state: State to wait for on RMs (STARTED, STOPPED, etc)
            :type state: ResourceState

            :param time: Time to wait after group2 has reached status 
            :type time: string

        """
        if isinstance(group1, int):
            group1 = [group1]
        if isinstance(group2, int):
            group2 = [group2]

        for guid1 in group1:
            rm = self.get_resource(guid1)
            rm.register_condition(action, group2, state, time)

    def register_trace(self, guid, name):
        """ Enable trace

        :param name: Name of the trace
        :type name: str
        """
        rm = self.get_resource(guid)
        rm.register_trace(name)

    def trace(self, guid, name, attr = TraceAttr.ALL, block = 512, offset = 0):
        """ Get information on collected trace

        :param name: Name of the trace
        :type name: str

        :param attr: Can be one of:
                         - TraceAttr.ALL (complete trace content), 
                         - TraceAttr.STREAM (block in bytes to read starting at offset), 
                         - TraceAttr.PATH (full path to the trace file),
                         - TraceAttr.SIZE (size of trace file). 
        :type attr: str

        :param block: Number of bytes to retrieve from trace, when attr is TraceAttr.STREAM 
        :type block: int

        :param offset: Number of 'blocks' to skip, when attr is TraceAttr.STREAM 
        :type offset: int

        :rtype: str
        """
        rm = self.get_resource(guid)
        return rm.trace(name, attr, block, offset)

    def discover(self, guid, filters):
        """ Discovers a concrete resource matching 'filters' for the RM """
        rm = self.get_resource(guid)
        return rm.discover(filters)

    def provision(self, guid, filters):
        """ Provisions the resource matching 'filters' for the RM """
        rm = self.get_resource(guid)
        return rm.provision(filters)

    def get(self, guid, name):
        """ Returns the value of attribute 'name' on the RM with guid 'guid' """
        rm = self.get_resource(guid)
        return rm.get(name)

    def set(self, guid, name, value):
        """ Sets attribute 'name' to 'value' on the RM with guid 'guid' """
        rm = self.get_resource(guid)
        return rm.set(name, value)

    def state(self, guid):
        """ Returns the ResourceState of the RM with guid 'guid' """
        rm = self.get_resource(guid)
        return rm.state

    def stop(self, guid):
        """ Immediately stops the RM with guid 'guid' """
        rm = self.get_resource(guid)
        return rm.stop()

    def start(self, guid):
        """ Immediately starts the RM with guid 'guid' """
        rm = self.get_resource(guid)
        return rm.start()

    def set_with_conditions(self, name, value, group1, group2, state,
            time = None):
        """ Set value 'value' on attribute with name 'name' on all RMs of
            group1 when 'time' has elapsed since all elements in group2 
            have reached state 'state'.

            :param name: Name of attribute to set in RM
            :type name: string

            :param value: Value of attribute to set in RM
            :type value: string

            :param group1: List of guids of RMs subjected to action
            :type group1: list

            :param group2: List of guids of RMs to we waited for
            :type group2: list

            :param state: State to wait for on RMs (STARTED, STOPPED, etc)
            :type state: ResourceState

            :param time: Time to wait after group2 has reached status 
            :type time: string

        """
        if isinstance(group1, int):
            group1 = [group1]
        if isinstance(group2, int):
            group2 = [group2]

        for guid1 in group1:
            # BUGFIX: the original looked up 'guid' (undefined here)
            # instead of the loop variable 'guid1'
            rm = self.get_resource(guid1)
            rm.set_with_conditions(name, value, group2, state, time)

    def stop_with_conditions(self, guid):
        """ Stops the RM with guid 'guid' honoring its STOP conditions """
        rm = self.get_resource(guid)
        return rm.stop_with_conditions()

    def start_with_conditions(self, guid):
        """ Starts the RM with guid 'guid' honoring its START conditions """
        rm = self.get_resource(guid)
        # BUGFIX: the original called rm.start_with_condition(),
        # which does not exist (ResourceManager defines
        # start_with_conditions)
        return rm.start_with_conditions()

    def deploy(self, group = None, wait_all_ready = True):
        """ Deploy all resource managers in group

        :param group: List of guids of RMs to deploy
        :type group: list

        :param wait_all_ready: Wait until all RMs are ready in
            order to start the RMs
        :type wait_all_ready: bool

        """
        self.logger.debug(" ------- DEPLOY START ------ ")

        stop = []

        def steps(rm):
            try:
                rm.deploy()
                rm.start_with_conditions()

                # Only if the RM has STOP conditions we
                # schedule a stop. Otherwise the RM will stop immediately
                if rm.conditions.get(ResourceAction.STOP):
                    rm.stop_with_conditions()
            except:
                import traceback
                err = traceback.format_exc()
                
                self._logger.error("Error occurred while deploying resources: %s" % err)

                # signal the main loop to interrupt deployment
                stop.append(None)

        if not group:
            group = self.resources

        # Work on a copy so the shuffle below does not reorder the
        # caller's list as a side effect
        group = list(group)

        # Before starting deployment we disorder the group list with the
        # purpose of speeding up the whole deployment process.
        # It is likely that the user inserted in the 'group' list closely
        # resources resources one after another (e.g. all applications
        # connected to the same node can likely appear one after another).
        # This can originate a slow down in the deployment since the N 
        # threads the parallel runner uses to processes tasks may all
        # be taken up by the same family of resources waiting for the 
        # same conditions. 
        # If we disorder the group list, this problem can be mitigated
        random.shuffle(group)

        threads = []
        for guid in group:
            rm = self.get_resource(guid)

            if wait_all_ready:
                towait = list(group)
                towait.remove(guid)
                self.register_condition(guid, ResourceAction.START, 
                        towait, ResourceState.READY)

            thread = threading.Thread(target = steps, args = (rm,))
            threads.append(thread)
            thread.setDaemon(True)
            thread.start()

        while list(threads) and not self.finished and not stop:
            thread = threads[0]
            # Use a short join timeout so EC termination or a deployment
            # failure is noticed promptly
            thread.join(1)
            if not thread.is_alive():
                threads.remove(thread)

        if stop:
            # stop the scheduler
            self._stop_scheduler()

            if self._thread.is_alive():
               self._thread.join()

            raise RuntimeError("Error occurred, interrupting deployment ")

    def release(self, group = None):
        """ Releases all RMs in 'group' (all registered RMs by default) """
        if not group:
            group = self.resources

        threads = []
        for guid in group:
            rm = self.get_resource(guid)
            thread = threading.Thread(target=rm.release)
            threads.append(thread)
            thread.setDaemon(True)
            thread.start()

        while list(threads) and not self.finished:
            thread = threads[0]
            # Time out after 5 seconds to check EC not terminated
            thread.join(5)
            if not thread.is_alive():
                threads.remove(thread)

    def shutdown(self):
        """ Releases all resources and stops the event processing thread """
        self.release()

        self._stop_scheduler()
        
        if self._thread.is_alive():
           self._thread.join()

    def schedule(self, date, callback, track = False):
        """ Schedule a callback to be executed at time date.

            date    string containing execution time for the task.
                    It can be expressed as an absolute time, using
                    timestamp format, or as a relative time matching
                    ^\d+.\d+(h|m|s|ms|us)$

            callback    code to be executed for the task. Must be a
                        Python function, and receives args and kwargs
                        as arguments.

            track   if set to True, the task will be retrivable with
                    the get_task() method
        """
        timestamp = strfvalid(date)
        
        task = Task(timestamp, callback)
        task = self._scheduler.schedule(task)

        if track:
            self._tasks[task.id] = task
  
        # Notify condition to wake up the processing thread
        self._cond.acquire()
        self._cond.notify()
        self._cond.release()

        return task.id
     
    def _process(self):
        """ Main loop of the event processing thread: pops due tasks from
        the scheduler and hands them to a parallel runner """
        runner = ParallelRun(maxthreads = 50)
        runner.start()

        try:
            while not self.finished:
                self._cond.acquire()
                task = self._scheduler.next()
                self._cond.release()

                if not task:
                    # If there are no tasks in the queue we need to 
                    # wait until a call to schedule wakes us up
                    self._cond.acquire()
                    self._cond.wait()
                    self._cond.release()
                else: 
                    # If the task timestamp is in the future the thread needs to wait
                    # until time elapse or until another task is scheduled
                    now = strfnow()
                    if now < task.timestamp:
                        # Calculate time difference in seconds
                        timeout = strfdiff(task.timestamp, now)
                        # Re-schedule task with the same timestamp
                        self._scheduler.schedule(task)
                        # Sleep until timeout or until a new task awakes the condition
                        self._cond.acquire()
                        self._cond.wait(timeout)
                        self._cond.release()
                    else:
                        # Process tasks in parallel
                        runner.put(self._execute, task)
                
        except: 
            import traceback
            err = traceback.format_exc()
            self._logger.error("Error while processing tasks in the EC: %s" % err)

            self._state = ECState.FAILED
        finally:
            runner.sync()
   
        # Mark EC state as terminated
        if self.ecstate == ECState.RUNNING:
            self._state = ECState.TERMINATED

    def _execute(self, task):
        """ Runs the callback of 'task', recording result and status """
        # Optimistically mark the task DONE; overwritten with ERROR below
        # if the callback raises
        task.status = TaskStatus.DONE

        try:
            task.result = task.callback()
        except:
            import traceback
            err = traceback.format_exc()
            task.result = err
            task.status = TaskStatus.ERROR
            
            self._logger.error("Error occurred while executing task: %s" % err)

            self._stop_scheduler()

            # Propagate error to the ParallelRunner
            raise

    def _stop_scheduler(self):
        """ Marks the EC as failed and wakes up the processing thread so
        it can notice the state change and exit """
        # Mark the EC as failed
        self._state = ECState.FAILED

        # Wake up the EC in case it was sleeping
        self._cond.acquire()
        self._cond.notify()
        self._cond.release()
+
+
diff --git a/src/nepi/execution/resource.py b/src/nepi/execution/resource.py
new file mode 100644 (file)
index 0000000..dfa219e
--- /dev/null
@@ -0,0 +1,607 @@
+from nepi.util.timefuncs import strfnow, strfdiff, strfvalid
+from nepi.execution.trace import TraceAttr
+
+import copy
+import functools
+import inspect
+import logging
+import os
+import pkgutil
+import weakref
+
+reschedule_delay = "0.5s"
+
class ResourceAction:
    """ Actions on a resource manager that can be conditioned on the
    state of other RMs. """
    DEPLOY = 0
    START = 1
    STOP = 2
+
class ResourceState:
    """ Ordered lifecycle states of a ResourceManager. The numeric
    ordering is relied upon for comparisons (e.g. rm.state < state). """
    NEW = 0
    DISCOVERED = 1
    PROVISIONED = 2
    READY = 3
    STARTED = 4
    STOPPED = 5
    FINISHED = 6
    FAILED = 7
    RELEASED = 8
+
def clsinit(cls):
    """ Class decorator that runs the class-level initialization hook
    ``_clsinit`` and returns the class unchanged. """
    cls._clsinit()
    return cls
+
# Decorator to invoke class initialization method
@clsinit
class ResourceManager(object):
    """ Base class of all resource managers (RMs). An RM drives one
    resource through its life cycle (NEW -> DISCOVERED -> PROVISIONED
    -> READY -> STARTED -> STOPPED -> RELEASED), and keeps track of its
    attributes, traces, connections to other RMs, and the conditions
    under which it must start and stop. """

    _rtype = "Resource"
    _filters = None
    _attributes = None
    _traces = None

    @classmethod
    def _register_filter(cls, attr):
        """ Resource subclasses will invoke this method to add a 
        filter attribute

        """
        cls._filters[attr.name] = attr

    @classmethod
    def _register_attribute(cls, attr):
        """ Resource subclasses will invoke this method to add a 
        resource attribute

        """
        cls._attributes[attr.name] = attr

    @classmethod
    def _register_trace(cls, trace):
        """ Resource subclasses will invoke this method to add a 
        resource trace

        """
        cls._traces[trace.name] = trace


    @classmethod
    def _register_filters(cls):
        """ Resource subclasses will override this method to register 
        resource filters

        """
        pass

    @classmethod
    def _register_attributes(cls):
        """ Resource subclasses will override this method to register
        resource attributes

        """
        pass

    @classmethod
    def _register_traces(cls):
        """ Resource subclasses will override this method to register
        resource traces

        """
        pass

    @classmethod
    def _clsinit(cls):
        """ Creates a new instance of the registration dictionaries.
        Each resource class should have the same registration dictionary
        template with different instances.
        """
        # static template for resource filters
        cls._filters = dict()
        cls._register_filters()

        # static template for resource attributes
        cls._attributes = dict()
        cls._register_attributes()

        # static template for resource traces
        cls._traces = dict()
        cls._register_traces()

    @classmethod
    def rtype(cls):
        """ Returns the resource type name of this class """
        return cls._rtype

    @classmethod
    def get_filters(cls):
        """ Returns a copy of the filters

        """
        return copy.deepcopy(cls._filters.values())

    @classmethod
    def get_attributes(cls):
        """ Returns a copy of the attributes

        """
        return copy.deepcopy(cls._attributes.values())

    @classmethod
    def get_traces(cls):
        """ Returns a copy of the traces

        """
        return copy.deepcopy(cls._traces.values())

    def __init__(self, ec, guid):
        self._guid = guid
        # weak reference to avoid a reference cycle with the EC
        self._ec = weakref.ref(ec)
        self._connections = set()
        self._conditions = dict() 

        # the resource instance gets a copy of all attributes
        self._attrs = copy.deepcopy(self._attributes)

        # the resource instance gets a copy of all traces
        self._trcs = copy.deepcopy(self._traces)

        self._state = ResourceState.NEW

        # timestamps of the state transitions
        self._start_time = None
        self._stop_time = None
        self._discover_time = None
        self._provision_time = None
        self._ready_time = None
        self._release_time = None

        # Logging
        self._logger = logging.getLogger("Resource")

    def debug(self, msg, out = None, err = None):
        """ Logs 'msg' (with optional stdout/stderr) at DEBUG level """
        self.log(msg, logging.DEBUG, out, err)

    def error(self, msg, out = None, err = None):
        """ Logs 'msg' (with optional stdout/stderr) at ERROR level """
        self.log(msg, logging.ERROR, out, err)

    def warn(self, msg, out = None, err = None):
        """ Logs 'msg' (with optional stdout/stderr) at WARNING level """
        self.log(msg, logging.WARNING, out, err)

    def info(self, msg, out = None, err = None):
        """ Logs 'msg' (with optional stdout/stderr) at INFO level """
        self.log(msg, logging.INFO, out, err)

    def log(self, msg, level, out = None, err = None):
        """ Formats and emits a log record, appending captured output """
        if out:
            msg += " - OUT: %s " % out

        if err:
            msg += " - ERROR: %s " % err

        msg = self.log_message(msg)

        self.logger.log(level, msg)

    def log_message(self, msg):
        """ Prefixes 'msg' with the resource type and guid """
        return " %s guid: %d - %s " % (self._rtype, self.guid, msg)

    @property
    def logger(self):
        """ Logger of the resource """
        return self._logger

    @property
    def guid(self):
        """ Globally unique identifier of the RM """
        return self._guid

    @property
    def ec(self):
        """ Experiment controller owning this RM (may be None once the
        EC is garbage collected, since only a weak reference is kept) """
        return self._ec()

    @property
    def connections(self):
        """ Guids of the RMs connected to this one """
        return self._connections

    @property
    def conditions(self):
        """ Registered conditions, indexed by ResourceAction """
        return self._conditions

    @property
    def start_time(self):
        """ Returns timestamp with the time the RM started """
        return self._start_time

    @property
    def stop_time(self):
        """ Returns timestamp with the time the RM stopped """
        return self._stop_time

    @property
    def discover_time(self):
        """ Returns timestamp with the time the RM passed to state discovered """
        return self._discover_time

    @property
    def provision_time(self):
        """ Returns timestamp with the time the RM passed to state provisioned """
        return self._provision_time

    @property
    def ready_time(self):
        """ Returns timestamp with the time the RM passed to state ready  """
        return self._ready_time

    @property
    def release_time(self):
        """ Returns timestamp with the time the RM was released """
        return self._release_time

    @property
    def state(self):
        """ Current ResourceState of the RM """
        return self._state

    def connect(self, guid):
        """ Records a connection to the RM with guid 'guid', when valid """
        if self.valid_connection(guid):
            self._connections.add(guid)

    def discover(self, filters = None):
        """ Moves the RM to state DISCOVERED, recording the timestamp """
        self._discover_time = strfnow()
        self._state = ResourceState.DISCOVERED

    def provision(self, filters = None):
        """ Moves the RM to state PROVISIONED, recording the timestamp """
        self._provision_time = strfnow()
        self._state = ResourceState.PROVISIONED

    def start(self):
        """ Start the Resource Manager

        """
        if self._state not in [ResourceState.READY, ResourceState.STOPPED]:
            self.error("Wrong state %s for start" % self.state)
            return

        self._start_time = strfnow()
        self._state = ResourceState.STARTED

    def stop(self):
        """ Stop the Resource Manager

        """
        if self._state not in [ResourceState.STARTED]:
            self.error("Wrong state %s for stop" % self.state)
            return

        self._stop_time = strfnow()
        self._state = ResourceState.STOPPED

    def set(self, name, value):
        """ Set the value of the attribute

        :param name: Name of the attribute
        :type name: str
        :param value: Value of the attribute
        :type value: str
        """
        attr = self._attrs[name]
        attr.value = value

    def get(self, name):
        """ Returns the value of the attribute

        :param name: Name of the attribute
        :type name: str
        :rtype: str
        """
        attr = self._attrs[name]
        return attr.value

    def register_trace(self, name):
        """ Enable trace

        :param name: Name of the trace
        :type name: str
        """
        trace = self._trcs[name]
        trace.enabled = True

    def trace(self, name, attr = TraceAttr.ALL, block = 512, offset = 0):
        """ Get information on collected trace

        :param name: Name of the trace
        :type name: str

        :param attr: Can be one of:
                         - TraceAttr.ALL (complete trace content), 
                         - TraceAttr.STREAM (block in bytes to read starting at offset), 
                         - TraceAttr.PATH (full path to the trace file),
                         - TraceAttr.SIZE (size of trace file). 
        :type attr: str

        :param block: Number of bytes to retrieve from trace, when attr is TraceAttr.STREAM 
        :type block: int

        :param offset: Number of 'blocks' to skip, when attr is TraceAttr.STREAM 
        :type offset: int

        :rtype: str
        """
        pass

    def register_condition(self, action, group, state, 
            time = None):
        """ Registers a condition on the resource manager to allow execution 
        of 'action' only after 'time' has elapsed from the moment all resources 
        in 'group' reached state 'state'

        :param action: Action to restrict to condition (either 'START' or 'STOP')
        :type action: str
        :param group: Group of RMs to wait for (list of guids)
        :type group: int or list of int
        :param state: State to wait for on all RM in group. (either 'STARTED' or 'STOPPED')
        :type state: str
        :param time: Time to wait after 'state' is reached on all RMs in group. (e.g. '2s')
        :type time: str

        """
        conditions = self.conditions.get(action)
        if not conditions:
            conditions = list()
            self._conditions[action] = conditions

        # For each condition to register a tuple of (group, state, time) is 
        # added to the 'action' list
        if not isinstance(group, list):
            group = [group]

        conditions.append((group, state, time))

    def get_connected(self, rtype):
        """ Returns the connected RMs whose resource type is 'rtype' """
        connected = []
        for guid in self.connections:
            rm = self.ec.get_resource(guid)
            if rm.rtype() == rtype:
                connected.append(rm)
        return connected

    def _needs_reschedule(self, group, state, time):
        """ Internal method that verify if 'time' has elapsed since 
        all elements in 'group' have reached state 'state'.

        :param group: Group of RMs to wait for (list of guids)
        :type group: int or list of int
        :param state: State to wait for on all RM in group. (either 'STARTED' or 'STOPPED')
        :type state: str
        :param time: Time to wait after 'state' is reached on all RMs in group. (e.g. '2s')
        :type time: str

        .. note : time should be written like "2s" or "3m" with s for seconds, m for minutes, h for hours, ...
        If for example, you need to wait 2min 30sec, time could be "150s" or "2.5m".
        For the moment, 2m30s is not a correct syntax.

        """
        reschedule = False
        delay = reschedule_delay 

        # check state and time elapsed on all RMs
        for guid in group:
            rm = self.ec.get_resource(guid)
            # If the RM state is lower than the requested state we must
            # reschedule (e.g. if RM is READY but we required STARTED)
            if rm.state < state:
                reschedule = True
                break

            # If there is a time restriction, we must verify the
            # restriction is satisfied 
            if time:
                if state == ResourceState.DISCOVERED:
                    t = rm.discover_time
                # BUGFIX: this branch was a separate 'if' in the
                # original, so the DISCOVERED case fell through to the
                # final 'else: break' and its time restriction was
                # silently skipped
                elif state == ResourceState.PROVISIONED:
                    t = rm.provision_time
                elif state == ResourceState.READY:
                    t = rm.ready_time
                elif state == ResourceState.STARTED:
                    t = rm.start_time
                elif state == ResourceState.STOPPED:
                    t = rm.stop_time
                else:
                    # No timestamp is kept for the remaining states
                    break

                d = strfdiff(strfnow(), t)
                wait = strfdiff(strfvalid(time),strfvalid(str(d)+"s"))
                if wait > 0.001:
                    reschedule = True
                    delay = "%fs" % wait
                    break
        return reschedule, delay

    def set_with_conditions(self, name, value, group, state, time):
        """ Set value 'value' on attribute with name 'name' when 'time' 
            has elapsed since all elements in 'group' have reached state
           'state'

        :param name: Name of the attribute to set
        :type name: str
        :param value: Value of the attribute to set
        :type value: str
        :param group: Group of RMs to wait for (list of guids)
        :type group: int or list of int
        :param state: State to wait for on all RM in group. (either 'STARTED', 'STOPPED' or 'READY')
        :type state: str
        :param time: Time to wait after 'state' is reached on all RMs in group. (e.g. '2s')
        :type time: str

        """

        reschedule = False
        delay = reschedule_delay 

        ## evaluate if set conditions are met

        # only can set with conditions after the RM is started
        if self.state != ResourceState.STARTED:
            reschedule = True
        else:
            reschedule, delay = self._needs_reschedule(group, state, time)

        if reschedule:
            callback = functools.partial(self.set_with_conditions, 
                    name, value, group, state, time)
            self.ec.schedule(delay, callback)
        else:
            self.set(name, value)

    def start_with_conditions(self):
        """ Starts RM when all the conditions in self.conditions for
        action 'START' are satisfied.

        """
        reschedule = False
        delay = reschedule_delay 

        ## evaluate if set conditions are met

        # only can start when RM is either STOPPED or READY
        if self.state not in [ResourceState.STOPPED, ResourceState.READY]:
            reschedule = True
            self.debug("---- RESCHEDULING START ---- state %s " % self.state )
        else:
            start_conditions = self.conditions.get(ResourceAction.START, [])
            
            self.debug("---- START CONDITIONS ---- %s" % start_conditions) 
            
            # Verify all start conditions are met
            for (group, state, time) in start_conditions:
                # Uncomment for debug
                #unmet = []
                #for guid in group:
                #    rm = self.ec.get_resource(guid)
                #    unmet.append((guid, rm._state))
                #
                #self.debug("---- WAITED STATES ---- %s" % unmet )

                reschedule, delay = self._needs_reschedule(group, state, time)
                if reschedule:
                    break

        if reschedule:
            self.ec.schedule(delay, self.start_with_conditions)
        else:
            self.debug("----- STARTING ---- ")
            self.start()

    def stop_with_conditions(self):
        """ Stops RM when all the conditions in self.conditions for
        action 'STOP' are satisfied.

        """
        reschedule = False
        delay = reschedule_delay 

        ## evaluate if set conditions are met

        # only can stop when RM is STARTED
        if self.state != ResourceState.STARTED:
            reschedule = True
        else:
            self.debug(" ---- STOP CONDITIONS ---- %s" % 
                    self.conditions.get(ResourceAction.STOP))

            stop_conditions = self.conditions.get(ResourceAction.STOP, []) 
            for (group, state, time) in stop_conditions:
                reschedule, delay = self._needs_reschedule(group, state, time)
                if reschedule:
                    break

        if reschedule:
            callback = functools.partial(self.stop_with_conditions)
            self.ec.schedule(delay, callback)
        else:
            self.logger.debug(" ----- STOPPING ---- ") 
            self.stop()

    def deploy(self):
        """ Execute all steps required for the RM to reach the state READY

        """
        if self._state > ResourceState.READY:
            self.error("Wrong state %s for deploy" % self.state)
            return

        self.debug("----- DEPLOYING ---- ")
        self._ready_time = strfnow()
        self._state = ResourceState.READY

    def release(self):
        """Clean the resource at the end of the Experiment and change the status

        """
        self._release_time = strfnow()
        self._state = ResourceState.RELEASED

    def valid_connection(self, guid):
        """Check if the connection is available.

        :param guid: Guid of the current Resource Manager
        :type guid: int
        :rtype:  Boolean

        """
        # TODO: Validate!
        return True
+
class ResourceFactory(object):
    """ Registry mapping resource type names to ResourceManager
    subclasses, used to instantiate RMs by type name. """

    _resource_types = dict()

    @classmethod
    def resource_types(cls):
        """ Returns the dictionary of registered resource types """
        return cls._resource_types

    @classmethod
    def register_type(cls, rclass):
        """ Makes 'rclass' instantiable through its rtype() name """
        cls._resource_types[rclass.rtype()] = rclass

    @classmethod
    def create(cls, rtype, ec, guid):
        """ Instantiates the class registered under 'rtype' for the
        experiment controller 'ec' and guid 'guid' """
        rclass = cls._resource_types[rtype]
        return rclass(ec, guid)
+
def populate_factory():
    """ Discovers every available ResourceManager subclass and registers
    it in the ResourceFactory. """
    for rclass in find_types():
        ResourceFactory.register_type(rclass)
+
def find_types():
    """ Discovers all ResourceManager subclasses importable from the
    NEPI resource search path.

    The search path is the set of directories in the NEPI_SEARCH_PATH
    environment variable (space separated) plus the directory of the
    nepi.resources package.

    :rtype: list of ResourceManager subclasses
    """
    search_path = os.environ.get("NEPI_SEARCH_PATH", "")
    # Filter out empty entries so an unset or empty variable does not
    # add '' (interpreted as the current directory) to the search path
    search_path = set([path for path in search_path.split(" ") if path])
   
    import nepi.resources 
    path = os.path.dirname(nepi.resources.__file__)
    search_path.add(path)

    types = []

    for importer, modname, ispkg in pkgutil.walk_packages(search_path):
        loader = importer.find_module(modname)
        try:
            module = loader.load_module(loader.fullname)
            for attrname in dir(module):
                # skip private names
                if attrname.startswith("_"):
                    continue

                attr = getattr(module, attrname)

                # skip the base class itself
                if attr == ResourceManager:
                    continue

                if not inspect.isclass(attr):
                    continue

                if issubclass(attr, ResourceManager):
                    types.append(attr)
        except Exception:
            # A broken module must not prevent loading the remaining
            # Resource Managers; log the traceback and continue.
            # (Narrowed from a bare 'except:' so KeyboardInterrupt and
            # SystemExit still propagate.)
            import traceback
            err = traceback.format_exc()
            logger = logging.getLogger("Resource.find_types()")
            logger.error("Error while loading Resource Managers %s" % err)

    return types
+
+
diff --git a/src/nepi/execution/scheduler.py b/src/nepi/execution/scheduler.py
new file mode 100644 (file)
index 0000000..0060926
--- /dev/null
@@ -0,0 +1,54 @@
+import itertools
+import heapq
+
class TaskStatus:
    """Possible execution states of a scheduler Task."""
    NEW = 0
    DONE = 1
    ERROR = 2
+
+
class Task(object):
    """A unit of work to be executed at a given timestamp."""

    def __init__(self, timestamp, callback):
        # id is assigned by the scheduler when the task is queued
        self.id = None
        self.timestamp = timestamp
        self.callback = callback
        # result of running callback; filled in after execution
        self.result = None
        self.status = TaskStatus.NEW
+
class HeapScheduler(object):
    """Min-heap based task scheduler ordered by (timestamp, id).

    This class is thread safe in CPython: all calls to C extensions are
    made atomic by the GIL, so heapq.heappush, heapq.heappop and the
    list/set accesses used here need no extra locking.

    Removal is lazy: remove() only invalidates the id, and invalid
    entries are dropped when they surface in next().
    """

    def __init__(self):
        super(HeapScheduler, self).__init__()
        self._queue = list()
        self._valid = set()
        self._idgen = itertools.count(1)

    def schedule(self, task):
        """Queue task, assigning it a fresh id if it has none.

        :return: the scheduled task
        """
        # BUGFIX: identity test for None (was '== None'); also use the
        # builtin next() instead of the Python-2-only .next() method
        if task.id is None:
            task.id = next(self._idgen)
        # the unique id breaks timestamp ties so tasks are never compared
        entry = (task.timestamp, task.id, task)
        self._valid.add(task.id)
        heapq.heappush(self._queue, entry)
        return task

    def remove(self, tid):
        """Invalidate the task with id tid; unknown ids are ignored."""
        # BUGFIX: set.discard replaces the bare 'except:' swallow
        self._valid.discard(tid)

    def next(self):
        """Pop and return the earliest valid task, or None if none remain."""
        # BUGFIX: removed the dead 'except IndexError' handler; heappop
        # cannot raise here because the loop guard ensures a non-empty queue
        while self._queue:
            timestamp, tid, task = heapq.heappop(self._queue)
            if tid in self._valid:
                self._valid.discard(tid)
                return task
        return None
+
diff --git a/src/nepi/execution/tags.py b/src/nepi/execution/tags.py
new file mode 100644 (file)
index 0000000..244713a
--- /dev/null
@@ -0,0 +1,21 @@
# Resource role tags
NODE = "node"
NETWORK_INTERFACE = "network interface"
SWITCH = "switch"
TUNNEL = "tunnel"
APPLICATION = "application"
CHANNEL = "channel"
CPU = "cpu"

# Addressing and routing tags
IP4ADDRESS = "ipv4"
IP6ADDRESS = "ipv6"
MACADDRESS = "mac"
IPADDRESS = "ip"
ROUTE = "route"
FLOW = "flow"

# Link / medium property tags
WIRELESS = "wireless"
ETHERNET = "ethernet"
SIMULATED = "simulated"
VIRTUAL = "virtual"
MOBILE = "mobile"
+
diff --git a/src/nepi/execution/trace.py b/src/nepi/execution/trace.py
new file mode 100644 (file)
index 0000000..382a6bb
--- /dev/null
@@ -0,0 +1,20 @@
class TraceAttr:
    """Selector for which aspect of a trace to retrieve."""
    ALL = 'all'        # the complete trace content
    STREAM = 'stream'  # a block of the trace content
    PATH = 'path'      # the path to the trace file
    SIZE = 'size'      # the size of the trace in bytes
+
class Trace(object):
    """A named trace (output stream) declared by a resource."""

    def __init__(self, name, help):
        self._name = name
        self._help = help
        # traces start disabled; collection must be explicitly enabled
        self.enabled = False

    @property
    def name(self):
        """Name identifying the trace."""
        return self._name

    @property
    def help(self):
        """Human readable description of the trace."""
        return self._help
+
diff --git a/src/nepi/resources/linux/__init__.py b/src/nepi/resources/linux/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/src/nepi/resources/linux/application.py b/src/nepi/resources/linux/application.py
new file mode 100644 (file)
index 0000000..0364939
--- /dev/null
@@ -0,0 +1,502 @@
+from nepi.execution.attribute import Attribute, Flags, Types
+from nepi.execution.trace import Trace, TraceAttr
+from nepi.execution.resource import ResourceManager, clsinit, ResourceState
+from nepi.resources.linux.node import LinuxNode
+from nepi.util import sshfuncs 
+from nepi.util.timefuncs import strfnow, strfdiff
+
+import logging
+import os
+
# Delay used when re-scheduling deploy while the node is not READY yet
reschedule_delay = "0.5s"
# Minimum time (seconds) between remote state polls of the application,
# to avoid overwhelming the hosts with ssh queries
state_check_delay = 1
+
+# TODO: Resolve wildcards in commands!! 
+
+@clsinit
+class LinuxApplication(ResourceManager):
+    _rtype = "LinuxApplication"
+
+    @classmethod
+    def _register_attributes(cls):
+        command = Attribute("command", "Command to execute", 
+                flags = Flags.ExecReadOnly)
+        forward_x11 = Attribute("forwardX11", " Enables X11 forwarding for SSH connections", 
+                flags = Flags.ExecReadOnly)
+        env = Attribute("env", "Environment variables string for command execution",
+                flags = Flags.ExecReadOnly)
+        sudo = Attribute("sudo", "Run with root privileges", 
+                flags = Flags.ExecReadOnly)
+        depends = Attribute("depends", 
+                "Space-separated list of packages required to run the application",
+                flags = Flags.ExecReadOnly)
+        sources = Attribute("sources", 
+                "Space-separated list of regular files to be deployed in the working "
+                "path prior to building. Archives won't be expanded automatically.",
+                flags = Flags.ExecReadOnly)
+        code = Attribute("code", 
+                "Plain text source code to be uploaded to the server. It will be stored "
+                "under ${SOURCES}/code",
+                flags = Flags.ExecReadOnly)
+        build = Attribute("build", 
+                "Build commands to execute after deploying the sources. "
+                "Sources will be in the ${SOURCES} folder. "
+                "Example: tar xzf ${SOURCES}/my-app.tgz && cd my-app && ./configure && make && make clean.\n"
+                "Try to make the commands return with a nonzero exit code on error.\n"
+                "Also, do not install any programs here, use the 'install' attribute. This will "
+                "help keep the built files constrained to the build folder (which may "
+                "not be the home folder), and will result in faster deployment. Also, "
+                "make sure to clean up temporary files, to reduce bandwidth usage between "
+                "nodes when transferring built packages.",
+                flags = Flags.ReadOnly)
+        install = Attribute("install", 
+                "Commands to transfer built files to their final destinations. "
+                "Sources will be in the initial working folder, and a special "
+                "tag ${SOURCES} can be used to reference the experiment's "
+                "home folder (where the application commands will run).\n"
+                "ALL sources and targets needed for execution must be copied there, "
+                "if building has been enabled.\n"
+                "That is, 'slave' nodes will not automatically get any source files. "
+                "'slave' nodes don't get build dependencies either, so if you need "
+                "make and other tools to install, be sure to provide them as "
+                "actual dependencies instead.",
+                flags = Flags.ReadOnly)
+        stdin = Attribute("stdin", "Standard input", flags = Flags.ExecReadOnly)
+        stdout = Attribute("stdout", "Standard output", flags = Flags.ExecReadOnly)
+        stderr = Attribute("stderr", "Standard error", flags = Flags.ExecReadOnly)
+        tear_down = Attribute("tearDown", "Bash script to be executed before "
+                "releasing the resource", 
+                flags = Flags.ReadOnly)
+
+        cls._register_attribute(command)
+        cls._register_attribute(forward_x11)
+        cls._register_attribute(env)
+        cls._register_attribute(sudo)
+        cls._register_attribute(depends)
+        cls._register_attribute(sources)
+        cls._register_attribute(code)
+        cls._register_attribute(build)
+        cls._register_attribute(install)
+        cls._register_attribute(stdin)
+        cls._register_attribute(stdout)
+        cls._register_attribute(stderr)
+        cls._register_attribute(tear_down)
+
+    @classmethod
+    def _register_traces(cls):
+        stdout = Trace("stdout", "Standard output stream")
+        stderr = Trace("stderr", "Standard error stream")
+        buildlog = Trace("buildlog", "Output of the build process")
+
+        cls._register_trace(stdout)
+        cls._register_trace(stderr)
+        cls._register_trace(buildlog)
+
+    def __init__(self, ec, guid):
+        super(LinuxApplication, self).__init__(ec, guid)
+        self._pid = None
+        self._ppid = None
+        self._home = "app-%s" % self.guid
+
+        # timestamp of last state check of the application
+        self._last_state_check = strfnow()
+
+        self._logger = logging.getLogger("LinuxApplication")
+    
+    def log_message(self, msg):
+        return " guid %d - host %s - %s " % (self.guid, 
+                self.node.get("hostname"), msg)
+
+    @property
+    def node(self):
+        node = self.get_connected(LinuxNode.rtype())
+        if node: return node[0]
+        return None
+
+    @property
+    def app_home(self):
+        return os.path.join(self.node.exp_home, self._home)
+
+    @property
+    def src_dir(self):
+        return os.path.join(self.app_home, 'src')
+
+    @property
+    def build_dir(self):
+        return os.path.join(self.app_home, 'build')
+
+    @property
+    def pid(self):
+        return self._pid
+
+    @property
+    def ppid(self):
+        return self._ppid
+
+    def trace(self, name, attr = TraceAttr.ALL, block = 512, offset = 0):
+        self.info("Retrieving '%s' trace %s " % (name, attr))
+
+        path = os.path.join(self.app_home, name)
+        
+        command = "(test -f %s && echo 'success') || echo 'error'" % path
+        (out, err), proc = self.node.execute(command)
+
+        if (err and proc.poll()) or out.find("error") != -1:
+            msg = " Couldn't find trace %s " % name
+            self.error(msg, out, err)
+            return None
+    
+        if attr == TraceAttr.PATH:
+            return path
+
+        if attr == TraceAttr.ALL:
+            (out, err), proc = self.node.check_output(self.app_home, name)
+            
+            if err and proc.poll():
+                msg = " Couldn't read trace %s " % name
+                self.error(msg, out, err)
+                return None
+
+            return out
+
+        if attr == TraceAttr.STREAM:
+            cmd = "dd if=%s bs=%d count=1 skip=%d" % (path, block, offset)
+        elif attr == TraceAttr.SIZE:
+            cmd = "stat -c%%s %s " % path
+
+        (out, err), proc = self.node.execute(cmd)
+
+        if err and proc.poll():
+            msg = " Couldn't find trace %s " % name
+            self.error(msg, out, err)
+            return None
+        
+        if attr == TraceAttr.SIZE:
+            out = int(out.strip())
+
+        return out
+            
+    def provision(self, filters = None):
+        # create home dir for application
+        self.node.mkdir(self.app_home)
+
+        # upload sources
+        self.upload_sources()
+
+        # upload code
+        self.upload_code()
+
+        # upload stdin
+        self.upload_stdin()
+
+        # install dependencies
+        self.install_dependencies()
+
+        # build
+        self.build()
+
+        # Install
+        self.install()
+
+        command = self.get("command")
+        x11 = self.get("forwardX11")
+        if not x11 and command:
+            self.info("Uploading command '%s'" % command)
+
+            # Export environment
+            environ = ""
+            env = self.get("env") or ""
+            for var in env.split(" "):
+                environ += 'export %s\n' % var
+
+            command = environ + command
+
+            # If the command runs asynchronous, pre upload the command 
+            # to the app.sh file in the remote host
+            dst = os.path.join(self.app_home, "app.sh")
+            command = self.replace_paths(command)
+            self.node.upload(command, dst, text = True)
+
+        super(LinuxApplication, self).provision()
+
+    def upload_sources(self):
+        # TODO: check if sources need to be uploaded and upload them
+        sources = self.get("sources")
+        if sources:
+            self.info(" Uploading sources ")
+
+            # create dir for sources
+            self.node.mkdir(self.src_dir)
+
+            sources = sources.split(' ')
+
+            http_sources = list()
+            for source in list(sources):
+                if source.startswith("http") or source.startswith("https"):
+                    http_sources.append(source)
+                    sources.remove(source)
+
+            # Download http sources
+            if http_sources:
+                cmd = " wget -c --directory-prefix=${SOURCES} "
+                verif = ""
+
+                for source in http_sources:
+                    cmd += " %s " % (source)
+                    verif += " ls ${SOURCES}/%s ;" % os.path.basename(source)
+                
+                # Wget output goes to stderr :S
+                cmd += " 2> /dev/null ; "
+
+                # Add verification
+                cmd += " %s " % verif
+
+                # Upload the command to a file, and execute asynchronously
+                self.upload_and_run(cmd, 
+                        "http_sources.sh", "http_sources_pid", 
+                        "http_sources_out", "http_sources_err")
+            if sources:
+                self.node.upload(sources, self.src_dir)
+
+    def upload_code(self):
+        code = self.get("code")
+        if code:
+            # create dir for sources
+            self.node.mkdir(self.src_dir)
+
+            self.info(" Uploading code ")
+
+            dst = os.path.join(self.src_dir, "code")
+            self.node.upload(sources, dst, text = True)
+
+    def upload_stdin(self):
+        stdin = self.get("stdin")
+        if stdin:
+            # create dir for sources
+            self.info(" Uploading stdin ")
+
+            dst = os.path.join(self.app_home, "stdin")
+            self.node.upload(stdin, dst, text = True)
+
+    def install_dependencies(self):
+        depends = self.get("depends")
+        if depends:
+            self.info(" Installing dependencies %s" % depends)
+            self.node.install_packages(depends, home = self.app_home)
+
+    def build(self):
+        build = self.get("build")
+        if build:
+            self.info(" Building sources ")
+            
+            # create dir for build
+            self.node.mkdir(self.build_dir)
+
+            # Upload the command to a file, and execute asynchronously
+            self.upload_and_run(build, 
+                    "build.sh", "build_pid", 
+                    "build_out", "build_err")
+    def install(self):
+        install = self.get("install")
+        if install:
+            self.info(" Installing sources ")
+
+            # Upload the command to a file, and execute asynchronously
+            self.upload_and_run(install, 
+                    "install.sh", "install_pid", 
+                    "install_out", "install_err")
+
+    def deploy(self):
+        # Wait until node is associated and deployed
+        node = self.node
+        if not node or node.state < ResourceState.READY:
+            self.debug("---- RESCHEDULING DEPLOY ---- node state %s " % self.node.state )
+            self.ec.schedule(reschedule_delay, self.deploy)
+        else:
+            try:
+                command = self.get("command") or ""
+                self.info(" Deploying command '%s' " % command)
+                self.discover()
+                self.provision()
+            except:
+                self._state = ResourceState.FAILED
+                raise
+
+            super(LinuxApplication, self).deploy()
+
+    def start(self):
+        command = self.get('command')
+        env = self.get('env')
+        stdin = 'stdin' if self.get('stdin') else None
+        stdout = 'stdout' if self.get('stdout') else 'stdout'
+        stderr = 'stderr' if self.get('stderr') else 'stderr'
+        sudo = self.get('sudo') or False
+        x11 = self.get('forwardX11') or False
+        failed = False
+
+        super(LinuxApplication, self).start()
+
+        if not command:
+            self.info("No command to start ")
+            self._state = ResourceState.FINISHED
+            return 
+    
+        self.info("Starting command '%s'" % command)
+
+        if x11:
+            if env:
+                # Export environment
+                environ = ""
+                for var in env.split(" "):
+                    environ += ' %s ' % var
+
+                command = "(" + environ + " ; " + command + ")"
+                command = self.replace_paths(command)
+
+            # If the command requires X11 forwarding, we
+            # can't run it asynchronously
+            (out, err), proc = self.node.execute(command,
+                    sudo = sudo,
+                    stdin = stdin,
+                    forward_x11 = x11)
+
+            self._state = ResourceState.FINISHED
+
+            if proc.poll() and err:
+                failed = True
+        else:
+            # Command was  previously uploaded, now run the remote
+            # bash file asynchronously
+            cmd = "bash ./app.sh"
+            (out, err), proc = self.node.run(cmd, self.app_home, 
+                stdin = stdin, 
+                stdout = stdout,
+                stderr = stderr,
+                sudo = sudo)
+
+            if proc.poll() and err:
+                failed = True
+        
+            if not failed:
+                pid, ppid = self.node.wait_pid(home = self.app_home)
+                if pid: self._pid = int(pid)
+                if ppid: self._ppid = int(ppid)
+
+            if not self.pid or not self.ppid:
+                failed = True
+            (out, chkerr), proc = self.node.check_output(self.app_home, 'stderr')
+
+            if failed or out or chkerr:
+                # check if execution errors occurred
+                msg = " Failed to start command '%s' " % command
+                out = out
+                if err:
+                    err = err
+                elif chkerr:
+                    err = chkerr
+
+                self.error(msg, out, err)
+
+                msg2 = " Setting state to Failed"
+                self.debug(msg2)
+                self._state = ResourceState.FAILED
+
+                raise RuntimeError, msg
+
+    def stop(self):
+        command = self.get('command') or ''
+        state = self.state
+        
+        if state == ResourceState.STARTED:
+            self.info("Stopping command '%s'" % command)
+
+            (out, err), proc = self.node.kill(self.pid, self.ppid)
+
+            if out or err:
+                # check if execution errors occurred
+                msg = " Failed to STOP command '%s' " % self.get("command")
+                self.error(msg, out, err)
+                self._state = ResourceState.FAILED
+                stopped = False
+            else:
+                super(LinuxApplication, self).stop()
+
+    def release(self):
+        self.info("Releasing resource")
+
+        tear_down = self.get("tearDown")
+        if tear_down:
+            self.node.execute(tear_down)
+
+        self.stop()
+        if self.state == ResourceState.STOPPED:
+            super(LinuxApplication, self).release()
+    
+    @property
+    def state(self):
+        if self._state == ResourceState.STARTED:
+            # To avoid overwhelming the remote hosts and the local processor
+            # with too many ssh queries, the state is only requested
+            # every 'state_check_delay' .
+            if strfdiff(strfnow(), self._last_state_check) > state_check_delay:
+                # check if execution errors occurred
+                (out, err), proc = self.node.check_output(self.app_home, 'stderr')
+
+                if out or err:
+                    if err.find("No such file or directory") >= 0 :
+                        # The resource is marked as started, but the
+                        # command was not yet executed
+                        return ResourceState.READY
+
+                    msg = " Failed to execute command '%s'" % self.get("command")
+                    self.error(msg, out, err)
+                    self._state = ResourceState.FAILED
+
+                elif self.pid and self.ppid:
+                    status = self.node.status(self.pid, self.ppid)
+
+                    if status == sshfuncs.FINISHED:
+                        self._state = ResourceState.FINISHED
+
+
+                self._last_state_check = strfnow()
+
+        return self._state
+
+    def upload_and_run(self, cmd, fname, pidfile, outfile, errfile):
+        dst = os.path.join(self.app_home, fname)
+        cmd = self.replace_paths(cmd)
+        self.node.upload(cmd, dst, text = True)
+
+        cmd = "bash ./%s" % fname
+        (out, err), proc = self.node.run_and_wait(cmd, self.app_home,
+            pidfile = pidfile,
+            stdout = outfile, 
+            stderr = errfile, 
+            raise_on_error = True)
+
+    def replace_paths(self, command):
+        """
+        Replace all special path tags with shell-escaped actual paths.
+        """
+        def absolute_dir(d):
+            return d if d.startswith("/") else os.path.join("${HOME}", d)
+
+        return ( command
+            .replace("${SOURCES}", absolute_dir(self.src_dir))
+            .replace("${BUILD}", absolute_dir(self.build_dir))
+            .replace("${APP_HOME}", absolute_dir(self.app_home))
+            .replace("${NODE_HOME}", absolute_dir(self.node.node_home))
+            .replace("${EXP_HOME}", absolute_dir(self.node.exp_home) )
+            )
+        
+    def valid_connection(self, guid):
+        # TODO: Validate!
+        return True
+        # XXX: What if it is connected to more than one node?
+        resources = self.find_resources(exact_tags = [tags.NODE])
+        self._node = resources[0] if len(resources) == 1 else None
+        return self._node
+
diff --git a/src/nepi/resources/linux/channel.py b/src/nepi/resources/linux/channel.py
new file mode 100644 (file)
index 0000000..142478b
--- /dev/null
@@ -0,0 +1,27 @@
+from nepi.execution.attribute import Attribute, Flags
+from nepi.execution.resource import ResourceManager, clsinit, ResourceState
+from nepi.resources.linux.node import LinuxNode
+
+import collections
+import logging
+import os
+import random
+import re
+import tempfile
+import time
+import threading
+
@clsinit
class LinuxChannel(ResourceManager):
    """Resource manager for the channel connecting Linux interfaces.

    NOTE(review): appears to be a purely logical grouping resource — it
    performs no configuration of its own; confirm against callers.
    """
    _rtype = "LinuxChannel"

    def __init__(self, ec, guid):
        super(LinuxChannel, self).__init__(ec, guid)
        self._logger = logging.getLogger("LinuxChannel")

    def log_message(self, msg):
        """Return msg prefixed with this channel's guid for logging."""
        return " guid %d - %s " % (self.guid, msg)

    def valid_connection(self, guid):
        """Accept any connection request (validation not implemented)."""
        # TODO: Validate!
        return True
diff --git a/src/nepi/resources/linux/debfuncs.py b/src/nepi/resources/linux/debfuncs.py
new file mode 100644 (file)
index 0000000..fdc8d3c
--- /dev/null
@@ -0,0 +1,26 @@
+# TODO: Investigate using http://nixos.org/nix/
+
def install_packages_command(os, packages):
    """Build a shell command installing each package via apt-get if absent.

    :param os: target OS identifier (currently unused; kept for symmetry
        with the rpm variant)
    :param packages: a package name or a list of package names
    :return: shell command string, e.g.
        " ( dpkg -s vim || sudo -S apt-get -y install vim ) ; "
    """
    if not isinstance(packages, list):
        packages = [packages]

    parts = [" ( dpkg -s %(package)s || sudo -S apt-get -y install %(package)s ) ; "
            % {'package': p} for p in packages]

    return "".join(parts)
+
def remove_packages_command(os, packages):
    """Build a shell command purging each package via apt-get if installed.

    :param os: target OS identifier (currently unused; kept for symmetry
        with the rpm variant)
    :param packages: a package name or a list of package names
    :return: shell command string, e.g.
        " ( dpkg -s vim && sudo -S apt-get -y purge vim ) ; "
    """
    if not isinstance(packages, list):
        packages = [packages]

    parts = [" ( dpkg -s %(package)s && sudo -S apt-get -y purge %(package)s ) ; "
            % {'package': p} for p in packages]

    return "".join(parts)
+
diff --git a/src/nepi/resources/linux/interface.py b/src/nepi/resources/linux/interface.py
new file mode 100644 (file)
index 0000000..f572766
--- /dev/null
@@ -0,0 +1,296 @@
+from nepi.execution.attribute import Attribute, Types, Flags
+from nepi.execution.resource import ResourceManager, clsinit, ResourceState
+from nepi.resources.linux.node import LinuxNode
+from nepi.resources.linux.channel import LinuxChannel
+
+import collections
+import logging
+import os
+import random
+import re
+import tempfile
+import time
+
# TODO: UP, MTU attributes!

# Delay used when re-scheduling deploy while node/channel are not ready
reschedule_delay = "0.5s"
+
+@clsinit
+class LinuxInterface(ResourceManager):
+    _rtype = "LinuxInterface"
+
+    @classmethod
+    def _register_attributes(cls):
+        ip4 = Attribute("ip4", "IPv4 Address",
+                flags = Flags.ExecReadOnly)
+
+        ip6 = Attribute("ip6", "IPv6 Address",
+                flags = Flags.ExecReadOnly)
+
+        mac = Attribute("mac", "MAC Address",
+                flags = Flags.ExecReadOnly)
+
+        mask4 = Attribute("mask4", "IPv4 network mask",
+                flags = Flags.ExecReadOnly)
+
+        mask6 = Attribute("mask6", "IPv6 network mask",
+                type = Types.Integer,
+                flags = Flags.ExecReadOnly)
+
+        mtu = Attribute("mtu", "Maximum transmition unit for device",
+            type = Types.Integer)
+
+        devname = Attribute("deviceName", 
+                "Name of the network interface (e.g. eth0, wlan0, etc)",
+                flags = Flags.ExecReadOnly)
+
+        up = Attribute("up", "Link up", type = Types.Bool)
+
+        tear_down = Attribute("tearDown", "Bash script to be executed before " + \
+                "releasing the resource",
+                flags = Flags.ExecReadOnly)
+
+        cls._register_attribute(ip4)
+        cls._register_attribute(ip6)
+        cls._register_attribute(mac)
+        cls._register_attribute(mask4)
+        cls._register_attribute(mask6)
+        cls._register_attribute(mtu)
+        cls._register_attribute(devname)
+        cls._register_attribute(up)
+        cls._register_attribute(tear_down)
+
+    def __init__(self, ec, guid):
+        super(LinuxInterface, self).__init__(ec, guid)
+        self._configured = False
+
+        self._logger = logging.getLogger("LinuxInterface")
+        
+        self.add_set_hooks()
+
+    def log_message(self, msg):
+        return " guid %d - host %s - %s " % (self.guid, 
+                self.node.get("hostname"), msg)
+
+    @property
+    def node(self):
+        node = self.get_connected(LinuxNode.rtype())
+        if node: return node[0]
+        return None
+
+    @property
+    def channel(self):
+        chan = self.get_connected(LinuxChannel.rtype())
+        if chan: return chan[0]
+        return None
+
+    def discover(self, filters = None):
+        devname = self.get("deviceName")
+        ip4 = self.get("ip4")
+        ip6 = self.get("ip4")
+        mac = self.get("mac")
+        mask4 = self.get("mask4")
+        mask6 = self.get("mask6")
+        mtu = self.get("mtu")
+
+        # Get current interfaces information
+        (out, err), proc = self.node.execute("ifconfig", sudo = True)
+
+        if err and proc.poll():
+            msg = " Error retrieving interface information "
+            self.error(msg, out, err)
+            raise RuntimeError, "%s - %s - %s" % (msg, out, err)
+        
+        # Check if an interface is found matching the RM attributes
+        ifaces = out.split("\n\n")
+
+        for i in ifaces:
+            m = re.findall("(\w+)\s+Link\s+encap:\w+(\s+HWaddr\s+(([0-9a-fA-F]{2}:?){6}))?(\s+inet\s+addr:((\d+\.?){4}).+Mask:(\d+\.\d+\.\d+\.\d+))?(.+inet6\s+addr:\s+([0-9a-fA-F:.]+)/(\d+))?(.+(UP))?(.+MTU:(\d+))?", i, re.DOTALL)
+            
+            m = m[0]
+            dn = m[0]
+            mc = m[2]
+            i4 = m[5]
+            msk4 = m[7]
+            i6 = m[9]
+            msk6 = m[10]
+            up = True if m[12] else False
+            mu = m[14]
+
+            self.debug("Found interface %(devname)s with MAC %(mac)s,"
+                    "IPv4 %(ipv4)s %(mask4)s IPv6 %(ipv6)s/%(mask6)s %(up)s %(mtu)s" % ({
+                'devname': dn,
+                'mac': mc,
+                'ipv4': i4,
+                'mask4': msk4,
+                'ipv6': i6,
+                'mask6': msk6,
+                'up': up,
+                'mtu': mu
+                }) )
+
+            # If the user didn't provide information we take the first 
+            # interface that is UP
+            if not devname and not ip4 and not ip6 and up:
+                self._configured = True
+                self.load_configuration(dn, mc, i4, msk4, i6, msk6, mu, up)
+                break
+
+            # If the user provided ipv4 or ipv6 matching that of an interface
+            # load the interface info
+            if (ip4 and ip4 == i4) or (ip6 and ip6 == i6):
+                self._configured = True
+                self.load_configuration(dn, mc, i4, msk4, i6, msk6, mu, up)
+                break
+
+            # If the user provided the device name we load the associated info
+            if devname and devname == dn:
+                if ((ip4 and ip4 == i4) and (ipv6 and ip6 == i6)) or \
+                        not (ip4 or ip6):
+                    self._configured = True
+               
+                # If the user gave a different ip than the existing, asume ip 
+                # needs to be changed
+                i4 = ip4 or i4
+                i6 = ip6 or i6
+                mu = mtu or mu 
+
+                self.load_configuration(dn, mc, i4, msk4, i6, msk6, mu, up)
+                break
+       
+        if not self.get("deviceName"):
+            msg = "Unable to resolve interface "
+            self.error(msg)
+            raise RuntimeError, msg
+
+        super(LinuxInterface, self).discover(filters = filters)
+
+    def provision(self, filters = None):
+        devname = self.get("deviceName")
+        ip4 = self.get("ip4")
+        ip6 = self.get("ip4")
+        mac = self.get("mac")
+        mask4 = self.get("mask4")
+        mask6 = self.get("mask6")
+        mtu = self.get("mtu")
+
+        # Must configure interface if configuration is required
+        if not self._configured:
+            cmd = "ifconfig %s" % devname
+
+            if ip4 and mask4:
+                cmd += " %(ip4)s netmask %(mask4)s broadcast %(bcast)s up" % ({
+                    'ip4': ip4,
+                    'mask4': mask4,
+                    'bcast': bcast})
+            if mtu:
+                cmd += " mtu %d " % mtu
+
+            (out, err), proc = self.node.execute(cmd, sudo = True)
+
+            if err and proc.poll():
+                msg = "Error configuring interface with command '%s'" % cmd
+                self.error(msg, out, err)
+                raise RuntimeError, "%s - %s - %s" % (msg, out, err)
+
+            if ip6 and mask6:
+                cmd = "ifconfig %(devname)s inet6 add %(ip6)s/%(mask6)d" % ({
+                        'devname': devname,
+                        'ip6': ip6,
+                        'mask6': mask6})
+
+            (out, err), proc = self.node.execute(cmd, sudo = True)
+
+            if err and proc.poll():
+                msg = "Error seting ipv6 for interface using command '%s' " % cmd
+                self.error(msg, out, err)
+                raise RuntimeError, "%s - %s - %s" % (msg, out, err)
+
+        super(LinuxInterface, self).provision(filters = filters)
+
+    def deploy(self):
+        # Wait until node is provisioned
+        node = self.node
+        chan = self.channel
+
+        if not node or node.state < ResourceState.PROVISIONED:
+            self.ec.schedule(reschedule_delay, self.deploy)
+        elif not chan or chan.state < ResourceState.READY:
+            self.ec.schedule(reschedule_delay, self.deploy)
+        else:
+            # Verify if the interface exists in node. If not, configue
+            # if yes, load existing configuration
+            try:
+                self.discover()
+                self.provision()
+            except:
+                self._state = ResourceState.FAILED
+                raise
+
+            super(LinuxInterface, self).deploy()
+
+    def release(self):
+        tear_down = self.get("tearDown")
+        if tear_down:
+            self.execute(tear_down)
+
+        super(LinuxInterface, self).release()
+
+    def valid_connection(self, guid):
+        # TODO: Validate!
+        return True
+
+    def load_configuration(self, devname, mac, ip4, mask4, ip6, mask6, mtu, up):
+        self.set("deviceName", devname)
+        self.set("mac", mac)
+        self.set("ip4", ip4)
+        self.set("mask4", mask4)
+        self.set("ip6", ip6)
+        self.set("mask6", mask6)
+
+        # set the following without validating or triggering hooks
+        attr = self._attrs["up"]
+        attr._value = up
+        attr = self._attrs["mtu"]
+
+    def add_set_hooks(self):
+        attrup = self._attrs["up"]
+        attrup.set_hook = self.set_hook_up
+
+        attrmtu = self._attrs["mtu"]
+        attrmtu.set_hook = self.set_hook_mtu
+
+    def set_hook_up(self, oldval, newval):
+        if oldval == newval:
+            return oldval
+
+        # configure interface up
+        if newval == True:
+            cmd = "ifup %s" % self.get("deviceName")
+        elif newval == False:
+            cmd = "ifdown %s" % self.get("deviceName")
+
+        (out, err), proc = self.node.execute(cmd, sudo = True)
+
+        if err and proc.poll():
+            msg = "Error setting interface up/down using command '%s' " % cmd
+            self.error(msg, err, out)
+            return oldval
+        
+        return newval
+
+    def set_hook_mtu(self, oldval, newval):
+        if oldval == newval:
+            return oldval
+
+        cmd = "ifconfig %s mtu %d" % (self.get("deviceName"), newval)
+
+        (out, err), proc = self.node.execute(cmd, sudo = True)
+
+        if err and proc.poll():
+            msg = "Error setting interface MTU using command '%s' " % cmd
+            self.error(msg, err, out)
+            return  oldval
+        
+        return newval
+
diff --git a/src/nepi/resources/linux/node.py b/src/nepi/resources/linux/node.py
new file mode 100644 (file)
index 0000000..85e2286
--- /dev/null
@@ -0,0 +1,633 @@
+from nepi.execution.attribute import Attribute, Flags
+from nepi.execution.resource import ResourceManager, clsinit, ResourceState
+from nepi.resources.linux import rpmfuncs, debfuncs 
+from nepi.util import sshfuncs, execfuncs 
+
+import collections
+import logging
+import os
+import random
+import re
+import tempfile
+import time
+import threading
+
# TODO: Verify files and dirs exists already
# TODO: Blacklist nodes!
# TODO: Unify delays!!
# TODO: Validate outcome of uploads!! 

# Delay (string understood by the EC scheduler) used by deploy() to
# re-schedule itself while waiting for connected resources to be READY.
reschedule_delay = "0.5s"
+
+
@clsinit
class LinuxNode(ResourceManager):
    """ ResourceManager for a Linux host, accessed locally or through SSH.

    Provides the primitives used by the other linux resources:
    synchronous command execution (execute), daemonized background
    execution (run / run_and_wait), file transfer (upload / download /
    copy), package management (install_packages / remove_packages) and
    remote process control (checkpid / status / kill).
    """
    _rtype = "LinuxNode"

    @classmethod
    def _register_attributes(cls):
        """ Declare the user configurable attributes of the node. """
        hostname = Attribute("hostname", "Hostname of the machine",
                flags = Flags.ExecReadOnly)

        username = Attribute("username", "Local account username", 
                flags = Flags.Credential)

        port = Attribute("port", "SSH port", flags = Flags.ExecReadOnly)
        
        home = Attribute("home",
                "Experiment home directory to store all experiment related files",
                flags = Flags.ExecReadOnly)
        
        identity = Attribute("identity", "SSH identity file",
                flags = Flags.Credential)
        
        server_key = Attribute("serverKey", "Server public key", 
                flags = Flags.ExecReadOnly)
        
        clean_home = Attribute("cleanHome", "Remove all files and directories " + \
                " from home folder before starting experiment", 
                flags = Flags.ExecReadOnly)
        
        clean_processes = Attribute("cleanProcesses", 
                "Kill all running processes before starting experiment",
                flags = Flags.ExecReadOnly)
        
        tear_down = Attribute("tearDown", "Bash script to be executed before " + \
                "releasing the resource",
                flags = Flags.ExecReadOnly)

        cls._register_attribute(hostname)
        cls._register_attribute(username)
        cls._register_attribute(port)
        cls._register_attribute(home)
        cls._register_attribute(identity)
        cls._register_attribute(server_key)
        cls._register_attribute(clean_home)
        cls._register_attribute(clean_processes)
        cls._register_attribute(tear_down)

    def __init__(self, ec, guid):
        super(LinuxNode, self).__init__(ec, guid)
        # cached OS identifier, resolved lazily by the 'os' property
        self._os = None
        
        # lock to avoid concurrency issues on methods used by applications 
        self._lock = threading.Lock()

        self._logger = logging.getLogger("LinuxNode")
    
    def log_message(self, msg):
        """ Prefix log messages with the guid and hostname of the node. """
        return " guid %d - host %s - %s " % (self.guid, 
                self.get("hostname"), msg)

    @property
    def home(self):
        """ Base directory on the node for experiment files ("" if unset). """
        return self.get("home") or ""

    @property
    def exp_home(self):
        """ Directory for this experiment: <home>/<exp_id>. """
        return os.path.join(self.home, self.ec.exp_id)

    @property
    def node_home(self):
        """ Directory for this node: <home>/<exp_id>/node-<guid>. """
        return os.path.join(self.exp_home, "node-%d" % self.guid)

    @property
    def os(self):
        """ Resolve (and cache) the OS flavor of the node by reading
        /etc/issue.

        Returns one of "f12", "f14", "debian", "ubuntu".
        Raises RuntimeError when the OS cannot be determined.
        """
        if self._os:
            return self._os

        if (not self.get("hostname") or not self.get("username")):
            msg = "Can't resolve OS, insufficient data "
            self.error(msg)
            raise RuntimeError(msg)

        (out, err), proc = self.execute("cat /etc/issue", with_lock = True)

        if err and proc.poll():
            msg = "Error detecting OS "
            self.error(msg, out, err)
            raise RuntimeError("%s - %s - %s" % (msg, out, err))

        if out.startswith("Fedora release 12"):
            self._os = "f12"
        elif out.startswith("Fedora release 14"):
            self._os = "f14"
        elif out.startswith("Debian"):
            self._os = "debian"
        elif out.startswith("Ubuntu"):
            self._os = "ubuntu"
        else:
            msg = "Unsupported OS"
            self.error(msg, out)
            raise RuntimeError("%s - %s " % (msg, out))

        return self._os

    @property
    def localhost(self):
        """ True when the node is the local machine. """
        # Bug fix: the original compared against '127.0.0.7', a typo for
        # the loopback address, so 'hostname = 127.0.0.1' went over SSH
        return self.get("hostname") in ['localhost', '127.0.0.1', '::1']

    def provision(self, filters = None):
        """ Prepare the node: verify liveness, optionally clean processes
        and the home directory, and create the node home directory. """
        if not self.is_alive():
            self._state = ResourceState.FAILED
            msg = "Deploy failed. Unresponsive node %s" % self.get("hostname")
            self.error(msg)
            raise RuntimeError(msg)

        if self.get("cleanProcesses"):
            self.clean_processes()

        if self.get("cleanHome"):
            self.clean_home()

        self.mkdir(self.node_home)

        super(LinuxNode, self).provision()

    def deploy(self):
        """ Discover and provision the node, then re-schedule itself
        until all connected interfaces are READY before finalizing. """
        if self.state == ResourceState.NEW:
            try:
                self.discover()
                self.provision()
            except:
                self._state = ResourceState.FAILED
                raise

        # Node needs to wait until all associated interfaces are 
        # ready before it can finalize deployment
        from nepi.resources.linux.interface import LinuxInterface
        ifaces = self.get_connected(LinuxInterface.rtype())
        for iface in ifaces:
            if iface.state < ResourceState.READY:
                self.ec.schedule(reschedule_delay, self.deploy)
                return 

        super(LinuxNode, self).deploy()

    def release(self):
        """ Run the tearDown script (if any) and release the resource. """
        tear_down = self.get("tearDown")
        if tear_down:
            self.execute(tear_down)

        super(LinuxNode, self).release()

    def valid_connection(self, guid):
        # TODO: Validate!
        return True

    def clean_processes(self, killer = False):
        """ Kill processes running on the node.

        killer   when True use an aggressive kill of (almost) everything;
                 otherwise only kill tcpdump and the user's processes.
        """
        self.info("Cleaning up processes")
        
        if killer:
            # Hardcore kill
            cmd = ("sudo -S killall python tcpdump || /bin/true ; " +
                "sudo -S killall python tcpdump || /bin/true ; " +
                "sudo -S kill $(ps -N -T -o pid --no-heading | grep -v $PPID | sort) || /bin/true ; " +
                "sudo -S killall -u root || /bin/true ; " +
                "sudo -S killall -u root || /bin/true ; ")
        else:
            # Be gentler...
            cmd = ("sudo -S killall tcpdump || /bin/true ; " +
                "sudo -S killall tcpdump || /bin/true ; " +
                "sudo -S killall -u %s || /bin/true ; " % self.get("username") +
                "sudo -S killall -u %s || /bin/true ; " % self.get("username"))

        out = err = ""
        (out, err), proc = self.execute(cmd, retry = 1, with_lock = True) 
            
    def clean_home(self):
        """ Remove all nepi-* directories under the node home folder. """
        self.info("Cleaning up home")
        
        cmd = (
            # "find . -maxdepth 1  \( -name '.cache' -o -name '.local' -o -name '.config' -o -name 'nepi-*' \)" +
            "find . -maxdepth 1 -name 'nepi-*' " +
            " -execdir rm -rf {} + "
            )
            
        if self.home:
            cmd = "cd %s ; " % self.home + cmd

        out = err = ""
        (out, err), proc = self.execute(cmd, with_lock = True)

    def upload(self, src, dst, text = False):
        """ Copy content to destination

           src  content to copy. Can be a local file, directory or a list of files

           dst  destination path on the remote host (remote is always self.host)

           text src is text input, it must be stored into a temp file before uploading
        """
        # If source is a string input 
        f = None
        if text and not os.path.isfile(src):
            # src is text input that should be uploaded as file
            # create a temporal file with the content to upload
            f = tempfile.NamedTemporaryFile(delete=False)
            f.write(src)
            f.close()
            src = f.name

        if not self.localhost:
            # Build destination as <user>@<server>:<path>
            dst = "%s@%s:%s" % (self.get("username"), self.get("hostname"), dst)

        result = self.copy(src, dst)

        # clean up temp file
        if f:
            os.remove(f.name)

        return result

    def download(self, src, dst):
        """ Fetch the remote path 'src' into the local path 'dst'. """
        if not self.localhost:
            # Build destination as <user>@<server>:<path>
            src = "%s@%s:%s" % (self.get("username"), self.get("hostname"), src)
        return self.copy(src, dst)

    def install_packages(self, packages, home = None):
        """ Install 'packages' with the package manager of the node OS,
        running the command daemonized under 'home' (default node_home). """
        home = home or self.node_home

        cmd = ""
        if self.os in ["f12", "f14"]:
            cmd = rpmfuncs.install_packages_command(self.os, packages)
        elif self.os in ["debian", "ubuntu"]:
            cmd = debfuncs.install_packages_command(self.os, packages)
        else:
            msg = "Error installing packages ( OS not known ) "
            self.error(msg, self.os)
            raise RuntimeError(msg)

        out = err = ""
        (out, err), proc = self.run_and_wait(cmd, home, 
            pidfile = "instpkg_pid",
            stdout = "instpkg_out", 
            stderr = "instpkg_err",
            raise_on_error = True)

        return (out, err), proc 

    def remove_packages(self, packages, home = None):
        """ Remove 'packages' with the package manager of the node OS,
        running the command daemonized under 'home' (default node_home). """
        home = home or self.node_home

        cmd = ""
        if self.os in ["f12", "f14"]:
            cmd = rpmfuncs.remove_packages_command(self.os, packages)
        elif self.os in ["debian", "ubuntu"]:
            cmd = debfuncs.remove_packages_command(self.os, packages)
        else:
            msg = "Error removing packages ( OS not known ) "
            self.error(msg)
            raise RuntimeError(msg)

        out = err = ""
        (out, err), proc = self.run_and_wait(cmd, home, 
            pidfile = "rmpkg_pid",
            stdout = "rmpkg_out", 
            stderr = "rmpkg_err",
            raise_on_error = True)
         
        return (out, err), proc 

    def mkdir(self, path, clean = False):
        """ Create 'path' on the node (removing it first when clean). """
        if clean:
            self.rmdir(path)

        return self.execute("mkdir -p %s" % path, with_lock = True)

    def rmdir(self, path):
        """ Recursively remove 'path' on the node. """
        return self.execute("rm -rf %s" % path, with_lock = True)

    def run_and_wait(self, command, 
            home = ".", 
            pidfile = "pid", 
            stdin = None, 
            stdout = 'stdout', 
            stderr = 'stderr', 
            sudo = False,
            tty = False,
            raise_on_error = False):
        """ runs a command in background on the remote host, but waits
            until the command finishes execution.
            This is more robust than doing a simple synchronized 'execute',
            since in the remote host the command can continue to run detached
            even if network disconnections occur
        """
        # run command in background in remote host
        (out, err), proc = self.run(command, home, 
                pidfile = pidfile,
                stdin = stdin, 
                stdout = stdout, 
                stderr = stderr, 
                sudo = sudo,
                tty = tty)

        # check no errors occurred
        if proc.poll() and err:
            msg = " Failed to run command '%s' " % command
            self.error(msg, out, err)
            if raise_on_error:
                raise RuntimeError(msg)

        # Wait for pid file to be generated
        pid, ppid = self.wait_pid(
                home = home, 
                pidfile = pidfile, 
                raise_on_error = raise_on_error)

        # wait until command finishes to execute
        self.wait_run(pid, ppid)

        # check if execution errors occurred
        (out, err), proc = self.check_output(home, stderr)

        if err or out:
            msg = " Failed to run command '%s' " % command
            self.error(msg, out, err)

            if raise_on_error:
                raise RuntimeError(msg)
        
        return (out, err), proc

    def wait_pid(self, home = ".", pidfile = "pidfile", raise_on_error = False):
        """ Waits until the pid file for the command is generated, 
            and returns the pid and ppid of the process """
        pid = ppid = None
        delay = 1.0
        # retry a few times with increasing delay before giving up
        for i in range(5):
            pidtuple = self.checkpid(home = home, pidfile = pidfile)
            
            if pidtuple:
                pid, ppid = pidtuple
                break
            else:
                time.sleep(delay)
                delay = min(30,delay*1.2)
        else:
            msg = " Failed to get pid for pidfile %s/%s " % (
                    home, pidfile )
            self.error(msg)
            
            if raise_on_error:
                raise RuntimeError(msg)

        return pid, ppid

    def wait_run(self, pid, ppid, trial = 0):
        """ wait for a remote process to finish execution """
        delay = 1.0
        first = True
        bustspin = 0

        while True:
            status = self.status(pid, ppid)
            
            if status is sshfuncs.FINISHED:
                break
            elif status is not sshfuncs.RUNNING:
                # status could not be retrieved; retry a bounded number
                # of times before assuming the process is gone
                bustspin += 1
                time.sleep(delay*(5.5+random.random()))
                if bustspin > 12:
                    break
            else:
                if first:
                    first = False

                time.sleep(delay*(0.5+random.random()))
                delay = min(30,delay*1.2)
                bustspin = 0

    def check_output(self, home, filename):
        """ checks file content """
        (out, err), proc = self.execute("cat %s" % 
            os.path.join(home, filename), retry = 1, with_lock = True)
        return (out, err), proc

    def is_alive(self):
        """ True when the node answers a trivial remote command. """
        if self.localhost:
            return True

        out = err = ""
        try:
            # TODO: FIX NOT ALIVE!!!!
            (out, err), proc = self.execute("echo 'ALIVE' || (echo 'NOTALIVE') >&2", retry = 5, 
                    with_lock = True)
        except Exception:
            import traceback
            trace = traceback.format_exc()
            msg = "Unresponsive host  %s " % err
            self.error(msg, out, trace)
            return False

        if out.strip().startswith('ALIVE'):
            return True
        else:
            msg = "Unresponsive host "
            self.error(msg, out, err)
            return False

    def copy(self, src, dst):
        """ Copy src to dst: locally when the node is localhost,
        otherwise through scp. """
        if self.localhost:
            # Bug fix: the original referenced the undefined names
            # 'source' and 'dest' (NameError on the localhost path)
            (out, err), proc = execfuncs.lcopy(src, dst, 
                    recursive = True,
                    strict_host_checking = False)
        else:
            with self._lock:
                (out, err), proc = sshfuncs.rcopy(
                    src, dst, 
                    port = self.get("port"),
                    identity = self.get("identity"),
                    server_key = self.get("serverKey"),
                    recursive = True,
                    strict_host_checking = False)

        return (out, err), proc

    def execute(self, command,
            sudo = False,
            stdin = None, 
            env = None,
            tty = False,
            forward_x11 = False,
            timeout = None,
            retry = 3,
            err_on_timeout = True,
            connect_timeout = 30,
            strict_host_checking = False,
            persistent = True,
            with_lock = False
            ):
        """ Notice that this invocation will block until the
        execution finishes. If this is not the desired behavior,
        use 'run' instead."""

        if self.localhost:
            # Bug fix: 'user' was an undefined name in the original code
            (out, err), proc = execfuncs.lexec(command, 
                    user = self.get("username"),
                    sudo = sudo,
                    stdin = stdin,
                    env = env)
        else:
            rexec_kwargs = dict(
                host = self.get("hostname"),
                user = self.get("username"),
                port = self.get("port"),
                agent = True,
                sudo = sudo,
                stdin = stdin,
                identity = self.get("identity"),
                server_key = self.get("serverKey"),
                env = env,
                tty = tty,
                forward_x11 = forward_x11,
                timeout = timeout,
                retry = retry,
                err_on_timeout = err_on_timeout,
                connect_timeout = connect_timeout,
                persistent = persistent,
                # Consistency fix: the original silently dropped this
                # argument in the no-lock branch
                strict_host_checking = strict_host_checking)

            if with_lock:
                with self._lock:
                    (out, err), proc = sshfuncs.rexec(command, **rexec_kwargs)
            else:
                (out, err), proc = sshfuncs.rexec(command, **rexec_kwargs)

        return (out, err), proc

    def run(self, command, 
            home = None,
            create_home = False,
            pidfile = "pid",
            stdin = None, 
            stdout = 'stdout', 
            stderr = 'stderr', 
            sudo = False,
            tty = False):
        """ Start 'command' in background on the node and return
        immediately; track it later with checkpid/status/kill. """
        self.debug("Running command '%s'" % command)
        
        if self.localhost:
            # Bug fix: 'user' was an undefined name in the original code
            (out, err), proc = execfuncs.lspawn(command, pidfile, 
                    stdout = stdout, 
                    stderr = stderr, 
                    stdin = stdin, 
                    home = home, 
                    create_home = create_home, 
                    sudo = sudo,
                    user = self.get("username")) 
        else:
            # Start process in a "daemonized" way, using nohup and heavy
            # stdin/out redirection to avoid connection issues
            with self._lock:
                (out,err), proc = sshfuncs.rspawn(
                    command,
                    pidfile = pidfile,
                    home = home,
                    create_home = create_home,
                    stdin = stdin if stdin is not None else '/dev/null',
                    stdout = stdout if stdout else '/dev/null',
                    stderr = stderr if stderr else '/dev/null',
                    sudo = sudo,
                    host = self.get("hostname"),
                    user = self.get("username"),
                    port = self.get("port"),
                    agent = True,
                    identity = self.get("identity"),
                    server_key = self.get("serverKey"),
                    tty = tty
                    )

        return (out, err), proc

    def checkpid(self, home = ".", pidfile = "pid"):
        """ Return the (pid, ppid) stored in home/pidfile, or None. """
        if self.localhost:
            pidtuple =  execfuncs.lcheckpid(os.path.join(home, pidfile))
        else:
            with self._lock:
                pidtuple = sshfuncs.rcheckpid(
                    os.path.join(home, pidfile),
                    host = self.get("hostname"),
                    user = self.get("username"),
                    port = self.get("port"),
                    agent = True,
                    identity = self.get("identity"),
                    server_key = self.get("serverKey")
                    )
        
        return pidtuple
    
    def status(self, pid, ppid):
        """ Return the sshfuncs status (RUNNING/FINISHED/...) of the
        process identified by (pid, ppid). """
        if self.localhost:
            status = execfuncs.lstatus(pid, ppid)
        else:
            with self._lock:
                status = sshfuncs.rstatus(
                        pid, ppid,
                        host = self.get("hostname"),
                        user = self.get("username"),
                        port = self.get("port"),
                        agent = True,
                        identity = self.get("identity"),
                        server_key = self.get("serverKey")
                        )
           
        return status
    
    def kill(self, pid, ppid, sudo = False):
        """ Kill the process (pid, ppid) on the node when it is still
        running. Returns ((out, err), proc) of the kill command. """
        out = err = ""
        proc = None
        status = self.status(pid, ppid)

        if status == sshfuncs.RUNNING:
            if self.localhost:
                (out, err), proc = execfuncs.lkill(pid, ppid, sudo)
            else:
                with self._lock:
                    (out, err), proc = sshfuncs.rkill(
                        pid, ppid,
                        host = self.get("hostname"),
                        user = self.get("username"),
                        port = self.get("port"),
                        agent = True,
                        sudo = sudo,
                        identity = self.get("identity"),
                        server_key = self.get("serverKey")
                        )
        return (out, err), proc

    def check_bad_host(self, out, err):
        """ True when out/err show symptoms of a malfunctioning host. """
        badre = re.compile(r'(?:'
                           r'|Error: disk I/O error'
                           r')', 
                           re.I)
        return badre.search(out) or badre.search(err)

    def blacklist(self):
        # TODO!!!!
        self.warn(" Blacklisting malfunctioning node ")
        #import util
        #util.appendBlacklist(self.hostname)
+
diff --git a/src/nepi/resources/linux/rpmfuncs.py b/src/nepi/resources/linux/rpmfuncs.py
new file mode 100644 (file)
index 0000000..b5a7b3d
--- /dev/null
@@ -0,0 +1,42 @@
# URLs of the RPM Fusion "free" repository release packages, installed to
# enable the repository before yum-installing packages on Fedora nodes.
RPM_FUSION_URL = 'http://download1.rpmfusion.org/free/fedora/rpmfusion-free-release-stable.noarch.rpm'
RPM_FUSION_URL_F12 = 'http://download1.rpmfusion.org/free/fedora/releases/12/Everything/x86_64/os/rpmfusion-free-release-12-1.noarch.rpm'

# TODO: Investigate using http://nixos.org/nix/
+
def install_packages_command(os, packages):
    """ Build a shell command that enables the RPM Fusion repository and
    yum-installs every package that is not already present.

    os        OS identifier of the node (e.g. "f12", "f14")
    packages  a package name or a list of package names
    """
    if not isinstance(packages, list):
        packages = [packages]

    pieces = ["( %s )" % install_rpmfusion_command(os)]
    for pkg in packages:
        pieces.append(
            "( rpm -q %(package)s || sudo -S yum -y install %(package)s ) " % {
                'package': pkg})

    #cmd = ((rpm -q rpmfusion-free-release || sudo -s rpm -i ...) ; (rpm -q vim || sudo yum -y install vim))
    return " ( %s )" % " ; ".join(pieces)
+
def remove_packages_command(os, packages):
    """ Build a shell command that yum-removes each given package when it
    is installed.

    os        OS identifier of the node (unused, kept for symmetry)
    packages  a package name or a list of package names
    """
    if not isinstance(packages, list):
        packages = [packages]

    template = " ( rpm -q %(package)s && sudo -S yum -y remove %(package)s ) ; "

    #cmd = (rpm -q vim || sudo yum -y remove vim) ; (...)
    return "".join(template % {'package': pkg} for pkg in packages)
+
def install_rpmfusion_command(os):
    """ Return a shell command that installs the RPM Fusion release
    package for the given Fedora flavor, or "" for other OSes. """
    template = "rpm -q rpmfusion-free-release || sudo -S rpm -i %(package)s"

    if os == "f12":
        return template % {'package': RPM_FUSION_URL_F12}
    if os == "f14":
        # This one works for f13+
        return template % {'package': RPM_FUSION_URL}
    return ""
diff --git a/src/nepi/resources/netns/__init__.py b/src/nepi/resources/netns/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/src/nepi/resources/ns3/__init__.py b/src/nepi/resources/ns3/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/src/nepi/resources/ns3/ns3wrapper.py b/src/nepi/resources/ns3/ns3wrapper.py
new file mode 100644 (file)
index 0000000..a908c1b
--- /dev/null
@@ -0,0 +1,361 @@
+import logging
+import os
+import sys
+import threading
+import uuid
+
+class NS3Wrapper(object):
+    def __init__(self, homedir = None):
+        super(NS3Wrapper, self).__init__()
+        self._ns3 = None
+        self._uuid = self.make_uuid()
+        self._homedir = homedir or os.path.join("/tmp", self._uuid)
+        self._simulation_thread = None
+        self._condition = None
+
+        self._started = False
+        self._stopped = False
+
+        # holds reference to all ns-3 objects in the simulation
+        self._resources = dict()
+
+        # create home dir (where all simulation related files will end up)
+        home = os.path.normpath(self.homedir)
+        if not os.path.exists(home):
+            os.makedirs(home, 0755)
+
+        # Logging
+        loglevel = os.environ.get("NS3LOGLEVEL", "debug")
+        self._logger = logging.getLogger("ns3wrapper.%s" % self.uuid)
+        self._logger.setLevel(getattr(logging, loglevel.upper()))
+        hdlr = logging.FileHandler(os.path.join(self.homedir, "ns3wrapper.log"))
+        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
+        hdlr.setFormatter(formatter)
+        self._logger.addHandler(hdlr) 
+
+        # Load ns-3 shared libraries and import modules
+        self._load_ns3_module()
+        
    @property
    def ns3(self):
        # the loaded ns3 module (None until _load_ns3_module runs)
        return self._ns3
+
    @property
    def homedir(self):
        # directory holding simulation related files (e.g. the wrapper log)
        return self._homedir
+
    @property
    def uuid(self):
        # unique identifier of this wrapper instance ("uuid<uuid4>")
        return self._uuid
+
    @property
    def logger(self):
        # file-backed logger writing to <homedir>/ns3wrapper.log
        return self._logger
+
+    def make_uuid(self):
+        return "uuid%s" % uuid.uuid4()
+
+    def singleton(self, clazzname):
+        uuid = "uuid%s"%clazzname
+
+        if not uuid in self._resources:
+            if not hasattr(self.ns3, clazzname):
+                msg = "Type %s not supported" % (typeid) 
+                self.logger.error(msg)
+
+            clazz = getattr(self.ns3, clazzname)
+            typeid = "ns3::%s" % clazzname
+            self._resources[uuid] = (clazz, typeid)
+
+        return uuid
+
    def get_trace(self, trace, offset = None, nbytes = None ):
        """ Read (a slice of) a simulation trace. Not implemented yet. """
        pass
+
+    def is_running(self):
+        return self._started and not self._stopped
+
+    def get_resource(self, uuid):
+        (resource, typeid) =  self._resources.get(uuid)
+        return resource
+    
+    def get_typeid(self, uuid):
+        (resource, typeid) =  self._resources.get(uuid)
+        return typeid
+
+    def create(self, clazzname, *args):
+        if not hasattr(self.ns3, clazzname):
+            msg = "Type %s not supported" % (clazzname) 
+            self.logger.error(msg)
+
+        clazz = getattr(self.ns3, clazzname)
+        #typeid = clazz.GetInstanceTypeId().GetName()
+        typeid = "ns3::%s" % clazzname
+
+        realargs = [self.get_resource(arg) if \
+                str(arg).startswith("uuid") else arg for arg in args]
+      
+        resource = clazz(*realargs)
+        
+        uuid = self.make_uuid()
+        self._resources[uuid] = (resource, typeid)
+        return uuid
+
+    def set(self, uuid, name, value):
+        resource = self.get_resource(uuid)
+
+        if hasattr(resource, name):
+            setattr(resource, name, value)
+        else:
+            self._set_ns3_attr(uuid, name, value)
+
+    def get(self, name, uuid = None):
+        resource = self.get_resource(uuid)
+
+        value = None
+        if hasattr(resource, name):
+            value = getattr(resource, name)
+        else:
+            value = self._get_ns3_attr(uuid, name)
+
+        return value
+
+    def invoke(self, uuid, operation, *args):
+        resource = self.get_resource(uuid)
+        typeid = self.get_typeid(uuid)
+        method = getattr(resource, operation)
+
+        realargs = [self.get_resource(arg) if \
+                str(arg).startswith("uuid") else arg for arg in args]
+
+        result = method(*realargs)
+
+        if not result:
+            return None
+        
+        uuid = self.make_uuid()
+        self._resources[uuid] = (result, typeid)
+
+        return uuid
+
+    def start(self):
+        self._condition = threading.Condition()
+        self._simulator_thread = threading.Thread(
+                target = self._simulator_run,
+                args = [self._condition])
+        self._simulator_thread.setDaemon(True)
+        self._simulator_thread.start()
+        self._started = True
+
+    def stop(self, time = None):
+        if not self.ns3:
+            return
+
+        if time is None:
+            self.ns3.Simulator.Stop()
+        else:
+            self.ns3.Simulator.Stop(self.ns3.Time(time))
+        self._stopped = True
+
    def shutdown(self):
        """ Tear down the simulation: stop it if still running, wait for
        the simulator thread and destroy the ns-3 simulator state. """
        if self.ns3:
            if not self.ns3.Simulator.IsFinished():
                self.stop()
            
            # TODO!!!! SHOULD WAIT UNTIL THE THREAD FINISHES
            # NOTE(review): _simulator_thread is only assigned by start();
            # __init__ sets '_simulation_thread' instead, so shutting down
            # a wrapper that was never started would raise AttributeError
            # here — confirm intended lifecycle.
            if self._simulator_thread:
                self._simulator_thread.join()
            
            self.ns3.Simulator.Destroy()
        
        # drop every registered ns-3 object reference
        self._resources.clear()
        
        self._ns3 = None
        sys.stdout.flush()
        sys.stderr.flush()
+
+    def _simulator_run(self, condition):
+        # Run simulation
+        self.ns3.Simulator.Run()
+        # Signal condition on simulation end to notify waiting threads
+        condition.acquire()
+        condition.notifyAll()
+        condition.release()
+
    def _schedule_event(self, condition, func, *args):
        """ Schedule 'func(*args)' as an immediate event inside the
        running simulation, and block until the event has executed. """

        def execute_event(contextId, condition, has_event_occurred, func, *args):
            # runs inside the simulator thread
            try:
                func(*args)
            finally:
                # flag event occured
                has_event_occurred[0] = True
                # notify condition indicating attribute was set
                condition.acquire()
                condition.notifyAll()
                condition.release()

        # contextId is defined as general context
        contextId = long(0xffffffff)

        # delay 0 means that the event is expected to execute inmediately
        delay = self.ns3.Seconds(0)

        # flag to indicate that the event occured
        # because bool is an inmutable object in python, in order to create a
        # bool flag, a list is used as wrapper
        has_event_occurred = [False]
        condition.acquire()
        try:
            if not self.ns3.Simulator.IsFinished():
                self.ns3.Simulator.ScheduleWithContext(contextId, delay, execute_event,
                     condition, has_event_occurred, func, *args)
                # wait until the event ran, or the simulation finished
                # without executing it
                while not has_event_occurred[0] and not self.ns3.Simulator.IsFinished():
                    condition.wait()
        finally:
            condition.release()
+
+    def _set_ns3_attr(self, uuid, name, value):
+        resource = self.get_resource(uuid)
+        ns3_value = self._to_ns3_value(uuid, name, value)
+
+        def set_attr(resource, name, ns3_value):
+            resource.SetAttribute(name, ns3_value)
+
+        if self._is_running:
+            # schedule the event in the Simulator
+            self._schedule_event(self._condition, set_attr, resource,
+                    name, ns3_value)
+        else:
+            set_attr(resource, name, ns3_value)
+
+    def _get_ns3_attr(self, uuid, name):
+        resource = self.get_resource(uuid)
+        ns3_value = self._create_ns3_value(uuid, name)
+
+        def get_attr(resource, name, ns3_value):
+            resource.GetAttribute(name, ns3_value)
+
+        if self._is_running:
+            # schedule the event in the Simulator
+            self._schedule_event(self._condition, get_attr, resource,
+                    name, ns3_value)
+        else:
+            get_attr(resource, name, ns3_value)
+
+        return self._from_ns3_value(uuid, name, ns3_value)
+
+    def _create_ns3_value(self, uuid, name):
+        """ Create an empty ns-3 AttributeValue of the concrete type
+        expected by attribute 'name' of the TypeId associated to 'uuid',
+        using the attribute's checker as a factory. """
+        typeid = get_typeid(uuid)
+        TypeId = self.ns3.TypeId()
+        tid = TypeId.LookupByName(typeid)
+        info = TypeId.AttributeInformation()
+        if not tid.LookupAttributeByName(name, info):
+            msg = "TypeId %s has no attribute %s" % (typeid, name) 
+            self.logger.error(msg)
+            # NOTE(review): execution continues with an unpopulated
+            # 'info' here, which will fail below -- consider raising
+
+        # the checker knows the attribute's concrete value type
+        checker = info.checker
+        ns3_value = checker.Create() 
+        return ns3_value
+
+    def _from_ns3_value(self, uuid, name, ns3_value):
+        """ Convert the ns-3 AttributeValue 'ns3_value' of attribute
+        'name' into a python value (int, float, bool or str).
+
+        The value is serialized to its string form through the attribute
+        checker and then cast based on the checker's value type name. """
+        typeid = get_typeid(uuid)
+        TypeId = self.ns3.TypeId()
+        tid = TypeId.LookupByName(typeid)
+        info = TypeId.AttributeInformation()
+        if not tid.LookupAttributeByName(name, info):
+            msg = "TypeId %s has no attribute %s" % (typeid, name) 
+            self.logger.error(msg)
+            # NOTE(review): execution continues with an unpopulated
+            # 'info' here, which will fail below -- consider raising
+
+        checker = info.checker
+        value = ns3_value.SerializeToString(checker)
+
+        # map the ns-3 value type to the corresponding python type
+        type_name = checker.GetValueTypeName()
+        if type_name in ["ns3::UintegerValue", "ns3::IntegerValue"]:
+            return int(value)
+        if type_name == "ns3::DoubleValue":
+            return float(value)
+        if type_name == "ns3::BooleanValue":
+            # ns-3 serializes booleans as "true"/"false"
+            return value == "true"
+
+        # no known conversion: return the serialized string as-is
+        return value
+
+    def _to_ns3_value(self, uuid, name, value):
+        typeid = get_typeid(uuid)
+        TypeId = self.ns3.TypeId()
+        typeid = TypeId.LookupByName(typeid)
+        info = TypeId.AttributeInformation()
+        if not tid.LookupAttributeByName(name, info):
+            msg = "TypeId %s has no attribute %s" % (typeid, name) 
+            self.logger.error(msg)
+
+        str_value = str(value)
+        if isinstance(value, bool):
+            str_value = str_value.lower()
+
+        checker = info.checker
+        ns3_value = checker.Create()
+        ns3_value.DeserializeFromString(str_value, checker)
+        return ns3_value
+
+    def _load_ns3_module(self):
+        """ Load the ns-3 shared libraries and python bindings and
+        expose every binding class through a synthetic 'ns3' module.
+
+        Library and binding locations are taken from the NS3LIBRARIES
+        and NS3BINDINGS environment variables. No-op if the ns3 module
+        was already loaded. """
+        if self.ns3:
+            return 
+
+        import ctypes
+        import imp
+        import re
+        import pkgutil
+
+        bindings = os.environ.get("NS3BINDINGS")
+        libdir = os.environ.get("NS3LIBRARIES")
+
+        # Load the ns-3 modules shared libraries
+        if libdir:
+            files = os.listdir(libdir)
+            regex = re.compile("(.*\.so)$")
+            libs = [m.group(1) for filename in files for m in [regex.search(filename)] if m]
+
+            # the libraries depend on each other in unknown order, so
+            # keep retrying rounds until a full round loads nothing new.
+            # NOTE(review): 'libs' is mutated while being iterated, so a
+            # round may skip entries; the outer while-loop compensates
+            # by re-scanning, but iterating a copy would be cleaner.
+            libscp = list(libs)
+            while len(libs) > 0:
+                for lib in libs:
+                    libfile = os.path.join(libdir, lib)
+                    try:
+                        ctypes.CDLL(libfile, ctypes.RTLD_GLOBAL)
+                        libs.remove(lib)
+                    except:
+                        pass
+
+                # if we did not load any libraries in the last iteration,
+                # break to prevent an infinite loop
+                if len(libscp) == len(libs):
+                    raise RuntimeError("Imposible to load shared libraries %s" % str(libs))
+                libscp = list(libs)
+
+        # import the python bindings for the ns-3 modules
+        if bindings:
+            sys.path.append(bindings)
+
+        # create a module to add all ns3 classes
+        ns3mod = imp.new_module("ns3")
+        sys.modules["ns3"] = ns3mod
+
+        # retrieve all ns3 classes and add them to the ns3 module
+        import ns
+        for importer, modname, ispkg in pkgutil.iter_modules(ns.__path__):
+            fullmodname = "ns.%s" % modname
+            module = __import__(fullmodname, globals(), locals(), ['*'])
+
+            # netanim.Config singleton overrides ns3::Config
+            if modname in ['netanim']:
+                continue
+
+            # copy every public name from the binding submodule
+            for sattr in dir(module):
+                if not sattr.startswith("_"):
+                    attr = getattr(module, sattr)
+                    setattr(ns3mod, sattr, attr)
+
+        self._ns3 = ns3mod
+
diff --git a/src/nepi/resources/ns3/ns3wrapper_server.py b/src/nepi/resources/ns3/ns3wrapper_server.py
new file mode 100644 (file)
index 0000000..5a82909
--- /dev/null
@@ -0,0 +1,344 @@
+class Server(object):
+    """ Unix-domain-socket control server that daemonizes itself.
+
+    The server double-forks to detach from the caller, binds a control
+    socket (CTRL_SOCK) under root_dir, and serves newline-delimited,
+    base64-encoded messages until it receives STOP_MSG.
+    """
+    def __init__(self, root_dir = ".", log_level = "ERROR", 
+            environment_setup = "", clean_root = False):
+        # directory where the control socket and pid file live
+        self._root_dir = root_dir
+        # if True, wipe a pre-existing root_dir before starting
+        self._clean_root = clean_root
+        self._stop = False
+        self._ctrl_sock = None
+        self._log_level = log_level
+        # partial-read buffer used by recv_msg for message framing
+        self._rdbuf = ""
+        # shell commands run to build the daemon's environment
+        self._environment_setup = environment_setup
+
+    def run(self):
+        """ Daemonize and serve requests.
+
+        In the original (parent) process this returns after the daemon
+        signals readiness; the daemonized process never returns (it
+        exits via os._exit). """
+        try:
+            if self.daemonize():
+                self.post_daemonize()
+                self.loop()
+                self.cleanup()
+                # ref: "os._exit(0)"
+                # can not return normally after fork because no exec was done.
+                # This means that if we don't do a os._exit(0) here the code that 
+                # follows the call to "Server.run()" in the "caller code" will be 
+                # executed... but by now it has already been executed after the 
+                # first process (the one that did the first fork) returned.
+                os._exit(0)
+        except:
+            print >>sys.stderr, "SERVER_ERROR."
+            self.log_error()
+            self.cleanup()
+            os._exit(0)
+        print >>sys.stderr, "SERVER_READY."
+
+    def daemonize(self):
+        """ Double-fork into a daemon process.
+
+        Returns 0 in the original caller process (after the daemon has
+        signalled readiness through a pipe) and 1 in the daemonized
+        process. Also binds the control socket and writes the pidfile. """
+        # pipes for process synchronization
+        (r, w) = os.pipe()
+        
+        # build root folder
+        root = os.path.normpath(self._root_dir)
+        if self._root_dir not in [".", ""] and os.path.exists(root) \
+                and self._clean_root:
+            shutil.rmtree(root)
+        if not os.path.exists(root):
+            os.makedirs(root, 0755)
+
+        pid1 = os.fork()
+        if pid1 > 0:
+            os.close(w)
+            # block until the daemon writes to the pipe (readiness signal)
+            while True:
+                try:
+                    os.read(r, 1)
+                except OSError, e: # pragma: no cover
+                    if e.errno == errno.EINTR:
+                        continue
+                    else:
+                        raise
+                break
+            os.close(r)
+            # os.waitpid avoids leaving a <defunc> (zombie) process
+            st = os.waitpid(pid1, 0)[1]
+            if st:
+                raise RuntimeError("Daemonization failed")
+            # return 0 to inform the caller method that this is not the 
+            # daemonized process
+            return 0
+        os.close(r)
+
+        # Decouple from parent environment.
+        os.chdir(self._root_dir)
+        os.umask(0)
+        os.setsid()
+
+        # fork 2
+        pid2 = os.fork()
+        if pid2 > 0:
+            # see ref: "os._exit(0)"
+            os._exit(0)
+
+        # close all open file descriptors.
+        max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
+        if (max_fd == resource.RLIM_INFINITY):
+            max_fd = MAX_FD
+        # keep 'w' open: it is still needed for the readiness signal
+        for fd in range(3, max_fd):
+            if fd != w:
+                try:
+                    os.close(fd)
+                except OSError:
+                    pass
+
+        # Redirect standard file descriptors.
+        stdin = open(DEV_NULL, "r")
+        stderr = stdout = open(STD_ERR, "a", 0)
+        os.dup2(stdin.fileno(), sys.stdin.fileno())
+        # NOTE: sys.stdout.write will still be buffered, even if the file
+        # was opened with 0 buffer
+        os.dup2(stdout.fileno(), sys.stdout.fileno())
+        os.dup2(stderr.fileno(), sys.stderr.fileno())
+        
+        # setup environment
+        if self._environment_setup:
+            # parse environment variables and pass to child process
+            # do it by executing shell commands, in case there's some heavy setup involved
+            envproc = subprocess.Popen(
+                [ "bash", "-c", 
+                    "( %s python -c 'import os,sys ; print \"\\x01\".join(\"\\x02\".join(map(str,x)) for x in os.environ.iteritems())' ) | tail -1" %
+                        ( self._environment_setup, ) ],
+                stdin = subprocess.PIPE, 
+                stdout = subprocess.PIPE,
+                stderr = subprocess.PIPE
+            )
+            out,err = envproc.communicate()
+
+            # parse new environment
+            if out:
+                environment = dict(map(lambda x:x.split("\x02"), out.split("\x01")))
+            
+                # apply to current environment
+                for name, value in environment.iteritems():
+                    os.environ[name] = value
+                
+                # apply pythonpath
+                if 'PYTHONPATH' in environment:
+                    sys.path = environment['PYTHONPATH'].split(':') + sys.path
+
+        # create control socket
+        self._ctrl_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        try:
+            self._ctrl_sock.bind(CTRL_SOCK)
+        except socket.error:
+            # Address in use, check pidfile
+            pid = None
+            try:
+                pidfile = open(CTRL_PID, "r")
+                pid = pidfile.read()
+                pidfile.close()
+                pid = int(pid)
+            except:
+                # no pidfile
+                pass
+            
+            if pid is not None:
+                # Check process liveliness
+                if not os.path.exists("/proc/%d" % (pid,)):
+                    # Ok, it's dead, clean the socket
+                    os.remove(CTRL_SOCK)
+            
+            # try again
+            self._ctrl_sock.bind(CTRL_SOCK)
+            
+        self._ctrl_sock.listen(0)
+        
+        # Save pidfile
+        pidfile = open(CTRL_PID, "w")
+        pidfile.write(str(os.getpid()))
+        pidfile.close()
+
+        # let the parent process know that the daemonization is finished
+        os.write(w, "\n")
+        os.close(w)
+        return 1
+
+    def post_daemonize(self):
+        """ Post-fork fixups that must run inside the daemon process. """
+        os.environ["NEPI_CONTROLLER_LOGLEVEL"] = self._log_level
+        # QT, for some strange reason, redefines the SIGCHLD handler to write
+        # a \0 to a fd (lets say fileno 'x'), whenever a SIGCHLD is received.
+        # Server daemonization closes all file descriptors from fileno '3',
+        # but the overloaded handler (inherited by the forked process) will
+        # keep trying to write the \0 to fileno 'x', which might have been reused 
+        # after closing, for other operations. This is bad bad bad when fileno 'x'
+        # is in use for communication purposes, because unexpected \0 start
+        # appearing in the communication messages... this is exactly what happens 
+        # when using netns in daemonized form. Thus, we have no other alternative than
+        # restoring the SIGCHLD handler to the default here.
+        import signal
+        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+
+    def loop(self):
+        """ Accept one client at a time and serve messages until a
+        STOP_MSG arrives or the connection is lost. """
+        while not self._stop:
+            conn, addr = self._ctrl_sock.accept()
+            self.log_error("ACCEPTED CONNECTION: %s" % (addr,))
+            # short timeout so the loop can notice self._stop changes
+            conn.settimeout(5)
+            while not self._stop:
+                try:
+                    msg = self.recv_msg(conn)
+                except socket.timeout, e:
+                    #self.log_error("SERVER recv_msg: connection timedout ")
+                    continue
+                
+                if not msg:
+                    self.log_error("CONNECTION LOST")
+                    break
+                    
+                if msg == STOP_MSG:
+                    self._stop = True
+                    reply = self.stop_action()
+                else:
+                    reply = self.reply_action(msg)
+                
+                try:
+                    self.send_reply(conn, reply)
+                except socket.error:
+                    self.log_error()
+                    self.log_error("NOTICE: Awaiting for reconnection")
+                    break
+            try:
+                conn.close()
+            except:
+                # Doesn't matter
+                self.log_error()
+
+    def recv_msg(self, conn):
+        """ Read one newline-terminated, base64-encoded message from
+        'conn'; bytes past the newline are buffered for the next call.
+        Returns the decoded message, or '' on EOF. """
+        data = [self._rdbuf]
+        chunk = data[0]
+        while '\n' not in chunk:
+            try:
+                chunk = conn.recv(1024)
+            except (OSError, socket.error), e:
+                if e[0] != errno.EINTR:
+                    raise
+                else:
+                    continue
+            if chunk:
+                data.append(chunk)
+            else:
+                # empty chunk = EOF
+                break
+        # split off the first complete line; the remainder is buffered
+        data = ''.join(data).split('\n',1)
+        while len(data) < 2:
+            data.append('')
+        data, self._rdbuf = data
+        
+        decoded = base64.b64decode(data)
+        return decoded.rstrip()
+
+    def send_reply(self, conn, reply):
+        """ Send 'reply' base64-encoded and newline-terminated. """
+        encoded = base64.b64encode(reply)
+        conn.send("%s\n" % encoded)
+       
+    def cleanup(self):
+        """ Close the control socket and remove its filesystem entry. """
+        try:
+            self._ctrl_sock.close()
+            os.remove(CTRL_SOCK)
+        except:
+            self.log_error()
+
+    def stop_action(self):
+        """ Hook invoked on STOP_MSG; subclasses may override. """
+        return "Stopping server"
+
+    def reply_action(self, msg):
+        """ Hook invoked on every regular message; subclasses override
+        this to implement the actual request handling. """
+        return "Reply to: %s" % msg
+
+    def log_error(self, text = None, context = ''):
+        """ Write an error (defaults to the current traceback) to stderr
+        with a timestamp; returns the logged text. """
+        if text == None:
+            text = traceback.format_exc()
+        date = time.strftime("%Y-%m-%d %H:%M:%S")
+        if context:
+            context = " (%s)" % (context,)
+        sys.stderr.write("ERROR%s: %s\n%s\n" % (context, date, text))
+        return text
+
+    def log_debug(self, text):
+        """ Write a debug message to stderr when debug level is on. """
+        if self._log_level == DC.DEBUG_LEVEL:
+            date = time.strftime("%Y-%m-%d %H:%M:%S")
+            sys.stderr.write("DEBUG: %s\n%s\n" % (date, text))
+
+class Forwarder(object):
+    """ Bridges stdin/stdout to the Server's control socket.
+
+    Each line read from stdin is forwarded to the server, and the
+    server's reply is written back to stdout, until STOP_MSG is seen
+    or the client connection closes. """
+    def __init__(self, root_dir = "."):
+        self._ctrl_sock = None
+        # directory where the server's control socket lives
+        self._root_dir = root_dir
+        self._stop = False
+        # partial-read buffer used by recv_from_server for framing
+        self._rdbuf = ""
+
+    def forward(self):
+        """ Main loop: shuttle messages between stdin/stdout and the
+        control socket until stopped or the client disconnects. """
+        self.connect()
+        print >>sys.stderr, "FORWARDER_READY."
+        while not self._stop:
+            data = self.read_data()
+            if not data:
+                # Connection to client lost
+                break
+            self.send_to_server(data)
+            
+            data = self.recv_from_server()
+            if not data:
+                # Connection to server lost
+                raise IOError, "Connection to server lost while "\
+                    "expecting response"
+            self.write_data(data)
+        self.disconnect()
+
+    def read_data(self):
+        """ Read one line from the client (stdin). """
+        return sys.stdin.readline()
+
+    def write_data(self, data):
+        """ Write 'data' to the client (stdout) and flush. """
+        sys.stdout.write(data)
+        # sys.stdout.write is buffered, this is why we need to do a flush()
+        sys.stdout.flush()
+
+    def send_to_server(self, data):
+        """ Send 'data' to the server, reconnecting once on a broken
+        pipe; sets the stop flag if the payload decodes to STOP_MSG. """
+        try:
+            self._ctrl_sock.send(data)
+        except (IOError, socket.error), e:
+            if e[0] == errno.EPIPE:
+                # server side closed the socket: reconnect and retry once
+                self.connect()
+                self._ctrl_sock.send(data)
+            else:
+                raise e
+        # peek at the (base64) payload to detect the stop request
+        encoded = data.rstrip() 
+        msg = base64.b64decode(encoded)
+        if msg == STOP_MSG:
+            self._stop = True
+
+    def recv_from_server(self):
+        """ Read one newline-terminated reply from the server; bytes
+        past the newline are buffered for the next call. """
+        data = [self._rdbuf]
+        chunk = data[0]
+        while '\n' not in chunk:
+            try:
+                chunk = self._ctrl_sock.recv(1024)
+            except (OSError, socket.error), e:
+                if e[0] != errno.EINTR:
+                    raise
+                continue
+            if chunk:
+                data.append(chunk)
+            else:
+                # empty chunk = EOF
+                break
+        # split off the first complete line; the remainder is buffered
+        data = ''.join(data).split('\n',1)
+        while len(data) < 2:
+            data.append('')
+        data, self._rdbuf = data
+        
+        return data+'\n'
+    def connect(self):
+        """ (Re)connect to the server's unix control socket. """
+        self.disconnect()
+        self._ctrl_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        sock_addr = os.path.join(self._root_dir, CTRL_SOCK)
+        self._ctrl_sock.connect(sock_addr)
+
+    def disconnect(self):
+        """ Best-effort close of the control socket. """
+        try:
+            self._ctrl_sock.close()
+        except:
+            pass
+
diff --git a/src/nepi/resources/omf/__init__.py b/src/nepi/resources/omf/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/src/nepi/resources/omf/omf_api.py b/src/nepi/resources/omf/omf_api.py
new file mode 100644 (file)
index 0000000..754be53
--- /dev/null
@@ -0,0 +1,369 @@
+import datetime
+import logging
+import ssl
+import sys
+import time
+import hashlib
+import nepi
+import threading
+
+from nepi.resources.omf.omf_client import OMFClient
+from nepi.resources.omf.omf_messages_5_4 import MessageHandler
+
+class OMFAPI(object):
+    """
+    .. class:: Class Args :
+      
+        :param slice: Xmpp Slice
+        :type slice: Str
+        :param host: Xmpp Server
+        :type host: Str
+        :param port: Xmpp Port
+        :type port: Str
+        :param password: Xmpp password
+        :type password: Str
+        :param xmpp_root: Root of the Xmpp Topic Architecture
+        :type xmpp_root: Str
+
+    .. note::
+
+       This class is the implementation of an OMF 5.4 API. Since the version 5.4.1, the Topic Architecture start with OMF_5.4 instead of OMF used for OMF5.3
+
+    """
+    def __init__(self, slice, host, port, password, xmpp_root = None):
+        """
+    
+        :param slice: Xmpp Slice
+        :type slice: Str
+        :param host: Xmpp Server
+        :type host: Str
+        :param port: Xmpp Port
+        :type port: Str
+        :param password: Xmpp password
+        :type password: Str
+        :param xmpp_root: Root of the Xmpp Topic Architecture
+        :type xmpp_root: Str
+
+        """
+        # the xmpp user name embeds a timestamp so each experiment run
+        # gets its own unique session topic
+        date = datetime.datetime.now().strftime("%Y-%m-%dt%H.%M.%S")
+        tz = -time.altzone if time.daylight != 0 else -time.timezone
+        date += "%+06.2f" % (tz / 3600) # timezone difference is in seconds
+        self._user = "%s-%s" % (slice, date)
+        self._slice = slice
+        self._host = host
+        self._port = port
+        self._password = password
+        # hostnames currently enrolled in this session
+        self._hostnames = []
+        self._xmpp_root = xmpp_root or "OMF_5.4"
+
+        self._logger = logging.getLogger("nepi.omf.omfApi    ")
+        self._logger.setLevel(nepi.LOGLEVEL)
+
+        # OMF xmpp client
+        self._client = None
+        # message handler
+        self._message = None
+
+        if sys.version_info < (3, 0):
+            # HACK: force utf8 as the python2 default encoding --
+            # presumably to avoid UnicodeDecodeError on xmpp payloads;
+            # note this mutates interpreter-wide state
+            reload(sys)
+            sys.setdefaultencoding('utf8')
+
+        # instantiate the xmpp client
+        self._init_client()
+
+        # register xmpp nodes for the experiment
+        self._enroll_experiment()
+        self._enroll_newexperiment()
+
+        # register xmpp logger for the experiment
+        self._enroll_logger()
+
+    def _init_client(self):
+        """ Initialize XMPP Client
+
+        """
+        jid = "%s@%s" % (self._user, self._host)
+        xmpp = OMFClient(jid, self._password)
+        # PROTOCOL_SSLv3 required for compatibility with OpenFire
+        # NOTE(review): SSLv3 is insecure and deprecated; confirm the
+        # server really cannot negotiate TLS before keeping this
+        xmpp.ssl_version = ssl.PROTOCOL_SSLv3
+
+        if xmpp.connect((self._host, self._port)):
+            xmpp.process(block=False)
+            # poll until the client reports it finished its handshake
+            while not xmpp.ready:
+                time.sleep(1)
+            self._client = xmpp
+            self._message = MessageHandler(self._slice, self._user)
+        else:
+            msg = "Unable to connect to the XMPP server."
+            self._logger.error(msg)
+            raise RuntimeError(msg)
+
+    def _enroll_experiment(self):
+        """ Create and Subscribe to the Session Topic
+
+        """
+        xmpp_node = self._exp_session_id
+        self._client.create(xmpp_node)
+        #print "Create experiment sesion id topics !!" 
+        self._client.subscribe(xmpp_node)
+        #print "Subscribe to experiment sesion id topics !!" 
+
+
+    def _enroll_newexperiment(self):
+        """ Publish New Experiment Message
+
+        """
+        address = "/%s/%s/%s/%s" % (self._host, self._xmpp_root, self._slice, self._user)
+        #print address
+        payload = self._message.newexp_function(self._user, address)
+        slice_sid = "/%s/%s" % (self._xmpp_root, self._slice)
+        self._client.publish(payload, slice_sid)
+
+    def _enroll_logger(self):
+        """ Create and Subscribe to the Logger Topic
+
+        """
+        xmpp_node = self._logger_session_id
+        self._client.create(xmpp_node)
+        self._client.subscribe(xmpp_node)
+
+        # announce ourselves the way the reference OMF EC does
+        payload = self._message.log_function("2", 
+                "nodeHandler::NodeHandler", 
+                "INFO", 
+                "OMF Experiment Controller 5.4 (git 529a626)")
+        self._client.publish(payload, xmpp_node)
+
+    def _host_session_id(self, hostname):
+        """ Return the Topic Name as /xmpp_root/slice/user/hostname
+
+        :param hostname: Full hrn of the node
+        :type hostname: str
+
+        """
+        return "/%s/%s/%s/%s" % (self._xmpp_root, self._slice, self._user, hostname)
+
+    def _host_resource_id(self, hostname):
+        """ Return the Topic Name as /xmpp_root/slice/resources/hostname
+
+        :param hostname: Full hrn of the node
+        :type hostname: str
+
+        """
+        return "/%s/%s/resources/%s" % (self._xmpp_root, self._slice, hostname)
+
+    @property
+    def _exp_session_id(self):
+        """ Return the Topic Name as /xmpp_root/slice/user
+
+        """
+        return "/%s/%s/%s" % (self._xmpp_root, self._slice, self._user)
+
+    @property
+    def _logger_session_id(self):
+        """ Return the Topic Name as /xmpp_root/slice/LOGGER
+
+        """
+        return "/%s/%s/%s/LOGGER" % (self._xmpp_root, self._slice, self._user)
+
+    def delete(self, hostname):
+        """ Delete the topic corresponding to the hostname for this session
+
+        :param hostname: Full hrn of the node
+        :type hostname: str
+
+        """
+        if not hostname in self._hostnames:
+            return
+
+        self._hostnames.remove(hostname)
+
+        xmpp_node = self._host_session_id(hostname)
+        self._client.delete(xmpp_node)
+
+    def enroll_host(self, hostname):
+        """ Create and Subscribe to the session topic and the resources corresponding to the hostname
+
+        :param hostname: Full hrn of the node
+        :type hostname: str
+
+        """
+        # idempotent: a host already enrolled is not enrolled twice
+        if hostname in self._hostnames:
+            return 
+
+        self._hostnames.append(hostname)
+
+        xmpp_node =  self._host_session_id(hostname)
+        self._client.create(xmpp_node)
+        self._client.subscribe(xmpp_node)
+
+        xmpp_node =  self._host_resource_id(hostname)
+        self._client.subscribe(xmpp_node)
+
+        payload = self._message.enroll_function("1", "*", "1", hostname)
+        self._client.publish(payload, xmpp_node)
+
+    def configure(self, hostname, attribute, value):
+        """ Configure attribute on the node
+
+        :param hostname: Full hrn of the node
+        :type hostname: str
+        :param attribute: Attribute that need to be configured (often written as /net/wX/attribute, with X the interface number)
+        :type attribute: str
+        :param value: Value of the attribute
+        :type value: str
+
+        """
+        payload = self._message.configure_function(hostname, value, attribute)
+        xmpp_node =  self._host_session_id(hostname)
+        self._client.publish(payload, xmpp_node)
+
+    def execute(self, hostname, app_id, arguments, path, env):
+        """ Execute command on the node
+
+        :param hostname: Full hrn of the node
+        :type hostname: str
+        :param app_id: Application Id (Any id that represents in a unique way the application)
+        :type app_id: str
+        :param arguments: Arguments of the application
+        :type arguments: str
+        :param path: Path of the application
+        :type path: str
+        :param env: Environnement values for the application
+        :type env: str
+
+        """
+        payload = self._message.execute_function(hostname, app_id, arguments, path, env)
+        xmpp_node =  self._host_session_id(hostname)
+        self._client.publish(payload, xmpp_node)
+
+    def exit(self, hostname, app_id):
+        """ Kill an application started with OMF
+
+        :param hostname: Full hrn of the node
+        :type hostname: str
+        :param app_id: Application Id of the application you want to stop
+        :type app_id: str
+
+        """
+        payload = self._message.exit_function(hostname, app_id)
+        xmpp_node =  self._host_session_id(hostname)
+        self._client.publish(payload, xmpp_node)
+
+    def release(self, hostname):
+        """ Delete the session topic of the given host, if enrolled
+
+        """
+        if hostname in self._hostnames:
+            self.delete(hostname)
+
+    def disconnect(self) :
+        """ Delete the session and logger topics. Then disconnect 
+
+        """
+        self._client.delete(self._exp_session_id)
+        self._client.delete(self._logger_session_id)
+
+        # give the server a moment to process the deletions
+        time.sleep(1)
+        
+        # Wait the send queue to be empty before disconnect
+        self._client.disconnect(wait=True)
+        self._logger.debug(" Disconnected from XMPP Server")
+
+
+class OMFAPIFactory(object):
+    """ 
+    .. note::
+
+        It allows the different RM to use the same xmpp client if they use the same credentials. 
+        For the moment, it is focused on Xmpp.
+
+    """
+    # use lock to avoid concurrent access to the Api list at the same times by 2 different threads
+    lock = threading.Lock()
+    _apis = dict()
+
+    @classmethod 
+    def get_api(cls, slice, host, port, password):
+        """ Get an Api
+
+        :param slice: Xmpp Slice Name
+        :type slice: str
+        :param host: Xmpp Server Adress
+        :type host: str
+        :param port: Xmpp Port (Default : 5222)
+        :type port: str
+        :param password: Xmpp Password
+        :type password: str
+
+        """
+        if slice and host and port and password:
+            key = cls._make_key(slice, host, port, password)
+            cls.lock.acquire()
+            if key in cls._apis:
+                cls._apis[key]['cnt'] += 1
+                cls.lock.release()
+                return cls._apis[key]['api']
+            else :
+                omf_api = cls.create_api(slice, host, port, password)
+                cls.lock.release()
+                return omf_api
+        return None
+
+    @classmethod 
+    def create_api(cls, slice, host, port, password):
+        """ Create an API if this one doesn't exist yet with this credentials
+
+        :param slice: Xmpp Slice Name
+        :type slice: str
+        :param host: Xmpp Server Adress
+        :type host: str
+        :param port: Xmpp Port (Default : 5222)
+        :type port: str
+        :param password: Xmpp Password
+        :type password: str
+
+        """
+        omf_api = OMFAPI(slice, host, port, password)
+        key = cls._make_key(slice, host, port, password)
+        cls._apis[key] = {}
+        cls._apis[key]['api'] = omf_api
+        cls._apis[key]['cnt'] = 1
+        return omf_api
+
+    @classmethod 
+    def release_api(cls, slice, host, port, password):
+        """ Release an API with this credentials
+
+        :param slice: Xmpp Slice Name
+        :type slice: str
+        :param host: Xmpp Server Adress
+        :type host: str
+        :param port: Xmpp Port (Default : 5222)
+        :type port: str
+        :param password: Xmpp Password
+        :type password: str
+
+        """
+        if slice and host and port and password:
+            key = cls._make_key(slice, host, port, password)
+            if key in cls._apis:
+                cls._apis[key]['cnt'] -= 1
+                #print "Api Counter : " + str(cls._apis[key]['cnt'])
+                if cls._apis[key]['cnt'] == 0:
+                    omf_api = cls._apis[key]['api']
+                    omf_api.disconnect()
+
+
+    @classmethod 
+    def _make_key(cls, *args):
+        """ Hash the credentials in order to create a key
+
+        :param args: list of arguments used to create the hash (user, host, port, ...)
+        :type args: list of args
+
+        """
+        skey = "".join(map(str, args))
+        return hashlib.md5(skey).hexdigest()
+
+
+
diff --git a/src/nepi/resources/omf/omf_application.py b/src/nepi/resources/omf/omf_application.py
new file mode 100644 (file)
index 0000000..5196ecd
--- /dev/null
@@ -0,0 +1,147 @@
+#!/usr/bin/env python
+
+from nepi.execution.resource import ResourceManager, clsinit
+from nepi.execution.attribute import Attribute, Flags 
+from nepi.resources.omf.omf_api import OMFAPIFactory
+
+import nepi
+import logging
+
+@clsinit
+class OMFApplication(ResourceManager):
+    """
+    .. class:: Class Args :
+      
+        :param ec: The Experiment controller
+        :type ec: ExperimentController
+        :param guid: guid of the RM
+        :type guid: int
+        :param creds: Credentials to communicate with the rm (XmppClient)
+        :type creds: dict
+
+    .. note::
+
+       This class is used only by the Experiment Controller through the Resource Factory
+
+    """
+    _rtype = "OMFApplication"
+    _authorized_connections = ["OMFNode"]
+    _waiters = ["OMFNode", "OMFChannel", "OMFWifiInterface"]
+
+    @classmethod
+    def _register_attributes(cls):
+        """Register the attributes of an OMF application
+        """
+
+        appid = Attribute("appid", "Name of the application")
+        path = Attribute("path", "Path of the application")
+        args = Attribute("args", "Argument of the application")
+        env = Attribute("env", "Environnement variable of the application")
+        xmppSlice = Attribute("xmppSlice","Name of the slice", flags = Flags.Credential)
+        xmppHost = Attribute("xmppHost", "Xmpp Server",flags = Flags.Credential)
+        xmppPort = Attribute("xmppPort", "Xmpp Port",flags = Flags.Credential)
+        xmppPassword = Attribute("xmppPassword", "Xmpp Port",flags = Flags.Credential)
+        cls._register_attribute(appid)
+        cls._register_attribute(path)
+        cls._register_attribute(args)
+        cls._register_attribute(env)
+        cls._register_attribute(xmppSlice)
+        cls._register_attribute(xmppHost)
+        cls._register_attribute(xmppPort)
+        cls._register_attribute(xmppPassword)
+
+
+    def __init__(self, ec, guid):
+        """
+        :param ec: The Experiment controller
+        :type ec: ExperimentController
+        :param guid: guid of the RM
+        :type guid: int
+        :param creds: Credentials to communicate with the rm (XmppClient for OMF)
+        :type creds: dict
+
+        """
+        
+        super(OMFApplication, self).__init__(ec, guid)
+
+        self.set('appid', "")
+        self.set('path', "")
+        self.set('args', "")
+        self.set('env', "")
+
+        self._node = None
+
+        self._omf_api = None
+
+        self._logger = logging.getLogger("nepi.omf.omfApp    ")
+        self._logger.setLevel(nepi.LOGLEVEL)
+
+
+    def _validate_connection(self, guid):
+        """Check if the connection is available.
+
+        :param guid: Guid of the current RM
+        :type guid: int
+        :rtype:  Boolean
+
+        """
+        rm = self.ec.get_resource(guid)
+        if rm.rtype() not in self._authorized_connections:
+            self._logger.debug("Connection between %s %s and %s %s refused : An Application can be connected only to a Node" % (self.rtype(), self._guid, rm.rtype(), guid))
+            return False
+        elif len(self.connections) != 0 :
+            self._logger.debug("Connection between %s %s and %s %s refused : Already Connected" % (self.rtype(), self._guid, rm.rtype(), guid))
+            return False
+        else :
+            self._logger.debug("Connection between %s %s and %s %s accepted" % (self.rtype(), self._guid, rm.rtype(), guid))
+            return True
+
+    def _get_nodes(self, conn_set):
+        """Get the RM of the node to which the application is connected
+
+        :param conn_set: Connections of the current Guid
+        :type conn_set: set
+        :rtype: ResourceManager
+        """
+
+        for elt in conn_set:
+            rm = self.ec.get_resource(elt)
+            if rm.rtype() == "OMFNode":
+                return rm
+        return None
+
+    def deploy_action(self):
+        """Deploy the RM
+
+        """
+        self._omf_api = OMFAPIFactory.get_api(self.get('xmppSlice'), 
+            self.get('xmppHost'), self.get('xmppPort'), self.get('xmppPassword'))
+        super(OMFApplication, self).deploy_action()
+
+    def start(self):
+        """Send Xmpp Message Using OMF protocol to execute the application
+
+        """
+        super(OMFApplication, self).start()
+        self._logger.debug(" " + self.rtype() + " ( Guid : " + str(self._guid) +") : " + self.get('appid') + " : " + self.get('path') + " : " + self.get('args') + " : " + self.get('env'))
+
+        if self.get('appid') and self.get('path') and self.get('args') and self.get('env'):
+            rm_node = self._get_nodes(self._connections)
+            self._omf_api.execute(rm_node.get('hostname'),self.get('appid'), self.get('args'), self.get('path'), self.get('env'))
+
+    def stop(self):
+        """Send Xmpp Message Using OMF protocol to kill the application
+
+        """
+
+        rm_node = self._get_nodes(self._connections)
+        self._omf_api.exit(rm_node.get('hostname'),self.get('appid'))
+        super(OMFApplication, self).stop()
+
+    def release(self):
+        """Clean the RM at the end of the experiment
+
+        """
+        OMFAPIFactory.release_api(self.get('xmppSlice'), 
+            self.get('xmppHost'), self.get('xmppPort'), self.get('xmppPassword'))
+
diff --git a/src/nepi/resources/omf/omf_channel.py b/src/nepi/resources/omf/omf_channel.py
new file mode 100644 (file)
index 0000000..8019896
--- /dev/null
@@ -0,0 +1,152 @@
+#!/usr/bin/env python
+from nepi.execution.resource import ResourceManager, clsinit
+from nepi.execution.attribute import Attribute, Flags 
+
+from nepi.resources.omf.omf_api import OMFAPIFactory
+
+import nepi
+import logging
+
@clsinit
class OMFChannel(ResourceManager):
    """Resource Manager for an OMF wireless channel.

    .. class:: Class Args :

        :param ec: The Experiment controller
        :type ec: ExperimentController
        :param guid: guid of the RM
        :type guid: int

    .. note::

       This class is used only by the Experiment Controller through the Resource Factory

    """
    _rtype = "OMFChannel"
    _authorized_connections = ["OMFWifiInterface", "OMFNode"]
    _waiters = ["OMFNode", "OMFWifiInterface"]

    @classmethod
    def _register_attributes(cls):
        """Register the attributes of an OMF channel

        """
        # BUG FIX: 'channel' was described as "Name of the application" and
        # 'xmppPassword' as "Xmpp Port" (copy-paste errors).
        channel = Attribute("channel", "Channel on which the nodes communicate")
        xmppSlice = Attribute("xmppSlice", "Name of the slice", flags = Flags.Credential)
        xmppHost = Attribute("xmppHost", "Xmpp Server", flags = Flags.Credential)
        xmppPort = Attribute("xmppPort", "Xmpp Port", flags = Flags.Credential)
        xmppPassword = Attribute("xmppPassword", "Xmpp Password", flags = Flags.Credential)
        cls._register_attribute(channel)
        cls._register_attribute(xmppSlice)
        cls._register_attribute(xmppHost)
        cls._register_attribute(xmppPort)
        cls._register_attribute(xmppPassword)

    def __init__(self, ec, guid):
        """
        :param ec: The Experiment controller
        :type ec: ExperimentController
        :param guid: guid of the RM
        :type guid: int

        """
        super(OMFChannel, self).__init__(ec, guid)

        # (hostname, interface alias) couples using this channel,
        # rebuilt by _get_target()
        self._nodes_guid = list()

        self._omf_api = None

        self._logger = logging.getLogger("nepi.omf.omfChannel")
        self._logger.setLevel(nepi.LOGLEVEL)

    def _validate_connection(self, guid):
        """Check if the connection is available.

        :param guid: Guid of the current RM
        :type guid: int
        :rtype:  Boolean

        """
        rm = self.ec.get_resource(guid)
        if rm.rtype() in self._authorized_connections:
            self._logger.debug("Connection between %s %s and %s %s accepted" %
                (self.rtype(), self._guid, rm.rtype(), guid))
            return True
        self._logger.debug("Connection between %s %s and %s %s refused" %
            (self.rtype(), self._guid, rm.rtype(), guid))
        return False

    def _get_target(self, conn_set):
        """
        Get the couples (host, interface) that use this channel

        :param conn_set: Connections of the current Guid
        :type conn_set: set
        :rtype: list
        :return: self._nodes_guid

        """
        # BUG FIX: rebuild the list from scratch so that repeated calls do
        # not accumulate duplicate couples (the original appended into the
        # same instance list every time).
        self._nodes_guid = list()
        for elt in conn_set:
            rm_iface = self.ec.get_resource(elt)
            for conn in rm_iface.connections:
                rm_node = self.ec.get_resource(conn)
                if rm_node.rtype() == "OMFNode":
                    couple = [rm_node.get('hostname'), rm_iface.get('alias')]
                    self._nodes_guid.append(couple)
        return self._nodes_guid

    def deploy_action(self):
        """Deploy the RM: send one OMF configure message per
        (node, interface) couple attached to this channel.

        """
        self._omf_api = OMFAPIFactory.get_api(self.get('xmppSlice'), 
            self.get('xmppHost'), self.get('xmppPort'), self.get('xmppPassword'))

        if self.get('channel'):
            # Leftover debug statement 'print set_nodes' removed
            # (Python 2 print syntax, debug-only output).
            set_nodes = self._get_target(self._connections)
            for couple in set_nodes:
                attrval = self.get('channel')
                attrname = "net/%s/%s" % (couple[1], 'channel')
                self._omf_api.configure(couple[0], attrname, attrval)

        super(OMFChannel, self).deploy_action()

    def discover(self):
        """ Discover the available channels (not implemented yet)

        """
        pass

    def provision(self):
        """ Provision some available channels (not implemented yet)

        """
        pass

    def start(self):
        """Start the RM (nothing channel-specific to do)

        """
        super(OMFChannel, self).start()

    def stop(self):
        """Stop the RM (nothing channel-specific to do)

        """
        super(OMFChannel, self).stop()

    def release(self):
        """Release the OMF API client at the end of the experiment

        """
        OMFAPIFactory.release_api(self.get('xmppSlice'), 
            self.get('xmppHost'), self.get('xmppPort'), self.get('xmppPassword'))
diff --git a/src/nepi/resources/omf/omf_client.py b/src/nepi/resources/omf/omf_client.py
new file mode 100644 (file)
index 0000000..e5058f3
--- /dev/null
@@ -0,0 +1,320 @@
+import logging
+import sleekxmpp
+from sleekxmpp.exceptions import IqError, IqTimeout
+import traceback
+import xml.etree.ElementTree as ET
+
+import nepi
+
+# inherit from BaseXmpp and XMLStream classes
class OMFClient(sleekxmpp.ClientXMPP): 
    """XMPP client customized for the OMF protocol.

    .. class:: Class Args :
      
        :param jid: Jabber Id (= Xmpp Slice + Date)
        :type jid: Str
        :param password: Jabber Password (= Xmpp Password)
        :type password: Str

    .. note::

       This class is an XMPP Client with customized method

    """

    def __init__(self, jid, password):
        """
        :param jid: Jabber Id (= Xmpp Slice + Date)
        :type jid: Str
        :param password: Jabber Password (= Xmpp Password)
        :type password: Str

        """
        sleekxmpp.ClientXMPP.__init__(self, jid, password)
        self._ready = False
        self._registered = False
        self._server = None

        self.register_plugin('xep_0077') # In-band registration
        self.register_plugin('xep_0030') # Service discovery
        self.register_plugin('xep_0059') # Result set management
        self.register_plugin('xep_0060') # PubSub 

        self.add_event_handler("session_start", self.start)
        self.add_event_handler("register", self.register)
        self.add_event_handler("pubsub_publish", self.handle_omf_message)
        
        self._logger = logging.getLogger("nepi.omf.xmppClient")
        self._logger.setLevel(nepi.LOGLEVEL)

    @property
    def ready(self):
        """ True once the XMPP session is established

        """
        return self._ready

    def start(self, event):
        """ Send presence to the Xmpp Server. This function is called directly by the sleekXmpp library

        """
        self.send_presence()
        self._ready = True
        self._server = "pubsub.%s" % self.boundjid.domain

    def register(self, iq):
        """  Register to the Xmpp Server. This function is called directly by the sleekXmpp library

        """
        if self._registered:
            self._logger.info(" %s already registered!" % self.boundjid)
            return 

        resp = self.Iq()
        resp['type'] = 'set'
        resp['register']['username'] = self.boundjid.user
        resp['register']['password'] = self.password

        try:
            resp.send(now=True)
            self._logger.info(" Account created for %s!" % self.boundjid)
            self._registered = True
        except IqError as e:
            self._logger.error(" Could not register account: %s" %
                    e.iq['error']['text'])
        except IqTimeout:
            self._logger.error(" No response from server.")

    def unregister(self):
        """  Unregister from the Xmpp Server.

        """
        try:
            self.plugin['xep_0077'].cancel_registration(
                ifrom=self.boundjid.full)
            self._logger.info(" Account unregistered for %s!" % self.boundjid)
        except IqError as e:
            self._logger.error(" Could not unregister account: %s" %
                    e.iq['error']['text'])
        except IqTimeout:
            self._logger.error(" No response from server.")

    def nodes(self):
        """  Get all the nodes of the Xmpp Server.

        """
        # NOTE: bare 'except:' narrowed to 'except Exception:' throughout so
        # SystemExit/KeyboardInterrupt are not silently swallowed; failures
        # are still logged with their traceback as before.
        try:
            result = self['xep_0060'].get_nodes(self._server)
            for item in result['disco_items']['items']:
                self._logger.info(' - %s' % str(item))
            return result
        except Exception:
            error = traceback.format_exc()
            self._logger.error(' Could not retrieve node list.\ntraceback:\n%s', error)

    def subscriptions(self):
        """  Get all the subscriptions of the Xmpp Server.

        """
        try:
            result = self['xep_0060'].get_subscriptions(self._server)
            for node in result['node']:
                self._logger.info(' - %s' % str(node))
            return result
        except Exception:
            error = traceback.format_exc()
            self._logger.error(' Could not retrieve subscriptions.\ntraceback:\n%s', error)

    def create(self, node):
        """  Create the topic corresponding to the node

        :param node: Name of the topic, corresponding to the node (ex : omf.plexus.wlab17)
        :type node: str

        """
        self._logger.debug(" Create Topic : " + node)
   
        config = self['xep_0004'].makeForm('submit')
        config.add_field(var='pubsub#node_type', value='leaf')
        config.add_field(var='pubsub#notify_retract', value='0')
        config.add_field(var='pubsub#publish_model', value='open')
        config.add_field(var='pubsub#persist_items', value='1')
        config.add_field(var='pubsub#max_items', value='1')
        config.add_field(var='pubsub#title', value=node)

        try:
            self['xep_0060'].create_node(self._server, node, config = config)
        except Exception:
            error = traceback.format_exc()
            self._logger.error(' Could not create topic: %s\ntraceback:\n%s' % (node, error))

    def delete(self, node):
        """  Delete the topic corresponding to the node

        :param node: Name of the topic, corresponding to the node (ex : omf.plexus.wlab17)
        :type node: str

        """
        try:
            self['xep_0060'].delete_node(self._server, node)
            self._logger.info(' Deleted node: %s' % node)
        except Exception:
            error = traceback.format_exc()
            self._logger.error(' Could not delete topic: %s\ntraceback:\n%s' % (node, error))
    
    def publish(self, data, node):
        """  Publish the data to the corresponding topic

        :param data: Data that will be published
        :type data: str
        :param node: Name of the topic
        :type node: str

        """ 
        self._logger.debug(" Publish to Topic : " + node)
        try:
            result = self['xep_0060'].publish(self._server, node, payload=data)
        except Exception:
            error = traceback.format_exc()
            self._logger.error(' Could not publish to: %s\ntraceback:\n%s' \
                    % (node, error))

    def get(self, data):
        """  Get the item

        :param data: id of the item to retrieve
        :type data: str

        """
        try:
            result = self['xep_0060'].get_item(self._server, self.boundjid,
                data)
            for item in result['pubsub']['items']['substanzas']:
                # BUG FIX: the original called the undefined name
                # 'tostring', raising NameError whenever an item was
                # actually retrieved; serialize with ET.tostring instead.
                self._logger.info('Retrieved item %s: %s' % (item['id'], 
                    ET.tostring(item['payload'])))
        except Exception:
            error = traceback.format_exc()
            self._logger.error(' Could not retrieve item %s from topic %s\ntraceback:\n%s' \
                    % (data, self.boundjid, error))

    def retract(self, data):
        """  Retract the item

        :param data: id of the item to retract
        :type data: str

        """
        try:
            result = self['xep_0060'].retract(self._server, self.boundjid, data)
            self._logger.info(' Retracted item %s from topic %s' % (data, self.boundjid))
        except Exception:
            error = traceback.format_exc()
            self._logger.error(' Could not retract item %s from topic %s\ntraceback:\n%s' \
                    % (data, self.boundjid, error))

    def purge(self):
        """  Purge the information in the server

        """
        try:
            result = self['xep_0060'].purge(self._server, self.boundjid)
            self._logger.info(' Purged all items from topic %s' % self.boundjid)
        except Exception:
            error = traceback.format_exc()
            self._logger.error(' Could not purge items from topic %s\ntraceback:\n%s' \
                    % (self.boundjid, error))

    def subscribe(self, node):
        """ Subscribe to a topic

        :param node: Name of the topic
        :type node: str

        """
        try:
            result = self['xep_0060'].subscribe(self._server, node)
            self._logger.info(' Subscribed %s to topic %s' \
                    % (self.boundjid.user, node))
        except Exception:
            error = traceback.format_exc()
            self._logger.error(' Could not subscribe %s to topic %s\ntraceback:\n%s' \
                    % (self.boundjid.bare, node, error))

    def unsubscribe(self, node):
        """ Unsubscribe from a topic

        :param node: Name of the topic
        :type node: str

        """
        try:
            result = self['xep_0060'].unsubscribe(self._server, node)
            self._logger.info(' Unsubscribed %s from topic %s' % (self.boundjid.bare, node))
        except Exception:
            error = traceback.format_exc()
            self._logger.error(' Could not unsubscribe %s from topic %s\ntraceback:\n%s' \
                    % (self.boundjid.bare, node, error))

    def _check_for_tag(self, root, namespaces, tag):
        """  Return the first element with the given tag that carries a
        text value, or None when no such element exists.

        :param root: Root of the tree
        :type root: ElementTree Element
        :param namespaces: Namespaces of the element
        :type namespaces: str
        :param tag: Tag searched for in the tree
        :type tag: str

        """
        # BUG FIX: the original returned None as soon as the FIRST matching
        # element had no text, without inspecting the remaining matches.
        for element in root.iter(namespaces + tag):
            if element.text:
                return element
        return None

    def _check_output(self, root, namespaces):
        """ Extract the significant elements of an OMF answer and log them

        :param root: Root of the tree
        :type root: ElementTree Element
        :param namespaces: Namespaces of the tree
        :type namespaces: str

        """
        fields = ["TARGET", "REASON", "PATH", "APPID", "VALUE"]
        response = ""
        for elt in fields:
            msg = self._check_for_tag(root, namespaces, elt)
            if msg is not None:
                response = response + " " + msg.text + " :"
        deb = self._check_for_tag(root, namespaces, "MESSAGE")
        if deb is not None:
            self._logger.debug(response + " " + deb.text)
        else :
            self._logger.info(response)

    def handle_omf_message(self, iq):
        """ Handle published/received message 

        :param iq: Stanzas that is currently published/received
        :type iq: Iq Stanza

        """
        namespaces = "{http://jabber.org/protocol/pubsub}"
        for i in iq['pubsub_event']['items']:
            root = ET.fromstring(str(i))
            self._check_output(root, namespaces)
diff --git a/src/nepi/resources/omf/omf_interface.py b/src/nepi/resources/omf/omf_interface.py
new file mode 100644 (file)
index 0000000..36a479f
--- /dev/null
@@ -0,0 +1,148 @@
+#!/usr/bin/env python
+from nepi.execution.resource import ResourceManager, clsinit
+from nepi.execution.attribute import Attribute, Flags 
+
+from nepi.resources.omf.omf_api import OMFAPIFactory
+
+import nepi
+import logging
+
@clsinit
class OMFWifiInterface(ResourceManager):
    """Resource Manager for an OMF wifi interface.

    .. class:: Class Args :
      
        :param ec: The Experiment controller
        :type ec: ExperimentController
        :param guid: guid of the RM
        :type guid: int

    .. note::

       This class is used only by the Experiment Controller through the Resource Factory

    """
    _rtype = "OMFWifiInterface"
    _authorized_connections = ["OMFNode" , "OMFChannel"]
    _waiters = ["OMFNode"]

    @classmethod
    def _register_attributes(cls):
        """Register the attributes of an OMF interface 

        """
        # BUG FIX: 'xmppPassword' was described as "Xmpp Port" (copy-paste
        # error). Local renamed from 'type' to avoid shadowing the builtin.
        alias = Attribute("alias", "Alias of the interface", default = "w0")
        mode = Attribute("mode", "Mode of the interface")
        iface_type = Attribute("type", "Type of the interface")
        essid = Attribute("essid", "Essid of the interface")
        ip = Attribute("ip", "IP of the interface")
        xmppSlice = Attribute("xmppSlice", "Name of the slice", flags = Flags.Credential)
        xmppHost = Attribute("xmppHost", "Xmpp Server", flags = Flags.Credential)
        xmppPort = Attribute("xmppPort", "Xmpp Port", flags = Flags.Credential)
        xmppPassword = Attribute("xmppPassword", "Xmpp Password", flags = Flags.Credential)
        cls._register_attribute(alias)
        cls._register_attribute(xmppSlice)
        cls._register_attribute(xmppHost)
        cls._register_attribute(xmppPort)
        cls._register_attribute(xmppPassword)
        cls._register_attribute(mode)
        cls._register_attribute(iface_type)
        cls._register_attribute(essid)
        cls._register_attribute(ip)

    def __init__(self, ec, guid):
        """
        :param ec: The Experiment controller
        :type ec: ExperimentController
        :param guid: guid of the RM
        :type guid: int

        """
        super(OMFWifiInterface, self).__init__(ec, guid)

        self._omf_api = None
        # Interface alias cached at creation time (e.g. "w0")
        self._alias = self.get('alias')

        self._logger = logging.getLogger("nepi.omf.omfIface  ")
        self._logger.setLevel(nepi.LOGLEVEL)

    def _validate_connection(self, guid):
        """ Check if the connection is available.

        :param guid: Guid of the current RM
        :type guid: int
        :rtype:  Boolean

        """
        rm = self.ec.get_resource(guid)
        if rm.rtype() in self._authorized_connections:
            self._logger.debug("Connection between %s %s and %s %s accepted" %
                (self.rtype(), self._guid, rm.rtype(), guid))
            return True
        self._logger.debug("Connection between %s %s and %s %s refused" % 
            (self.rtype(), self._guid, rm.rtype(), guid))
        return False

    def _get_nodes(self, conn_set):
        """ Get the RM of the node to which the interface is connected

        :param conn_set: Connections of the current Guid
        :type conn_set: set
        :rtype: ResourceManager or None

        """
        for elt in conn_set:
            rm = self.ec.get_resource(elt)
            if rm.rtype() == "OMFNode":
                return rm
        return None

    def deploy_action(self):
        """Deploy the RM: send one OMF configure message per interface
        attribute (mode, type, essid, ip) to the connected node.

        """
        self._omf_api = OMFAPIFactory.get_api(self.get('xmppSlice'), 
            self.get('xmppHost'), self.get('xmppPort'), self.get('xmppPassword'))

        # BUG FIX: the original concatenated the attribute values with '+'
        # before the guard, raising TypeError when one of them was still
        # None. Lazy %s arguments handle None safely.
        self._logger.debug(" %s ( Guid : %s) : %s : %s : %s : %s",
            self.rtype(), self._guid, self.get('mode'), self.get('type'),
            self.get('essid'), self.get('ip'))

        if self.get('mode') and self.get('type') and self.get('essid') and self.get('ip'):
            rm_node = self._get_nodes(self._connections)    
            for attrname in ["mode", "type", "essid", "ip"]:
                attrval = self.get(attrname)
                attrname = "net/%s/%s" % (self._alias, attrname)
                self._omf_api.configure(rm_node.get('hostname'), attrname, attrval)

        super(OMFWifiInterface, self).deploy_action()

    def start(self):
        """Start the RM (nothing interface-specific to do)

        """
        super(OMFWifiInterface, self).start()

    def stop(self):
        """Stop the RM (nothing interface-specific to do)

        """
        super(OMFWifiInterface, self).stop()

    def release(self):
        """Release the OMF API client at the end of the experiment

        """
        OMFAPIFactory.release_api(self.get('xmppSlice'), 
            self.get('xmppHost'), self.get('xmppPort'), self.get('xmppPassword'))
diff --git a/src/nepi/resources/omf/omf_messages_5_4.py b/src/nepi/resources/omf/omf_messages_5_4.py
new file mode 100644 (file)
index 0000000..c5857c2
--- /dev/null
@@ -0,0 +1,215 @@
+from xml.etree import cElementTree as ET
+
class MessageHandler():
    """Builder of OMF 5.4 XML messages.

    .. class:: Class Args :
      
        :param sliceid: Slice Name (= Xmpp Slice)
        :type sliceid: Str
        :param expid: Experiment ID (= Xmpp User)
        :type expid: Str

    .. note::

       This class is used only for OMF 5.4 Protocol and is going to become unused

    """

    def __init__(self, sliceid, expid ):
        """
        :param sliceid: Slice Name (= Xmpp Slice)
        :type sliceid: Str
        :param expid: Experiment ID (= Xmpp User)
        :type expid: Str

        """
        self._slice_id = sliceid
        self._exp_id = expid

    def _id_element(self, parent, markup):
        """ Append a child element tagged *markup* carrying the
        omf-payload id attribute, and return it.

        :param parent: Parent element in an XML point of view
        :type parent: ElementTree Element
        :param markup: Name of the markup
        :type markup: str

        """
        child = ET.SubElement(parent, markup)
        child.set("id", "\'omf-payload\'")
        return child

    def _attr_element(self, parent, markup, text):
        """ Append a child element tagged *markup* holding *text*,
        and return it.

        :param parent: Parent element in an XML point of view
        :type parent: ElementTree Element
        :param markup: Name of the markup
        :type markup: str
        :param text: Value of the markup element
        :type text: str

        """
        child = ET.SubElement(parent, markup)
        child.text = text
        return child

    def _build_message(self, msg_type, fields):
        """ Assemble an <omf-message> whose single *msg_type* child holds
        one sub-element per (tag, value) pair, in the given order.

        :param msg_type: Tag of the message body (ex : "EXECUTE")
        :type msg_type: str
        :param fields: Ordered (tag, value) pairs of the message body
        :type fields: iterable

        """
        payload = ET.Element("omf-message")
        body = self._id_element(payload, msg_type)
        for tag, value in fields:
            self._attr_element(body, tag, value)
        return payload

    def execute_function(self, target, appid, cmdlineargs, path, env):
        """ Build an Execute Message

        :param target: Hrn of the target node (ex : omf.plexus.wlab17)
        :type target: str
        :param appid: Application id
        :type appid: str
        :param cmdlineargs: Arguments of the application
        :type cmdlineargs: str
        :param path: Path of the application
        :type path: str
        :param env: Environment variables
        :type env: str

        """
        return self._build_message("EXECUTE", (
            ("ENV", env),
            ("SLICEID", self._slice_id),
            ("EXPID", self._exp_id),
            ("TARGET", target),
            ("APPID", appid),
            ("CMDLINEARGS", cmdlineargs),
            ("PATH", path)))

    def exit_function(self, target, appid):
        """ Build an Exit Message

        :param target: Hrn of the target node (ex : omf.plexus.wlab17)
        :type target: str
        :param appid: Application id (ex : vlc#1)
        :type appid: str

        """
        return self._build_message("EXIT", (
            ("SLICEID", self._slice_id),
            ("EXPID", self._exp_id),
            ("TARGET", target),
            ("APPID", appid)))

    def configure_function(self, target, value, path):
        """ Build a Configure Message

        :param target: Hrn of the target node (ex : omf.plexus.wlab17)
        :type target: str
        :param value: Value to set
        :type value: str
        :param path: Path of the element to configure (ex : net/w0/channel)
        :type path: str

        """
        return self._build_message("CONFIGURE", (
            ("SLICEID", self._slice_id),
            ("EXPID", self._exp_id),
            ("TARGET", target),
            ("VALUE", value),
            ("PATH", path)))

    def log_function(self,level, logger, level_name, data):
        """ Build a Log Message

        :param level: Level of logging
        :type level: str
        :param logger: Element publishing the log
        :type logger: str
        :param level_name: Name of the level (ex : INFO)
        :type level_name: str
        :param data: Content to publish
        :type data: str

        """
        return self._build_message("LOGGING", (
            ("LEVEL", level),
            ("SLICEID", self._slice_id),
            ("LOGGER", logger),
            ("EXPID", self._exp_id),
            ("LEVEL_NAME", level_name),
            ("DATA", data)))

    def alias_function(self, name, target):
        """ Build an Alias Message

        :param name: Name of the new alias
        :type name: str
        :param target: Hrn of the target node (ex : omf.plexus.wlab17)
        :type target: str

        """
        return self._build_message("ALIAS", (
            ("SLICEID", self._slice_id),
            ("EXPID", self._exp_id),
            ("NAME", name),
            ("TARGET", target)))

    def enroll_function(self, enrollkey, image, index, target ):
        """ Build an Enroll Message

        :param enrollkey: Type of enrollment (= 1)
        :type enrollkey: str
        :param image: Image (= * when all the nodes are concerned)
        :type image: str
        :param index: Index (= 1 in general)
        :type index: str
        :param target: Hrn of the target node (ex : omf.plexus.wlab17)
        :type target: str

        """
        return self._build_message("ENROLL", (
            ("ENROLLKEY", enrollkey),
            ("SLICEID", self._slice_id),
            ("IMAGE", image),
            ("EXPID", self._exp_id),
            ("INDEX", index),
            ("TARGET", target)))

    def noop_function(self,target):
        """ Build a Noop Message

        :param target: Hrn of the target node (ex : omf.plexus.wlab17)
        :type target: str

        """
        return self._build_message("NOOP", (
            ("SLICEID", self._slice_id),
            ("EXPID", self._exp_id),
            ("TARGET", target)))

    def newexp_function(self, experimentid, address):
        """ Build a NewExp Message

        :param experimentid: Id of the new experiment
        :type experimentid: str
        :param address: Address of the destination set of nodes
        :type address: str

        """
        return self._build_message("EXPERIMENT_NEW", (
            ("EXPERIMENT_ID", experimentid),
            ("SLICEID", self._slice_id),
            ("EXPID", self._exp_id),
            ("ADDRESS", address)))
diff --git a/src/nepi/resources/omf/omf_node.py b/src/nepi/resources/omf/omf_node.py
new file mode 100644 (file)
index 0000000..1396cdb
--- /dev/null
@@ -0,0 +1,166 @@
+#!/usr/bin/env python
+from nepi.execution.resource import ResourceManager, clsinit
+from nepi.execution.attribute import Attribute, Flags 
+
+from nepi.resources.omf.omf_api import OMFAPIFactory
+
+import nepi
+import logging
+import time
+
+@clsinit
+class OMFNode(ResourceManager):
+    """
+    .. class:: Class Args :
+      
+        :param ec: The Experiment controller
+        :type ec: ExperimentController
+        :param guid: guid of the RM
+        :type guid: int
+        :param creds: Credentials to communicate with the rm (XmppClient for OMF)
+        :type creds: dict
+
+    .. note::
+
+       This class is used only by the Experiment Controller through the Resource Factory
+
+    """
+    _rtype = "OMFNode"
+    _authorized_connections = ["OMFApplication" , "OMFWifiInterface"]
+    _waiters = []
+
+    @classmethod
+    def _register_attributes(cls):
+        """Register the attributes of an OMF Node
+
+        """
+        hostname = Attribute("hostname", "Hostname of the machine")
+        cpu = Attribute("cpu", "CPU of the node")
+        ram = Attribute("ram", "RAM of the node")
+        xmppSlice = Attribute("xmppSlice","Name of the slice", flags = Flags.Credential)
+        xmppHost = Attribute("xmppHost", "Xmpp Server",flags = Flags.Credential)
+        xmppPort = Attribute("xmppPort", "Xmpp Port",flags = Flags.Credential)
+        xmppPassword = Attribute("xmppPassword", "Xmpp Password",flags = Flags.Credential)
+        cls._register_attribute(hostname)
+        cls._register_attribute(ram)
+        cls._register_attribute(cpu)
+        cls._register_attribute(xmppSlice)
+        cls._register_attribute(xmppHost)
+        cls._register_attribute(xmppPort)
+        cls._register_attribute(xmppPassword)
+
+    @classmethod
+    def _register_filters(cls):
+        """Register the filters of an OMF Node
+
+        """
+        hostname = Attribute("hostname", "Hostname of the machine")
+        gateway = Attribute("gateway", "Gateway")
+        granularity = Attribute("granularity", "Granularity of the reservation time")
+        hardware_type = Attribute("hardware_type", "Hardware type of the machine")
+        cls._register_filter(hostname)
+        cls._register_filter(gateway)
+        cls._register_filter(granularity)
+        cls._register_filter(hardware_type)
+
+    # XXX: We don't necessary need to have the credentials at the 
+    # moment we create the RM
+    def __init__(self, ec, guid):
+        """
+        :param ec: The Experiment controller
+        :type ec: ExperimentController
+        :param guid: guid of the RM
+        :type guid: int
+        :param creds: Credentials to communicate with the rm (XmppClient for OMF)
+        :type creds: dict
+
+        """
+        super(OMFNode, self).__init__(ec, guid)
+
+        self._omf_api = None 
+
+        self._logger = logging.getLogger("nepi.omf.omfNode   ")
+
+        # XXX: TO DISCUSS
+        self._logger.setLevel(nepi.LOGLEVEL)
+
+    def _validate_connection(self, guid):
+        """Check if the connection is available.
+
+        :param guid: Guid of the current RM
+        :type guid: int
+        :rtype:  Boolean
+
+        """
+        rm = self.ec.get_resource(guid)
+        if rm.rtype() in self._authorized_connections:
+            self._logger.debug("Connection between %s %s and %s %s accepted" %
+                (self.rtype(), self._guid, rm.rtype(), guid))
+            return True
+        self._logger.debug("Connection between %s %s and %s %s refused" %
+            (self.rtype(), self._guid, rm.rtype(), guid))
+        return False
+
+    def deploy_action(self):
+        """Deploy the RM
+
+        """ 
+        self._omf_api = OMFAPIFactory.get_api(self.get('xmppSlice'), 
+            self.get('xmppHost'), self.get('xmppPort'), self.get('xmppPassword'))
+        self._omf_api.enroll_host(self.get('hostname'))
+
+        super(OMFNode, self).deploy_action()
+
+    def discover(self):
+        """ Discover the availables nodes
+
+        """
+        pass
+     
+    def provision(self):
+        """ Provision some availables nodes
+
+        """
+        pass
+
+    def start(self):
+        """Send Xmpp Message Using OMF protocol to enroll the node into the experiment
+
+        """
+        super(OMFNode, self).start()
+
+
+    def stop(self):
+        """Send Xmpp Message Using OMF protocol to disconnect the node
+
+        """
+        super(OMFNode, self).stop()
+
+    def release(self):
+        """Clean the RM at the end of the experiment
+
+        """
+        self._omf_api.release(self.get('hostname'))
+        OMFAPIFactory.release_api(self.get('xmppSlice'), 
+            self.get('xmppHost'), self.get('xmppPort'), self.get('xmppPassword'))
+
+
+    def configure(self):
+        # Restore the (previously commented-out) assignments, guarded so that
+        # configure() no longer raises NameError when self.tc is absent.
+        routes = self.tc._add_route.get(self.guid, []) if hasattr(self, 'tc') else []
+        iface_guids = self.tc.get_connected(self.guid, "devs", "node") if hasattr(self, 'tc') else []
+        for route in routes:
+            (destination, netprefix, nexthop, metric, device) = route
+            netmask = ipaddr2.ipv4_mask2dot(netprefix)
+
+            # Validate that the interface is associated to the node
+            for iface_guid in iface_guids:
+                iface = self.tc.elements.get(iface_guid)
+                if iface.devname == device:
+                    self._omf_api.execute(self.get('hostname'), 
+                        "Id#%s" % str(random.getrandbits(128)), 
+                        "add -net %s netmask %s dev %s" % (destination, netmask, iface.devname), 
+                        "/sbin/route", # path
+                        None, # env
+                     )
diff --git a/src/nepi/resources/omf/xx_omf_resource.py b/src/nepi/resources/omf/xx_omf_resource.py
new file mode 100644 (file)
index 0000000..0ca5962
--- /dev/null
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+from nepi.execution.resource import ResourceManager, clsinit
+from nepi.execution.attribute import Attribute
+
+from nepi.resources.omf.omf_api import OMFAPIFactory
+
+@clsinit
+class OMFResource(ResourceManager):
+    _rtype = "OMFResource"
+
+    @classmethod
+    def _register_attributes(cls):
+        xmppSlice = Attribute("xmppSlice","Name of the slice", flags = "0x02")
+        xmppHost = Attribute("xmppHost", "Xmpp Server",flags = "0x02")
+        xmppPort = Attribute("xmppPort", "Xmpp Port",flags = "0x02")
+        xmppPassword = Attribute("xmppPassword", "Xmpp Password",flags = "0x02")
+        cls._register_attribute(xmppSlice)
+        cls._register_attribute(xmppHost)
+        cls._register_attribute(xmppPort)
+        cls._register_attribute(xmppPassword)
+
+    def __init__(self, ec, guid, creds):
+        # Bug fix: super() must name this class (OMFResource), not OMFNode.
+        super(OMFResource, self).__init__(ec, guid)
+        self.set('xmppSlice', creds['xmppSlice'])
+        self.set('xmppHost', creds['xmppHost'])
+        self.set('xmppPort', creds['xmppPort'])
+        self.set('xmppPassword', creds['xmppPassword'])
+        self._omf_api = OMFAPIFactory.get_api(self.get('xmppSlice'), self.get('xmppHost'), self.get('xmppPort'), self.get('xmppPassword'))
+
+    def discover(self):
+        pass
+     
+    def provision(self, credential):
+        pass
+
+
diff --git a/src/nepi/resources/planetlab/__init__.py b/src/nepi/resources/planetlab/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/src/nepi/testbeds/netns/__init__.py b/src/nepi/testbeds/netns/__init__.py
deleted file mode 100644 (file)
index d3ef5dd..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID, TESTBED_ID
-from execute import TestbedController 
-
diff --git a/src/nepi/testbeds/netns/constants.py b/src/nepi/testbeds/netns/constants.py
deleted file mode 100644 (file)
index a173c62..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-# -*- coding: utf-8 -*-
-
-TESTBED_ID = "netns"
-TESTBED_VERSION = "0.1"
-
diff --git a/src/nepi/testbeds/netns/execute.py b/src/nepi/testbeds/netns/execute.py
deleted file mode 100644 (file)
index 577c39e..0000000
+++ /dev/null
@@ -1,159 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID, TESTBED_VERSION
-from nepi.core import testbed_impl
-from nepi.util.constants import TIME_NOW
-import os
-import fcntl
-import threading
-
-class TestbedController(testbed_impl.TestbedController):
-    from nepi.util.tunchannel_impl import TunChannel
-    
-    LOCAL_FACTORIES = {
-        'TunChannel' : TunChannel,
-    }
-    
-    LOCAL_TYPES = tuple(LOCAL_FACTORIES.values())
-
-    class HostLock(object):
-        # This class is used as a lock to prevent concurrency issues with more
-        # than one instance of netns running in the same machine. Both in 
-        # different processes or different threads.
-        taken = False
-        processcond = threading.Condition()
-        
-        def __init__(self, lockfile):
-            processcond = self.__class__.processcond
-            
-            processcond.acquire()
-            try:
-                # It's not reentrant
-                while self.__class__.taken:
-                    processcond.wait()
-                self.__class__.taken = True
-            finally:
-                processcond.release()
-            
-            self.lockfile = lockfile
-            fcntl.flock(self.lockfile, fcntl.LOCK_EX)
-        
-        def __del__(self):
-            processcond = self.__class__.processcond
-            
-            processcond.acquire()
-            try:
-                assert self.__class__.taken, "HostLock unlocked without being locked!"
-
-                fcntl.flock(self.lockfile, fcntl.LOCK_UN)
-                
-                # It's not reentrant
-                self.__class__.taken = False
-                processcond.notify()
-            finally:
-                processcond.release()
-    
-    def __init__(self):
-        super(TestbedController, self).__init__(TESTBED_ID, TESTBED_VERSION)
-        self._netns = None
-        self._home_directory = None
-        self._traces = dict()
-        self._netns_lock = open("/tmp/nepi-netns-lock","a")
-    
-    def _lock(self):
-        return self.HostLock(self._netns_lock)
-
-    @property
-    def home_directory(self):
-        return self._home_directory
-
-    @property
-    def netns(self):
-        return self._netns
-
-    def do_setup(self):
-        self._home_directory = self._attributes.\
-            get_attribute_value("homeDirectory")
-        # create home...
-        home = os.path.normpath(self.home_directory)
-        if not os.path.exists(home):
-            os.makedirs(home, 0755)
-
-        self._netns = self._load_netns_module()
-        super(TestbedController, self).do_setup()
-    
-    def do_create(self):
-        lock = self._lock()
-        super(TestbedController, self).do_create()    
-
-    def set(self, guid, name, value, time = TIME_NOW):
-        super(TestbedController, self).set(guid, name, value, time)
-        # TODO: take on account schedule time for the task 
-        factory_id = self._create[guid]
-        factory = self._factories[factory_id]
-        if factory_id not in self.LOCAL_FACTORIES and \
-                factory.box_attributes.is_attribute_metadata(name):
-            return
-        element = self._elements.get(guid)
-        if element:
-            setattr(element, name, value)
-
-    def get(self, guid, name, time = TIME_NOW):
-        value = super(TestbedController, self).get(guid, name, time)
-        # TODO: take on account schedule time for the task
-        factory_id = self._create[guid]
-        factory = self._factories[factory_id]
-        if factory_id not in self.LOCAL_FACTORIES and \
-                factory.box_attributes.is_attribute_metadata(name):
-            return value
-        element = self._elements.get(guid)
-        try:
-            return getattr(element, name)
-        except (KeyError, AttributeError):
-            return value
-
-    def action(self, time, guid, action):
-        raise NotImplementedError
-
-    def shutdown(self):
-        for guid, traces in self._traces.iteritems():
-            for trace_id, (trace, filename) in traces.iteritems():
-                if hasattr(trace, "close"):
-                    trace.close()
-                if hasattr(trace, "signal"):
-                    trace.signal()
-        for guid, element in self._elements.iteritems():
-            if isinstance(element, self.TunChannel):
-                element.cleanup()
-            else:
-                factory_id = self._create[guid]
-                if factory_id == "Node":
-                    element.destroy()
-        self._elements.clear()
-
-    def trace_filepath(self, guid, trace_id, filename = None):
-        if not filename:
-            (trace, filename) = self._traces[guid][trace_id]
-        return os.path.join(self.home_directory, filename)
-
-    def trace_filename(self, guid, trace_id):
-        (trace, filename) = self._traces[guid][trace_id]
-        return filename
-
-
-    def follow_trace(self, guid, trace_id, trace, filename):
-        if not guid in self._traces:
-            self._traces[guid] = dict()
-        self._traces[guid][trace_id] = (trace, filename)
-
-    def _load_netns_module(self):
-        # TODO: Do something with the configuration!!!
-        import sys
-        __import__("netns")
-        netns_mod = sys.modules["netns"]
-        # enable debug
-        enable_debug = self._attributes.get_attribute_value("enableDebug")
-        if enable_debug:
-            netns_mod.environ.set_log_level(netns_mod.environ.LOG_DEBUG)
-        return netns_mod
-
diff --git a/src/nepi/testbeds/netns/metadata.py b/src/nepi/testbeds/netns/metadata.py
deleted file mode 100644 (file)
index 36e914a..0000000
+++ /dev/null
@@ -1,641 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID, TESTBED_VERSION
-from nepi.core import metadata
-from nepi.core.attributes import Attribute
-from nepi.util import tags, validation
-from nepi.util.constants import ApplicationStatus as AS, \
-        FactoryCategories as FC, DeploymentConfiguration as DC
-
-from nepi.util.tunchannel_impl import \
-    preconfigure_tunchannel, postconfigure_tunchannel, \
-    prestart_tunchannel, create_tunchannel, \
-    crossconnect_tunchannel_peer_init, \
-    crossconnect_tunchannel_peer_compl
-
-import functools
-
-# Factories
-NODE = "Node"
-P2PIFACE = "P2PNodeInterface"
-TAPIFACE = "TapNodeInterface"
-TUNIFACE = "TunNodeInterface"
-NODEIFACE = "NodeInterface"
-SWITCH = "Switch"
-APPLICATION = "Application"
-TUNCHANNEL = "TunChannel"
-
-NS3_TESTBED_ID = "ns3"
-FDNETDEV = "ns3::FdNetDevice"
-
-def _follow_trace(testbed_instance, guid, trace_id, filename):
-    filepath = testbed_instance.trace_filepath(guid, trace_id, filename)
-    trace = open(filepath, "wb")
-    testbed_instance.follow_trace(guid, trace_id, trace, filename)
-    return trace
-
-### Connection functions ####
-
-def connect_switch(testbed_instance, switch_guid, interface_guid):
-    switch = testbed_instance._elements[switch_guid]
-    interface = testbed_instance._elements[interface_guid]
-    switch.connect(interface)
-   
-def connect_fd(testbed_instance, tap_guid, cross_data):
-    import passfd
-    import socket
-    tap = testbed_instance._elements[tap_guid]
-    address = cross_data["tun_addr"]
-    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
-    sock.connect(address)
-    passfd.sendfd(sock, tap.fd, '0')
-    # TODO: after succesful transfer, the tap device should close the fd
-
-def connect_tunchannel_tun(testbed_instance, chan_guid, tap_guid):
-    connect_tunchannel_tap(testbed_instance, chan_guid, tap_guid, ethernet_mode=False)
-
-def connect_tunchannel_tap(testbed_instance, chan_guid, tap_guid, ethernet_mode=True):
-    tap = testbed_instance._elements[tap_guid]
-    chan = testbed_instance._elements[chan_guid]
-
-    # Create a file object for the tap's interface device 
-    # and send it to the channel. It should comply with all the
-    # requirements for the channel's tun_socket.
-    import os
-    chan.tun_socket = os.fdopen(tap.fd)
-    
-    # Set the channel to ethernet mode (it's a tap)
-    chan.ethernet_mode = ethernet_mode
-    
-    # Check to see if the device uses PI headers
-    # It's normally so
-    with_pi = True
-    try:
-        import fcntl
-        import struct
-        TUNGETIFF = 0x800454d2
-        IFF_NO_PI = 0x00001000
-        struct_ifreq = "x"*16+"H"+"x"*22
-        flags = struct.unpack(struct_ifreq,
-            fcntl.ioctl(tap.fd, TUNGETIFF, struct.pack(struct_ifreq,0)) )[0]
-        with_pi = (0 == (flags & IFF_NO_PI))
-    except:
-        # maybe the kernel doesn't support the IOCTL,
-        # in which case, we assume it uses PI headers (as is usual)
-        pass
-    chan.with_pi = with_pi
-
-### Trace functions ###
-
-def nodepcap_trace(testbed_instance, guid, trace_id):
-    node = testbed_instance._elements[guid]
-    parameters = testbed_instance._get_parameters(guid)
-    filename = "%d-pcap.stdout" % guid
-    stdout = _follow_trace(testbed_instance, guid, "pcap_stdout", filename)
-    filename = "%d-pcap.stderr" % guid
-    stderr = _follow_trace(testbed_instance, guid, "pcap_stderr", filename)
-    filename = "%d-node.pcap" % guid
-    filepath = testbed_instance.trace_filepath(guid, trace_id, filename)
-    command = "tcpdump -i 'any' -w %s" % filepath
-    user = "root"
-    trace = node.Popen(command, shell = True, stdout = stdout, 
-            stderr = stderr, user = user)
-    testbed_instance.follow_trace(guid, trace_id, trace, filename)
-
-trace_functions = dict({
-    "pcap": nodepcap_trace,
-    })
-
-### Creation functions ###
-
-def create_node(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    forward_X11 = False
-    if "forward_X11" in parameters:
-        forward_X11 = parameters["forward_X11"]
-        del parameters["forward_X11"]
-    element = testbed_instance.netns.Node(forward_X11 = forward_X11)
-    testbed_instance.elements[guid] = element
-
-def create_p2piface(testbed_instance, guid):
-    if guid in testbed_instance.elements:
-        # The interface pair was already instantiated
-        return
-    # search for the node asociated with the p2piface
-    node1_guid = testbed_instance.get_connected(guid, "node", "devs")
-    if len(node1_guid) == 0:
-        raise RuntimeError("Can't instantiate interface %d outside netns \
-                node" % guid)
-    node1 = testbed_instance.elements[node1_guid[0]]
-    # search for the pair p2piface
-    p2p_guid = testbed_instance.get_connected(guid, "p2p","p2p")
-    if len(p2p_guid) == 0:
-        raise RuntimeError("Can't instantiate p2p interface %d. \
-                Missing interface pair" % guid)
-    guid2 = p2p_guid[0]
-    node2_guid = testbed_instance.get_connected(guid2, "node", "devs")
-    if len(node2_guid) == 0:
-        raise RuntimeError("Can't instantiate interface %d outside netns \
-                node" % guid2)
-    node2 = testbed_instance.elements[node2_guid[0]]
-    element1, element2 = testbed_instance.netns.P2PInterface.create_pair(
-        node1, node2)
-    testbed_instance.elements[guid] = element1
-    testbed_instance.elements[guid2] = element2
-
-def create_tapiface(testbed_instance, guid):
-    node_guid = testbed_instance.get_connected(guid, "node", "devs")
-    if len(node_guid) == 0:
-        raise RuntimeError("Can't instantiate interface %d outside netns \
-                node" % guid)
-    node = testbed_instance.elements[node_guid[0]]
-    element = node.add_tap()
-    testbed_instance.elements[guid] = element
-
-def create_tuniface(testbed_instance, guid):
-    node_guid = testbed_instance.get_connected(guid, "node", "devs")
-    if len(node_guid) == 0:
-        raise RuntimeError("Can't instantiate interface %d outside netns \
-                node" % guid)
-    node = testbed_instance.elements[node_guid[0]]
-    element = node.add_tun()
-    testbed_instance.elements[guid] = element
-
-def create_nodeiface(testbed_instance, guid):
-    node_guid = testbed_instance.get_connected(guid, "node", "devs")
-    if len(node_guid) == 0:
-        raise RuntimeError("Can't instantiate interface %d outside netns \
-                node" % guid)
-    node = testbed_instance.elements[node_guid[0]]
-    element = node.add_if()
-    testbed_instance.elements[guid] = element
-
-def create_switch(testbed_instance, guid):
-    element = testbed_instance.netns.Switch()
-    testbed_instance.elements[guid] = element
-
-def create_application(testbed_instance, guid):
-    testbed_instance.elements[guid] = None # Delayed construction 
-
-### Start/Stop functions ###
-
-def start_application(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    traces = testbed_instance._get_traces(guid)
-    command = parameters["command"]
-    user = None
-    if "user" in parameters:
-        user = parameters["user"]
-    stdout = stderr = None
-    if "stdout" in traces:
-        filename = "%d-stdout.trace" % guid
-        stdout = _follow_trace(testbed_instance, guid, "stdout", filename)
-    if "stderr" in traces:
-        filename = "%d-stderr.trace" % guid
-        stderr = _follow_trace(testbed_instance, guid, "stderr", filename)
-    node_guid = testbed_instance.get_connected(guid, "node", "apps")
-    if len(node_guid) == 0:
-        raise RuntimeError("Can't instantiate interface %d outside netns \
-                node" % guid)
-    node = testbed_instance.elements[node_guid[0]]
-    element  = node.Popen(command, shell = True, stdout = stdout, 
-            stderr = stderr, user = user)
-    testbed_instance.elements[guid] = element
-
-def stop_application(testbed_instance, guid):
-    #app = testbed_instance.elements[guid]
-    #app.signal()
-    pass
-
-### Status functions ###
-
-def status_application(testbed_instance, guid):
-    if guid not in testbed_instance.elements.keys():
-        return AS.STATUS_NOT_STARTED
-    app = testbed_instance.elements[guid]
-    if app.poll() == None:
-        return AS.STATUS_RUNNING
-    return AS.STATUS_FINISHED
-
-### Configure functions ###
-
-def configure_traces(testbed_instance, guid):
-    traces = testbed_instance._get_traces(guid)
-    for trace_id in traces:
-        if trace_id not in trace_functions:
-            continue
-        trace_func = trace_functions[trace_id]
-        trace_func(testbed_instance, guid, trace_id)
-
-def configure_device(testbed_instance, guid):
-    configure_traces(testbed_instance, guid)
-    element = testbed_instance._elements[guid]
-    if not guid in testbed_instance._add_address:
-        return
-    addresses = testbed_instance._add_address[guid]
-    for address in addresses:
-        (address, netprefix, broadcast) = address
-        # TODO: Decide if we should add a ipv4 or ipv6 address
-        element.add_v4_address(address, netprefix)
-
-def configure_node(testbed_instance, guid):
-    configure_traces(testbed_instance, guid)
-    element = testbed_instance._elements[guid]
-    if not guid in testbed_instance._add_route:
-        return
-    routes = testbed_instance._add_route[guid]
-    for route in routes:
-        (destination, netprefix, nexthop, metric, device) = route
-        element.add_route(prefix = destination, prefix_len = netprefix,
-            nexthop = nexthop, metric = metric)
-
-### Factory information ###
-
-connector_types = dict({
-    "apps": dict({
-                "help": "Connector from node to applications", 
-                "name": "apps",
-                "max": -1, 
-                "min": 0
-            }),
-    "devs": dict({
-                "help": "Connector from node to network interfaces", 
-                "name": "devs",
-                "max": -1, 
-                "min": 0
-            }),
-    "node": dict({
-                "help": "Connector to a Node", 
-                "name": "node",
-                "max": 1, 
-                "min": 1
-            }),
-    "p2p": dict({
-                "help": "Connector to a P2PInterface", 
-                "name": "p2p",
-                "max": 1, 
-                "min": 0
-            }),
-    "->fd": dict({
-                "help": "File descriptor receptor for devices with file descriptors",
-                "name": "->fd",
-                "max": 1,
-                "min": 0
-            }),
-    "fd->": dict({
-                "help": "File descriptor provider for devices with file descriptors",
-                "name": "fd->",
-                "max": 1,
-                "min": 0
-            }),
-    "switch": dict({
-                "help": "Connector to a switch", 
-                "name": "switch",
-                "max": 1, 
-                "min": 0
-            }),
-    "tcp": dict({
-                "help": "ip-ip tunneling over TCP link", 
-                "name": "tcp",
-                "max": 1, 
-                "min": 0
-            }),
-    "udp": dict({
-                "help": "ip-ip tunneling over UDP datagrams", 
-                "name": "udp",
-                "max": 1, 
-                "min": 0
-            }),
-   })
-
-connections = [
-    dict({
-        "from": (TESTBED_ID, NODE, "devs"),
-        "to":   (TESTBED_ID, P2PIFACE, "node"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, NODE, "devs"),
-        "to":   (TESTBED_ID, TAPIFACE, "node"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, NODE, "devs"),
-        "to":   (TESTBED_ID, TUNIFACE, "node"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, NODE, "devs"),
-        "to":   (TESTBED_ID, NODEIFACE, "node"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, P2PIFACE, "p2p"),
-        "to":   (TESTBED_ID, P2PIFACE, "p2p"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TAPIFACE, "fd->"),
-        "to":   (None, None, "->fd"),
-        "compl_code": connect_fd,
-        "can_cross": True
-    }),
-    dict({
-        "from": (TESTBED_ID, TUNIFACE, "fd->"),
-        "to":   (None, None, "->fd"),
-        "compl_code": connect_fd,
-        "can_cross": True
-    }),
-     dict({
-        "from": (TESTBED_ID, SWITCH, "devs"),
-        "to":   (TESTBED_ID, NODEIFACE, "switch"),
-        "init_code": connect_switch,
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, NODE, "apps"),
-        "to":   (TESTBED_ID, APPLICATION, "node"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TUNCHANNEL, "->fd" ),
-        "to":   (TESTBED_ID, TAPIFACE, "fd->" ),
-        "init_code": connect_tunchannel_tap,
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TUNCHANNEL, "->fd" ),
-        "to":   (TESTBED_ID, TUNIFACE, "fd->" ),
-        "init_code": connect_tunchannel_tun,
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TUNCHANNEL, "tcp"),
-        "to":   (None, None, "tcp"),
-        "init_code": functools.partial(crossconnect_tunchannel_peer_init,"tcp"),
-        "compl_code": functools.partial(crossconnect_tunchannel_peer_compl,"tcp"),
-        "can_cross": True
-    }),
-    dict({
-        "from": (TESTBED_ID, TUNCHANNEL, "udp"),
-        "to":   (None, None, "udp"),
-        "init_code": functools.partial(crossconnect_tunchannel_peer_init,"udp"),
-        "compl_code": functools.partial(crossconnect_tunchannel_peer_compl,"udp"),
-        "can_cross": True
-    }),
-]
-
-attributes = dict({
-    "forward_X11": dict({      
-                "name": "forward_X11",
-                "help": "Forward x11 from main namespace to the node",
-                "type": Attribute.BOOL, 
-                "value": False,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_bool
-            }),
-    "lladdr": dict({      
-                "name": "lladdr", 
-                "help": "Mac address", 
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_mac_address
-            }),
-    "up": dict({
-                "name": "up",
-                "help": "Link up",
-                "type": Attribute.BOOL,
-                "value": True,
-                "flags": Attribute.NoDefaultValue, 
-                "validation_function": validation.is_bool
-            }),
-    "device_name": dict({
-                "name": "name",
-                "help": "Device name",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    "mtu":  dict({
-                "name": "mtu", 
-                "help": "Maximum transmition unit for device",
-                "type": Attribute.INTEGER,
-                "validation_function": validation.is_integer
-            }),
-    "broadcast": dict({ 
-                "name": "broadcast",
-                "help": "Broadcast address",
-                "type": Attribute.STRING,
-                "validation_function": validation.is_string # TODO: should be is address!
-            }),
-    "multicast": dict({      
-                "name": "multicast",
-                "help": "Multicast enabled",
-                "type": Attribute.BOOL,
-                "value": False,
-                "validation_function": validation.is_bool
-            }),
-    "arp": dict({
-                "name": "arp",
-                "help": "ARP enabled",
-                "type": Attribute.BOOL,
-                "value": False,
-                "validation_function": validation.is_bool
-            }),
-    "command": dict({
-                "name": "command",
-                "help": "Command line string",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    "user": dict({
-                "name": "user",
-                "help": "System user",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    "stdin": dict({
-                "name": "stdin",
-                "help": "Standard input",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    })
-
-traces = dict({
-    "stdout": dict({
-                "name": "stdout",
-                "help": "Standard output stream"
-              }),
-    "stderr": dict({
-                "name": "stderr",
-                "help": "Application standard error",
-        }),
-    "node_pcap": dict({
-                "name": "pcap",
-                "help": "tcpdump at all node interfaces",
-        }) 
-    })
-
-create_order = [ NODE, P2PIFACE, NODEIFACE, TAPIFACE, 
-        TUNIFACE, TUNCHANNEL, SWITCH,
-        APPLICATION ]
-
-configure_order = [ P2PIFACE, NODEIFACE, TAPIFACE, 
-        TUNIFACE, TUNCHANNEL, SWITCH, 
-        NODE, APPLICATION ]
-
-factories_info = dict({
-    NODE: dict({
-            "help": "Emulated Node with virtualized network stack",
-            "category": FC.CATEGORY_NODES,
-            "create_function": create_node,
-            "configure_function": configure_node,
-            "box_attributes": ["forward_X11"],
-            "connector_types": ["devs", "apps"],
-            "traces": ["node_pcap"],
-            "tags": [tags.NODE, tags.ALLOW_ROUTES],
-       }),
-    P2PIFACE: dict({
-            "help": "Point to point network interface",
-            "category": FC.CATEGORY_DEVICES,
-            "create_function": create_p2piface,
-            "configure_function": configure_device,
-            "box_attributes": ["lladdr", "up", "device_name", "mtu", 
-                "multicast", "broadcast", "arp"],
-            "connector_types": ["node", "p2p"],
-            "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-       }),
-    TAPIFACE: dict({
-            "help": "Tap device network interface",
-            "category": FC.CATEGORY_DEVICES,
-            "create_function": create_tapiface,
-            "configure_function": configure_device,
-            "box_attributes": ["lladdr", "up", "device_name", "mtu", 
-                "multicast", "broadcast", "arp"],
-            "connector_types": ["node", "fd->"],
-            "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-        }),
-    TUNIFACE: dict({
-            "help": "Tun device network interface",
-            "category": FC.CATEGORY_DEVICES,
-            "create_function": create_tuniface,
-            "configure_function": configure_device,
-            "box_attributes": ["lladdr", "up", "device_name", "mtu", 
-                "multicast", "broadcast", "arp"],
-            "connector_types": ["node", "fd->"],
-            "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-        }),
-    NODEIFACE: dict({
-            "help": "Node network interface",
-            "category": FC.CATEGORY_DEVICES,
-            "create_function": create_nodeiface,
-            "configure_function": configure_device,
-            "box_attributes": ["lladdr", "up", "device_name", "mtu", 
-                "multicast", "broadcast", "arp"],
-            "connector_types": ["node", "switch"],
-            "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-        }),
-    SWITCH: dict({
-            "display_name": "Switch",
-            "help": "Switch interface",
-            "category": FC.CATEGORY_DEVICES,
-            "create_function": create_switch,
-            "box_attributes": ["up", "device_name", "mtu", "multicast"],
-             #TODO: Add attribute ("Stp", help, type, value, range, allowed, readonly, validation_function),
-             #TODO: Add attribute ("ForwarddDelay", help, type, value, range, allowed, readonly, validation_function),
-             #TODO: Add attribute ("HelloTime", help, type, value, range, allowed, readonly, validation_function),
-             #TODO: Add attribute ("AgeingTime", help, type, value, range, allowed, readonly, validation_function),
-             #TODO: Add attribute ("MaxAge", help, type, value, range, allowed, readonly, validation_function)
-            "connector_types": ["devs"],
-            "tags": [tags.SWITCH],
-        }),
-    APPLICATION: dict({
-            "help": "Generic executable command line application",
-            "category": FC.CATEGORY_APPLICATIONS,
-            "create_function": create_application,
-            "start_function": start_application,
-            "stop_function": stop_application,
-            "status_function": status_application,
-            "box_attributes": ["command", "user"],
-            "connector_types": ["node"],
-            "traces": ["stdout", "stderr"],
-            "tags": [tags.APPLICATION],
-        }),
-     TUNCHANNEL : dict({
-            "category": FC.CATEGORY_TUNNELS,
-            "create_function": create_tunchannel,
-            "preconfigure_function": preconfigure_tunchannel,
-            "configure_function": postconfigure_tunchannel,
-            "prestart_function": prestart_tunchannel,
-            "help": "Channel to forward "+TAPIFACE+" data to "
-                "other TAP interfaces supporting the NEPI tunneling protocol.",
-            "connector_types": ["->fd", "udp", "tcp"],
-            "allow_addresses": False,
-            "box_attributes": ["tun_proto", "tun_addr", "tun_port", "tun_key", "tun_cipher"],
-            "tags": [tags.TUNNEL],
-    }),
-})
-
-testbed_attributes = dict({
-        "enable_debug": dict({
-                "name": "enableDebug",
-                "help": "Enable netns debug output",
-                "type": Attribute.BOOL,
-                "value": False,
-                "validation_function": validation.is_bool
-            }),
-    })
-
-supported_recovery_policies = [
-        DC.POLICY_FAIL,
-    ]
-
-class MetadataInfo(metadata.MetadataInfo):
-    @property
-    def connector_types(self):
-        return connector_types
-
-    @property
-    def connections(self):
-        return connections
-
-    @property
-    def attributes(self):
-        return attributes
-
-    @property
-    def traces(self):
-        return traces
-
-    @property
-    def create_order(self):
-        return create_order
-
-    @property
-    def configure_order(self):
-        return configure_order
-
-    @property
-    def factories_info(self):
-        return factories_info
-
-    @property
-    def testbed_attributes(self):
-        return testbed_attributes
-
-    @property
-    def testbed_id(self):
-        return TESTBED_ID
-
-    @property
-    def testbed_version(self):
-        return TESTBED_VERSION
-    
-    @property
-    def supported_recover_policies(self):
-        return supported_recovery_policies
-
diff --git a/src/nepi/testbeds/ns3/__init__.py b/src/nepi/testbeds/ns3/__init__.py
deleted file mode 100644 (file)
index 45a2de9..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID, TESTBED_VERSION
-from execute import TestbedController, load_ns3_module 
-
diff --git a/src/nepi/testbeds/ns3/attributes_metadata.py b/src/nepi/testbeds/ns3/attributes_metadata.py
deleted file mode 100644 (file)
index 8797520..0000000
+++ /dev/null
@@ -1,2587 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from factories_metadata import wifi_standards, l4_protocols, \
-    service_flow_direction, service_flow_scheduling_type
-import validation as ns3_validation
-from nepi.core.attributes import Attribute
-from nepi.util import validation
-
-testbed_attributes = dict({
-    "simu_impl_type": dict({
-            "name": "SimulatorImplementationType",
-            "help": "The object class to use as the simulator implementation",
-            "value": "ns3::DefaultSimulatorImpl",
-            "type": Attribute.ENUM,
-            "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-            "allowed": [
-                "ns3::DefaultSimulatorImpl",
-                "ns3::RealtimeSimulatorImpl",
-            ],
-            "validation_function": validation.is_enum
-        }),
-    "sched_impl_type": dict({
-            "name": "SchedulerType",
-            "help": "The object class to use as the scheduler implementation. Make sure to pick a thread-safe variant.",
-            "value": "ns3::MapScheduler",
-            "type": Attribute.ENUM,
-            "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-            "allowed": [
-                "ns3::MapScheduler",
-                "ns3::HeapScheduler",
-                "ns3::ListScheduler",
-                "ns3::CalendarScheduler",
-            ],
-            "validation_function": validation.is_enum
-        }),
-    "checksum": dict({
-            "name": "ChecksumEnabled",
-            "help": "A global switch to enable all checksums for all protocols",
-            "type": Attribute.BOOL,
-            "value": False,
-            "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-            "validation_function": validation.is_bool
-        }),
-    "simu_stop": dict({
-        "name": "StopTime",
-        "validation_function": validation.is_time,
-        "value": None,
-        "type": Attribute.STRING,
-        "help": "Stop time for the simulation"
-    }),
-})
-
-attributes = dict({
-    "SleepCurrentA": dict({
-        "name": "SleepCurrentA",
-        "validation_function": validation.is_double,
-        "value": 2.0000000000000002e-05,
-        "type": Attribute.DOUBLE,
-        "help": "The default radio Sleep current in Ampere."
-    }),
-    "Protocol": dict({
-        "name": "Protocol",
-        "validation_function": validation.is_string,
-        "value": "ns3::UdpSocketFactory",
-        "type": Attribute.STRING,
-        "help": "The type of protocol to use."
-    }),
-    "TxCurrentA": dict({
-        "name": "TxCurrentA",
-        "validation_function": validation.is_double,
-        "value": 0.017399999999999999,
-        "type": Attribute.DOUBLE,
-        "help": "The radio Tx current in Ampere."
-    }),
-    "BasicEnergySourceInitialEnergyJ": dict({
-        "name": "BasicEnergySourceInitialEnergyJ",
-        "validation_function": validation.is_double,
-        "value": 10.0,
-        "type": Attribute.DOUBLE,
-        "help": "Initial energy stored in basic energy source."
-    }),
-    "FrameSize": dict({
-        "name": "FrameSize",
-        "validation_function": validation.is_integer,
-        "value": 1000,
-        "type": Attribute.INTEGER,
-        "help": "Size of data frames in bytes"
-    }),
-    "RateStep": dict({
-        "name": "RateStep",
-        "validation_function": validation.is_integer,
-        "value": 4,
-        "type": Attribute.INTEGER,
-        "help": "Increments available for rate assignment in bps"
-    }),
-    "Stop": dict({
-        "name": "Stop",
-        "validation_function": validation.is_time,
-        "value": "0ns",
-        "type": Attribute.STRING,
-        "help": "The simulation time at which to tear down the device thread."
-    }),
-    "ChannelSwitchDelay": dict({
-        "name": "ChannelSwitchDelay",
-        "validation_function": validation.is_time,
-        "value": "250000ns",
-        "type": Attribute.STRING,
-        "help": "Delay between two short frames transmitted on different frequencies. NOTE: Unused now."
-    }),
-    "Time": dict({
-        "name": "Time",
-        "validation_function": validation.is_time,
-        "value": "1000000000ns",
-        "type": Attribute.STRING,
-        "help": "Change current direction and speed after moving for this delay."
-    }),
-    "ewndFor12mbps": dict({
-        "name": "ewndFor12mbps",
-        "validation_function": validation.is_integer,
-        "value": 20,
-        "type": Attribute.INTEGER,
-        "help": "ewnd parameter for 12 Mbs data mode"
-    }),
-    "BerThreshold": dict({
-        "name": "BerThreshold",
-        "validation_function": validation.is_double,
-        "value": 1.0000000000000001e-05,
-        "type": Attribute.DOUBLE,
-        "help": "The maximum Bit Error Rate acceptable at any transmission mode"
-    }),
-    "Dot11MeshHWMPactivePathTimeout": dict({
-        "name": "Dot11MeshHWMPactivePathTimeout",
-        "validation_function": validation.is_time,
-        "value": "5120000000ns",
-        "type": Attribute.STRING,
-        "help": "Lifetime of reactive routing information"
-    }),
-    "pmtlFor48mbps": dict({
-        "name": "pmtlFor48mbps",
-        "validation_function": validation.is_double,
-        "value": 0.23000000000000001,
-        "type": Attribute.DOUBLE,
-        "help": "Pmtl parameter for 48 Mbs data mode"
-    }),
-    "SystemLoss": dict({
-        "name": "SystemLoss",
-        "validation_function": validation.is_double,
-        "value": 1.0,
-        "type": Attribute.DOUBLE,
-        "help": "The system loss"
-    }),
-    "ReferenceLoss": dict({
-        "name": "ReferenceLoss",
-        "validation_function": validation.is_double,
-        "value": 46.677700000000002,
-        "type": Attribute.DOUBLE,
-        "help": "The reference loss at distance d0 (dB). (Default is Friis at 1m with 5.15 GHz)"
-    }),
-    "MaxQueueTime": dict({
-        "name": "MaxQueueTime",
-        "validation_function": validation.is_time,
-        "value": "30000000000ns",
-        "type": Attribute.STRING,
-        "help": "Maximum time packets can be queued (in seconds)"
-    }),
-    "Dot11MeshHWMPactiveRootTimeout": dict({
-        "name": "Dot11MeshHWMPactiveRootTimeout",
-        "validation_function": validation.is_time,
-        "value": "5120000000ns",
-        "type": Attribute.STRING,
-        "help": "Lifetime of poractive routing information"
-    }),
-    "DutyCycle": dict({
-        "name": "DutyCycle",
-        "validation_function": validation.is_double,
-        "value": 0.5,
-        "type": Attribute.DOUBLE,
-        "help": "the duty cycle of the generator, i.e., the fraction of the period that is occupied by a signal"
-    }),
-    "DeviceName": dict({
-        "name": "DeviceName",
-        "validation_function": validation.is_string,
-        "value": "eth1",
-        "type": Attribute.STRING,
-        "help": "The name of the underlying real device (e.g. eth1)."
-    }),
-    "Direction": dict({
-        "name": "Direction",
-        "validation_function": validation.is_string,
-        "value": "Uniform:0:6.28318",
-        "type": Attribute.STRING,
-        "help": "A random variable used to pick the direction (gradients)."
-    }),
-    "OffTime": dict({
-        "name": "OffTime",
-        "validation_function": validation.is_string,
-        "value": "Constant:1",
-        "type": Attribute.STRING,
-        "help": "A RandomVariable used to pick the duration of the 'Off' state."
-    }),
-    "UpdatePeriod": dict({
-        "name": "UpdatePeriod",
-        "validation_function": validation.is_time,
-        "value": "1000000000ns",
-        "type": Attribute.STRING,
-        "help": "The interval between decisions about rate control changes"
-    }),
-    "DelayBinWidth": dict({
-        "name": "DelayBinWidth",
-        "validation_function": validation.is_double,
-        "value": 0.001,
-        "type": Attribute.DOUBLE,
-        "help": "The width used in the delay histogram."
-    }),
-    "EnergyDetectionThreshold": dict({
-        "name": "EnergyDetectionThreshold",
-        "validation_function": validation.is_double,
-        "value": -96.0,
-        "type": Attribute.DOUBLE,
-        "help": "The energy of a received signal should be higher than this threshold (dbm) to allow the PHY layer to detect the signal."
-    }),
-    "PacketSizeBinWidth": dict({
-        "name": "PacketSizeBinWidth",
-        "validation_function": validation.is_double,
-        "value": 20.0,
-        "type": Attribute.DOUBLE,
-        "help": "The width used in the packetSize histogram."
-    }),
-    "Resolution": dict({
-        "name": "Resolution",
-        "validation_function": validation.is_time,
-        "value": "1000000ns",
-        "type": Attribute.STRING,
-        "help": "the lengh of the time interval over which the power spectral density of incoming signals is averaged"
-    }),
-    "MaxX": dict({
-        "name": "MaxX",
-        "validation_function": validation.is_double,
-        "value": 1.0,
-        "type": Attribute.DOUBLE,
-        "help": "Maximum X value of traveling region, [m]"
-    }),
-    "IdleCurrentA": dict({
-        "name": "IdleCurrentA",
-        "validation_function": validation.is_double,
-        "value": 0.000426,
-        "type": Attribute.DOUBLE,
-        "help": "The default radio Idle current in Ampere."
-    }),
-    "Netmask": dict({
-        "name": "Netmask",
-        "validation_function": validation.is_string,
-        "value": "255.255.255.255",
-        "type": Attribute.STRING,
-        "help": "The network mask to assign to the tap device, when in ConfigureLocal mode. This address will override the discovered MAC address of the simulated device."
-    }),
-    "PathDiscoveryTime": dict({
-        "name": "PathDiscoveryTime",
-        "validation_function": validation.is_time,
-        "value": "5599999999ns",
-        "type": Attribute.STRING,
-        "help": "Estimate of maximum time needed to find route in network = 2 * NetTraversalTime"
-    }),
-    "poriFor24mbps": dict({
-        "name": "poriFor24mbps",
-        "validation_function": validation.is_double,
-        "value": 0.1681,
-        "type": Attribute.DOUBLE,
-        "help": "Pori parameter for 24 Mbs data mode"
-    }),
-    "Exponent0": dict({
-        "name": "Exponent0",
-        "validation_function": validation.is_double,
-        "value": 1.8999999999999999,
-        "type": Attribute.DOUBLE,
-        "help": "The exponent for the first field."
-    }),
-    "TimeStep": dict({
-        "name": "TimeStep",
-        "validation_function": validation.is_time,
-        "value": "1000000000ns",
-        "type": Attribute.STRING,
-        "help": "Change current direction and speed after moving for this time."
-    }),
-    "MaxMissedBeacons": dict({
-        "name": "MaxMissedBeacons",
-        "validation_function": validation.is_integer,
-        "value": 10,
-        "type": Attribute.INTEGER,
-        "help": "Number of beacons which much be consecutively missed before we attempt to restart association."
-    }),
-    "RxGain": dict({
-        "name": "RxGain",
-        "validation_function": validation.is_double,
-        "value": 1.0,
-        "type": Attribute.DOUBLE,
-        "help": "Reception gain (dB)."
-    }),
-    "MaxRetries": dict({
-        "name": "MaxRetries",
-        "validation_function": validation.is_integer,
-        "value": 4,
-        "type": Attribute.INTEGER,
-        "help": "Maximum number of retries"
-    }),
-    "pmtlFor24mbps": dict({
-        "name": "pmtlFor24mbps",
-        "validation_function": validation.is_double,
-        "value": 0.26500000000000001,
-        "type": Attribute.DOUBLE,
-        "help": "Pmtl parameter for 24 Mbs data mode"
-    }),
-    "TurnOnRtsAfterRateIncrease": dict({
-        "name": "TurnOnRtsAfterRateIncrease",
-        "validation_function": validation.is_bool,
-        "value": True,
-        "type": Attribute.BOOL,
-        "help": "If true the RTS mechanism will be turned on when the rate will be increased"
-    }),
-    "Gain": dict({
-        "name": "Gain",
-        "validation_function": validation.is_double,
-        "value": 0.10000000000000001,
-        "type": Attribute.DOUBLE,
-        "help": "XXX"
-    }),
-    "SuccessK": dict({
-        "name": "SuccessK",
-        "validation_function": validation.is_double,
-        "value": 2.0,
-        "type": Attribute.DOUBLE,
-        "help": "Multiplication factor for the success threshold in the AARF algorithm."
-    }),
-    "MinTimerThreshold": dict({
-        "name": "MinTimerThreshold",
-        "validation_function": validation.is_integer,
-        "value": 15,
-        "type": Attribute.INTEGER,
-        "help": "The minimum value for the 'timer' threshold in the AARF algorithm."
-    }),
-    "TimerThreshold": dict({
-        "name": "TimerThreshold",
-        "validation_function": validation.is_integer,
-        "value": 15,
-        "type": Attribute.INTEGER,
-        "help": "The 'timer' threshold in the ARF algorithm."
-    }),
-    "poriFor36mbps": dict({
-        "name": "poriFor36mbps",
-        "validation_function": validation.is_double,
-        "value": 0.115,
-        "type": Attribute.DOUBLE,
-        "help": "Pori parameter for 36 Mbs data mode"
-    }),
-    "SlotTime": dict({
-        "name": "SlotTime",
-        "validation_function": validation.is_time,
-        "value": "20000000ns",
-        "type": Attribute.STRING,
-        "help": "Time slot duration for MAC backoff"
-    }),
-    "DeltaX": dict({
-        "name": "DeltaX",
-        "validation_function": validation.is_double,
-        "value": 1.0,
-        "type": Attribute.DOUBLE,
-        "help": "The x space between objects."
-    }),
-    "DeltaY": dict({
-        "name": "DeltaY",
-        "validation_function": validation.is_double,
-        "value": 1.0,
-        "type": Attribute.DOUBLE,
-        "help": "The y space between objects."
-    }),
-    "Shipping": dict({
-        "name": "Shipping",
-        "validation_function": validation.is_double,
-        "value": 0.0,
-        "type": Attribute.DOUBLE,
-        "help": "Shipping contribution to noise between 0 and 1"
-    }),
-    "HardLimit": dict({
-        "name": "HardLimit",
-        "validation_function": validation.is_time,
-        "value": "100000000ns",
-        "type": Attribute.STRING,
-        "help": "Maximum acceptable real-time jitter (used in conjunction with SynchronizationMode=HardLimit)"
-    }),
-    "SupportedModesPhy2": dict({
-        "name": "SupportedModesPhy2",
-        "validation_function": validation.is_string,
-        "value": "2|0|1|",
-        "type": Attribute.STRING,
-        "help": "List of modes supported by Phy2"
-    }),
-    "SupportedModesPhy1": dict({
-        "name": "SupportedModesPhy1",
-        "validation_function": validation.is_string,
-        "value": "2|0|1|",
-        "type": Attribute.STRING,
-        "help": "List of modes supported by Phy1"
-    }),
-    "TxGain": dict({
-        "name": "TxGain",
-        "validation_function": validation.is_double,
-        "value": 1.0,
-        "type": Attribute.DOUBLE,
-        "help": "Transmission gain (dB)."
-    }),
-    "MaxPropDelay": dict({
-        "name": "MaxPropDelay",
-        "validation_function": validation.is_time,
-        "value": "2000000000ns",
-        "type": Attribute.STRING,
-        "help": "Maximum possible propagation delay to gateway"
-    }),
-    "Alpha": dict({
-        "name": "Alpha",
-        "validation_function": validation.is_double,
-        "value": 1.0,
-        "type": Attribute.DOUBLE,
-        "help": "A constant representing the tunable parameter in the Gauss-Markov model."
-    }),
-    "X": dict({
-        "name": "X",
-        "validation_function": validation.is_double,
-        "value": 0.0,
-        "type": Attribute.DOUBLE,
-        "help": "The x coordinate of the center of the  disc."
-    }),
-    "ExpirationTime": dict({
-        "name": "ExpirationTime",
-        "validation_function": validation.is_time,
-        "value": "30000000000ns",
-        "type": Attribute.STRING,
-        "help": "Time it takes for learned MAC state entry to expire."
-    }),
-    "GratuitousReply": dict({
-        "name": "GratuitousReply",
-        "validation_function": validation.is_bool,
-        "value": True,
-        "type": Attribute.BOOL,
-        "help": "Indicates whether a gratuitous RREP should be unicast to the node originated route discovery."
-    }),
-    "CcaThreshold": dict({
-        "name": "CcaThreshold",
-        "validation_function": validation.is_double,
-        "value": 10.0,
-        "type": Attribute.DOUBLE,
-        "help": "Aggregate energy of incoming signals to move to CCA Busy state dB"
-    }),
-    "AllowedHelloLoss": dict({
-        "name": "AllowedHelloLoss",
-        "validation_function": validation.is_integer,
-        "value": 2,
-        "type": Attribute.INTEGER,
-        "help": "Number of hello messages which may be loss for valid link."
-    }),
-    "Wind": dict({
-        "name": "Wind",
-        "validation_function": validation.is_double,
-        "value": 1.0,
-        "type": Attribute.DOUBLE,
-        "help": "Wind speed in m/s"
-    }),
-    "Exponent1": dict({
-        "name": "Exponent1",
-        "validation_function": validation.is_double,
-        "value": 3.7999999999999998,
-        "type": Attribute.DOUBLE,
-        "help": "The exponent for the second field."
-    }),
-    "DefaultTtl": dict({
-        "name": "DefaultTtl",
-        "validation_function": validation.is_integer,
-        "value": 64,
-        "type": Attribute.INTEGER,
-        "help": "The TTL value set by default on all outgoing packets generated on this node."
-    }),
-    "TxPowerEnd": dict({
-        "name": "TxPowerEnd",
-        "validation_function": validation.is_double,
-        "value": 16.020600000000002,
-        "type": Attribute.DOUBLE,
-        "help": "Maximum available transmission level (dbm)."
-    }),
-    "DataRate": dict({
-        "name": "DataRate",
-        "validation_function": validation.is_string,
-        "value": "32768bps",
-        "type": Attribute.STRING,
-        "help": "The default data rate for point to point links"
-    }),
-    "MaxSuccessThreshold": dict({
-        "name": "MaxSuccessThreshold",
-        "validation_function": validation.is_integer,
-        "value": 60,
-        "type": Attribute.INTEGER,
-        "help": "Maximum value of the success threshold in the AARF algorithm."
-    }),
-    "MaxRangCorrectionRetries": dict({
-        "name": "MaxRangCorrectionRetries",
-        "validation_function": validation.is_integer,
-        "value": 16,
-        "type": Attribute.INTEGER,
-        "help": "Number of retries on contention Ranging Requests"
-    }),
-    "Dot11MeshHWMPpreqMinInterval": dict({
-        "name": "Dot11MeshHWMPpreqMinInterval",
-        "validation_function": validation.is_time,
-        "value": "102400000ns",
-        "type": Attribute.STRING,
-        "help": "Minimal interval between to successive PREQs"
-    }),
-    "BlackListTimeout": dict({
-        "name": "BlackListTimeout",
-        "validation_function": validation.is_time,
-        "value": "5599999999ns",
-        "type": Attribute.STRING,
-        "help": "Time for which the node is put into the blacklist = RreqRetries * NetTraversalTime"
-    }),
-    "MaxBytes": dict({
-        "name": "MaxBytes",
-        "validation_function": validation.is_integer,
-        "value": 6553500,
-        "type": Attribute.INTEGER,
-        "help": "The maximum number of bytes accepted by this DropTailQueue."
-    }),
-    "MaxAmsduSize": dict({
-        "name": "MaxAmsduSize",
-        "validation_function": validation.is_integer,
-        "value": 7935,
-        "type": Attribute.INTEGER,
-        "help": "Max length in byte of an A-MSDU"
-    }),
-    "Distance2": dict({
-        "name": "Distance2",
-        "validation_function": validation.is_double,
-        "value": 200.0,
-        "type": Attribute.DOUBLE,
-        "help": "Beginning of the third distance field. Default is 200m."
-    }),
-    "MaxFrames": dict({
-        "name": "MaxFrames",
-        "validation_function": validation.is_integer,
-        "value": 1,
-        "type": Attribute.INTEGER,
-        "help": "Maximum number of frames to include in a single RTS"
-    }),
-    "RxGainPhy2": dict({
-        "name": "RxGainPhy2",
-        "validation_function": validation.is_double,
-        "value": 0.0,
-        "type": Attribute.DOUBLE,
-        "help": "Gain added to incoming signal at receiver of Phy2"
-    }),
-    "LayoutType": dict({
-        "name": "LayoutType",
-        "validation_function": validation.is_enum,
-        "value": "RowFirst",
-        "allowed": ["RowFirst",
-            "ColumnFirst"],
-        "type": Attribute.ENUM,
-        "help": "The type of layout."
-    }),
-    "ewndFor54mbps": dict({
-        "name": "ewndFor54mbps",
-        "validation_function": validation.is_integer,
-        "value": 40,
-        "type": Attribute.INTEGER,
-        "help": "ewnd parameter for 54 Mbs data mode"
-    }),
-    "FailureThreshold": dict({
-        "name": "FailureThreshold",
-        "validation_function": validation.is_integer,
-        "value": 2,
-        "type": Attribute.INTEGER,
-        "help": "The number of consecutive transmissions failure to decrease the rate."
-    }),
-    "ewndFor24mbps": dict({
-        "name": "ewndFor24mbps",
-        "validation_function": validation.is_integer,
-        "value": 40,
-        "type": Attribute.INTEGER,
-        "help": "ewnd parameter for 24 Mbs data mode"
-    }),
-    "ewndFor48mbps": dict({
-        "name": "ewndFor48mbps",
-        "validation_function": validation.is_integer,
-        "value": 40,
-        "type": Attribute.INTEGER,
-        "help": "ewnd parameter for 48 Mbs data mode"
-    }),
-    "SendEnable": dict({
-        "name": "SendEnable",
-        "validation_function": validation.is_bool,
-        "value": True,
-        "type": Attribute.BOOL,
-        "help": "Enable or disable the transmitter section of the device."
-    }),
-    "DataMode": dict({
-        "name": "DataMode",
-        "validation_function": validation.is_string,
-        "value": "OfdmRate6Mbps",
-        "type": Attribute.STRING,
-        "help": "The transmission mode to use for every data packet transmission"
-    }),
-    "ErrorUnit": dict({
-        "name": "ErrorUnit",
-        "validation_function": validation.is_enum,
-        "value": "EU_BYTE",
-        "allowed": ["EU_BYTE",
-     "EU_PKT",
-     "EU_BIT"],
-        "type": Attribute.ENUM,
-        "help": "The error unit"
-    }),
-    "IpAddress": dict({
-        "name": "IpAddress",
-        "validation_function": validation.is_ip4_address,
-        "value": None,
-        "type": Attribute.STRING,
-        "help": "The IP address to assign to the tap device,  when in ConfigureLocal mode. This address will override the discovered IP address of the simulated device."
-    }),
-    "MinSuccessThreshold": dict({
-        "name": "MinSuccessThreshold",
-        "validation_function": validation.is_integer,
-        "value": 10,
-        "type": Attribute.INTEGER,
-        "help": "The minimum value for the success threshold in the AARF algorithm."
-    }),
-    "NodeTraversalTime": dict({
-        "name": "NodeTraversalTime",
-        "validation_function": validation.is_time,
-        "value": "40000000ns",
-        "type": Attribute.STRING,
-        "help": "Conservative estimate of the average one hop traversal time for packets and should include queuing delays, interrupt processing times and transfer times."
-    }),
-    "TxPowerPhy2": dict({
-        "name": "TxPowerPhy2",
-        "validation_function": validation.is_double,
-        "value": 190.0,
-        "type": Attribute.DOUBLE,
-        "help": "Transmission output power in dB of Phy2"
-    }),
-    "TxPowerPhy1": dict({
-        "name": "TxPowerPhy1",
-        "validation_function": validation.is_double,
-        "value": 190.0,
-        "type": Attribute.DOUBLE,
-        "help": "Transmission output power in dB of Phy1"
-    }),
-    "ReceiveEnable": dict({
-        "name": "ReceiveEnable",
-        "validation_function": validation.is_bool,
-        "value": True,
-        "type": Attribute.BOOL,
-        "help": "Enable or disable the receiver section of the device."
-    }),
-    "Lambda": dict({
-        "name": "Lambda",
-        "validation_function": validation.is_double,
-        "value": 0.058252400000000003,
-        "type": Attribute.DOUBLE,
-        "help": "The wavelength  (default is 5.15 GHz at 300 000 km/s)."
-    }),
-    "ewndFor6mbps": dict({
-        "name": "ewndFor6mbps",
-        "validation_function": validation.is_integer,
-        "value": 6,
-        "type": Attribute.INTEGER,
-        "help": "ewnd parameter for 6 Mbs data mode"
-    }),
-    "NumberOfRaysPerPath": dict({
-        "name": "NumberOfRaysPerPath",
-        "validation_function": validation.is_integer,
-        "value": 1,
-        "type": Attribute.INTEGER,
-        "help": "The number of rays to use by default for compute the fading coeficent for a given path (default is 1)"
-    }),
-    "HnaInterval": dict({
-        "name": "HnaInterval",
-        "validation_function": validation.is_time,
-        "value": "5000000000ns",
-        "type": Attribute.STRING,
-        "help": "HNA messages emission interval.  Normally it is equal to TcInterval."
-    }),
-    "RanVar": dict({
-        "name": "RanVar",
-        "validation_function": validation.is_string,
-        "value": "Uniform:0:1",
-        "type": Attribute.STRING,
-        "help": "The decision variable attached to this error model."
-    }),
-    "Theta": dict({
-        "name": "Theta",
-        "validation_function": validation.is_string,
-        "value": "Uniform:0:6.283",
-        "type": Attribute.STRING,
-        "help": "A random variable which represents the angle (gradients) of a position in a random disc."
-    }),
-    "UpdateStatistics": dict({
-        "name": "UpdateStatistics",
-        "validation_function": validation.is_time,
-        "value": "100000000ns",
-        "type": Attribute.STRING,
-        "help": "The interval between updating statistics table "
-    }),
-    "Distance1": dict({
-        "name": "Distance1",
-        "validation_function": validation.is_double,
-        "value": 80.0,
-        "type": Attribute.DOUBLE,
-        "help": "Beginning of the second distance field. Default is 80m."
-    }),
-    "MyRouteTimeout": dict({
-        "name": "MyRouteTimeout",
-        "validation_function": validation.is_time,
-        "value": "11199999999ns",
-        "type": Attribute.STRING,
-        "help": "Value of lifetime field in RREP generating by this node = 2 * max(ActiveRouteTimeout, PathDiscoveryTime)"
-    }),
-    "RcvBufSize": dict({
-        "name": "RcvBufSize",
-        "validation_function": validation.is_integer,
-        "value": 131072,
-        "type": Attribute.INTEGER,
-        "help": "PacketSocket maximum receive buffer size (bytes)"
-    }),
-    "RreqRetries": dict({
-        "name": "RreqRetries",
-        "validation_function": validation.is_integer,
-        "value": 2,
-        "type": Attribute.INTEGER,
-        "help": "Maximum number of retransmissions of RREQ to discover a route"
-    }),
-    "MaxNumberOfPeerLinks": dict({
-        "name": "MaxNumberOfPeerLinks",
-        "validation_function": validation.is_integer,
-        "value": 32,
-        "type": Attribute.INTEGER,
-        "help": "Maximum number of peer links"
-    }),
-    "QueueLimit": dict({
-        "name": "QueueLimit",
-        "validation_function": validation.is_integer,
-        "value": 10,
-        "type": Attribute.INTEGER,
-        "help": "Maximum packets to queue at MAC"
-    }),
-    "MinSpeed": dict({
-        "name": "MinSpeed",
-        "validation_function": validation.is_double,
-        "value": 0.29999999999999999,
-        "type": Attribute.DOUBLE,
-        "help": "Minimum speed value, [m/s]"
-    }),
-    "MaxSpeed": dict({
-        "name": "MaxSpeed",
-        "validation_function": validation.is_double,
-        "value": 0.69999999999999996,
-        "type": Attribute.DOUBLE,
-        "help": "Maximum speed value, [m/s]"
-    }),
-    "NumberOfRetryRates": dict({
-        "name": "NumberOfRetryRates",
-        "validation_function": validation.is_integer,
-        "value": 100,
-        "type": Attribute.INTEGER,
-        "help": "Number of retry rates"
-    }),
-    "MaxPacketSize": dict({
-        "name": "MaxPacketSize",
-        "validation_function": validation.is_integer,
-        "value": 1024,
-        "type": Attribute.INTEGER,
-        "help": "The maximum size of a packet."
-    }),
-    "TxPowerLevels": dict({
-        "name": "TxPowerLevels",
-        "validation_function": validation.is_integer,
-        "value": 1,
-        "type": Attribute.INTEGER,
-        "help": "Number of transmission power levels available between TxPowerBase and TxPowerEnd included."
-    }),
-    "RandomStart": dict({
-        "name": "RandomStart",
-        "validation_function": validation.is_time,
-        "value": "500000000ns",
-        "type": Attribute.STRING,
-        "help": "Window when beacon generating starts (uniform random) in seconds"
-    }),
-    "SampleColumn": dict({
-        "name": "SampleColumn",
-        "validation_function": validation.is_double,
-        "value": 10.0,
-        "type": Attribute.DOUBLE,
-        "help": "The number of columns used for sampling"
-    }),
-    "NormalDirection": dict({
-        "name": "NormalDirection",
-        "validation_function": validation.is_string,
-        "value": "Normal:0:1:10",
-        "type": Attribute.STRING,
-        "help": "A gaussian random variable used to calculate the next direction value."
-    }),
-    "MinPause": dict({
-        "name": "MinPause",
-        "validation_function": validation.is_double,
-        "value": 0.0,
-        "type": Attribute.DOUBLE,
-        "help": "Minimum pause value, [s]"
-    }),
-    "TcInterval": dict({
-        "name": "TcInterval",
-        "validation_function": validation.is_time,
-        "value": "5000000000ns",
-        "type": Attribute.STRING,
-        "help": "TC messages emission interval."
-    }),
-    "RfFlag": dict({
-        "name": "RfFlag",
-        "validation_function": validation.is_bool,
-        "value": True,
-        "type": Attribute.BOOL,
-        "help": "Reply and forward flag"
-    }),
-    "CcaThresholdPhy2": dict({
-        "name": "CcaThresholdPhy2",
-        "validation_function": validation.is_double,
-        "value": 10.0,
-        "type": Attribute.DOUBLE,
-        "help": "Aggregate energy of incoming signals to move to CCA Busy state dB of Phy2"
-    }),
-    "CcaThresholdPhy1": dict({
-        "name": "CcaThresholdPhy1",
-        "validation_function": validation.is_double,
-        "value": 10.0,
-        "type": Attribute.DOUBLE,
-        "help": "Aggregate energy of incoming signals to move to CCA Busy state dB of Phy1"
-    }),
-    "MaxQueueLen": dict({
-        "name": "MaxQueueLen",
-        "validation_function": validation.is_integer,
-        "value": 64,
-        "type": Attribute.INTEGER,
-        "help": "Maximum number of packets that we allow a routing protocol to buffer."
-    }),
-    "HeightAboveZ": dict({
-        "name": "HeightAboveZ",
-        "validation_function": validation.is_double,
-        "value": 0.0,
-        "type": Attribute.DOUBLE,
-        "help": "The height of the antenna (m) above the node's Z coordinate"
-    }),
-    "poriFor9mbps": dict({
-        "name": "poriFor9mbps",
-        "validation_function": validation.is_double,
-        "value": 0.1434,
-        "type": Attribute.DOUBLE,
-        "help": "Pori parameter for 9 Mbs data mode"
-    }),
-    "BasicEnergySupplyVoltageV": dict({
-        "name": "BasicEnergySupplyVoltageV",
-        "validation_function": validation.is_double,
-        "value": 3.0,
-        "type": Attribute.DOUBLE,
-        "help": "Initial supply voltage for basic energy source."
-    }),
-    "LostUlMapInterval": dict({
-        "name": "LostUlMapInterval",
-        "validation_function": validation.is_time,
-        "value": "500000000ns",
-        "type": Attribute.STRING,
-        "help": "Time since last received UL-MAP before uplink synchronization is considered lost, maximum is 600."
-    }),
-    "UnicastPreqThreshold": dict({
-        "name": "UnicastPreqThreshold",
-        "validation_function": validation.is_integer,
-        "value": 1,
-        "type": Attribute.INTEGER,
-        "help": "Maximum number of PREQ receivers, when we send a PREQ as a chain of unicasts"
-    }),
-    "poriFor48mbps": dict({
-        "name": "poriFor48mbps",
-        "validation_function": validation.is_double,
-        "value": 0.047,
-        "type": Attribute.DOUBLE,
-        "help": "Pori parameter for 48 Mbs data mode"
-    }),
-    "pmtlFor54mbps": dict({
-        "name": "pmtlFor54mbps",
-        "validation_function": validation.is_double,
-        "value": 0.094,
-        "type": Attribute.DOUBLE,
-        "help": "Pmtl parameter for 54 Mbs data mode"
-    }),
-    "BeaconInterval": dict({
-        "name": "BeaconInterval",
-        "validation_function": validation.is_time,
-        "value": "102400000ns",
-        "type": Attribute.STRING,
-        "help": "Delay between two beacons"
-    }),
-    "IntervalT20": dict({
-        "name": "IntervalT20",
-        "validation_function": validation.is_time,
-        "value": "500000000ns",
-        "type": Attribute.STRING,
-        "help": "Time the SS searches for preambles on a given channel. Minimum is 2 MAC frames"
-    }),
-    "IntervalT21": dict({
-        "name": "IntervalT21",
-        "validation_function": validation.is_time,
-        "value": "10000000000ns",
-        "type": Attribute.STRING,
-        "help": "time the SS searches for (decodable) DL-MAP on a given channel"
-    }),
-    "MeanPitch": dict({
-        "name": "MeanPitch",
-        "validation_function": validation.is_string,
-        "value": "Constant:0",
-        "type": Attribute.STRING,
-        "help": "A random variable used to assign the average pitch."
-    }),
-    "Dot11MeshHWMPrannInterval": dict({
-        "name": "Dot11MeshHWMPrannInterval",
-        "validation_function": validation.is_time,
-        "value": "5120000000ns",
-        "type": Attribute.STRING,
-        "help": "Lifetime of poractive routing information"
-    }),
-    "Distribution": dict({
-        "name": "Distribution",
-        "validation_function": validation.is_string,
-        "value": "Constant:1",
-        "type": Attribute.STRING,
-        "help": "The distribution to choose the initial phases."
-    }),
-    "RxThreshold": dict({
-        "name": "RxThreshold",
-        "validation_function": validation.is_double,
-        "value": 10.0,
-        "type": Attribute.DOUBLE,
-        "help": "Required SNR for signal acquisition in dB"
-    }),
-    "WaypointsLeft": dict({
-        "name": "WaypointsLeft",
-        "validation_function": validation.is_integer,
-        "value": 0,
-        "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-        "type": Attribute.INTEGER,
-        "help": "The number of waypoints remaining."
-    }),
-    "ConfirmTimeout": dict({
-        "name": "ConfirmTimeout",
-        "validation_function": validation.is_time,
-        "value": "40960000ns",
-        "type": Attribute.STRING,
-        "help": "Confirm timeout"
-    }),
-    "ActiveRouteTimeout": dict({
-        "name": "ActiveRouteTimeout",
-        "validation_function": validation.is_time,
-        "value": "3000000000ns",
-        "type": Attribute.STRING,
-        "help": "Period of time during which the route is considered to be valid"
-    }),
-    "InitialRangInterval": dict({
-        "name": "InitialRangInterval",
-        "validation_function": validation.is_time,
-        "value": "50000000ns",
-        "type": Attribute.STRING,
-        "help": "Time between Initial Ranging regions assigned by the BS. Maximum is 2s"
-    }),
-    "ewndFor18mbps": dict({
-        "name": "ewndFor18mbps",
-        "validation_function": validation.is_integer,
-        "value": 20,
-        "type": Attribute.INTEGER,
-        "help": "ewnd parameter for 18 Mbs data mode"
-    }),
-    "FlowInterruptionsBinWidth": dict({
-        "name": "FlowInterruptionsBinWidth",
-        "validation_function": validation.is_double,
-        "value": 0.25,
-        "type": Attribute.DOUBLE,
-        "help": "The width used in the flowInterruptions histogram."
-    }),
-    "MinY": dict({
-        "name": "MinY",
-        "validation_function": validation.is_double,
-        "value": 0.0,
-        "type": Attribute.DOUBLE,
-        "help": "The y coordinate where the grid starts."
-    }),
-    "poriFor12mbps": dict({
-        "name": "poriFor12mbps",
-        "validation_function": validation.is_double,
-        "value": 0.18609999999999999,
-        "type": Attribute.DOUBLE,
-        "help": "Pori parameter for 12 Mbs data mode"
-    }),
-    "UnicastDataThreshold": dict({
-        "name": "UnicastDataThreshold",
-        "validation_function": validation.is_integer,
-        "value": 1,
-        "type": Attribute.INTEGER,
-        "help": "Maximum number ofbroadcast receivers, when we send a broadcast as a chain of unicasts"
-    }),
-    "SuccessRatio": dict({
-        "name": "SuccessRatio",
-        "validation_function": validation.is_double,
-        "value": 0.10000000000000001,
-        "type": Attribute.DOUBLE,
-        "help": "Ratio of maximum erroneous transmissions needed to switch to a higher rate"
-    }),
-    "SupportedModes": dict({
-        "name": "SupportedModes",
-        "validation_function": validation.is_string,
-        "value": "2|0|1|",
-        "type": Attribute.STRING,
-        "help": "List of modes supported by this PHY"
-    }),
-    "CaptureSize": dict({
-        "name": "CaptureSize",
-        "validation_function": validation.is_integer,
-        "value": 65535,
-        "type": Attribute.INTEGER,
-        "help": "Maximum length of captured packets (cf. pcap snaplen)"
-    }),
-    "NetTraversalTime": dict({
-        "name": "NetTraversalTime",
-        "validation_function": validation.is_time,
-        "value": "2799999999ns",
-        "type": Attribute.STRING,
-        "help": "Estimate of the average net traversal time = 2 * NodeTraversalTime * NetDiameter"
-    }),
-    "Lifetime": dict({
-        "name": "Lifetime",
-        "validation_function": validation.is_time,
-        "value": "120000000000ns",
-        "type": Attribute.STRING,
-        "help": "The lifetime of the routing enrty"
-    }),
-    "DeletePeriod": dict({
-        "name": "DeletePeriod",
-        "validation_function": validation.is_time,
-        "value": "15000000000ns",
-        "type": Attribute.STRING,
-        "help": "DeletePeriod is intended to provide an upper bound on the time for which an upstream node A can have a neighbor B as an active next hop for destination D, while B has invalidated the route to D. = 5 * max (HelloInterval, ActiveRouteTimeout)"
-    }),
-    "MaxPerHopDelay": dict({
-        "name": "MaxPerHopDelay",
-        "validation_function": validation.is_time,
-        "value": "10000000000ns",
-        "type": Attribute.STRING,
-        "help": "The maximum per-hop delay that should be considered.  Packets still not received after this delay are to be considered lost."
-    }),
-    "NumberOfOscillatorsPerRay": dict({
-        "name": "NumberOfOscillatorsPerRay",
-        "validation_function": validation.is_integer,
-        "value": 4,
-        "type": Attribute.INTEGER,
-        "help": "The number of oscillators to use by default for compute the coeficent for a given ray of a given path (default is 4)"
-    }),
-    "MinRetryRate": dict({
-        "name": "MinRetryRate",
-        "validation_function": validation.is_double,
-        "value": 0.01,
-        "type": Attribute.DOUBLE,
-        "help": "Smallest allowed RTS retry rate"
-    }),
-    "Pause": dict({
-        "name": "Pause",
-        "validation_function": validation.is_string,
-        "value": "Constant:2",
-        "type": Attribute.STRING,
-        "help": "A random variable used to pick the pause of a random waypoint model."
-    }),
-    "Exponent": dict({
-        "name": "Exponent",
-        "validation_function": validation.is_double,
-        "value": 3.0,
-        "type": Attribute.DOUBLE,
-        "help": "The exponent of the Path Loss propagation model"
-    }),
-    "MidInterval": dict({
-        "name": "MidInterval",
-        "validation_function": validation.is_time,
-        "value": "5000000000ns",
-        "type": Attribute.STRING,
-        "help": "MID messages emission interval.  Normally it is equal to TcInterval."
-    }),
-    "pmtlFor9mbps": dict({
-        "name": "pmtlFor9mbps",
-        "validation_function": validation.is_double,
-        "value": 0.39319999999999999,
-        "type": Attribute.DOUBLE,
-        "help": "Pmtl parameter for 9 Mbs data mode"
-    }),
-    "Dot11MeshHWMPnetDiameterTraversalTime": dict({
-        "name": "Dot11MeshHWMPnetDiameterTraversalTime",
-        "validation_function": validation.is_time,
-        "value": "102400000ns",
-        "type": Attribute.STRING,
-        "help": "Time we suppose the packet to go from one edge of the network to another"
-    }),
-    "TxPowerStart": dict({
-        "name": "TxPowerStart",
-        "validation_function": validation.is_double,
-        "value": 16.020600000000002,
-        "type": Attribute.DOUBLE,
-        "help": "Minimum available transmission level (dbm)."
-    }),
-    "ewndFor9mbps": dict({
-        "name": "ewndFor9mbps",
-        "validation_function": validation.is_integer,
-        "value": 10,
-        "type": Attribute.INTEGER,
-        "help": "ewnd parameter for 9 Mbs data mode"
-    }),
-    "IntervalT12": dict({
-        "name": "IntervalT12",
-        "validation_function": validation.is_time,
-        "value": "10000000000ns",
-        "type": Attribute.STRING,
-        "help": "Wait for UCD descriptor.Maximum is 5*MaxUcdInterval"
-    }),
-    "NormalPitch": dict({
-        "name": "NormalPitch",
-        "validation_function": validation.is_string,
-        "value": "Normal:0:1:10",
-        "type": Attribute.STRING,
-        "help": "A gaussian random variable used to calculate the next pitch value."
-    }),
-    "PacketWindowSize": dict({
-        "name": "PacketWindowSize",
-        "validation_function": validation.is_integer,
-        "value": 32,
-        "type": Attribute.INTEGER,
-        "help": "The size of the window used to compute the packet loss. This value should be a multiple of 8."
-    }),
-    "Start": dict({
-        "name": "Start",
-        "validation_function": validation.is_time,
-        "value": "0ns",
-        "type": Attribute.STRING,
-        "help": "The simulation time at which to spin up the device thread."
-    }),
-    "MaxDcdInterval": dict({
-        "name": "MaxDcdInterval",
-        "validation_function": validation.is_time,
-        "value": "10000000000ns",
-        "type": Attribute.STRING,
-        "help": "Maximum time between transmission of DCD messages. Maximum is 10s"
-    }),
-    "ChannelNumber": dict({
-        "name": "ChannelNumber",
-        "validation_function": validation.is_integer,
-        "value": 1,
-        "type": Attribute.INTEGER,
-        "help": "Channel center frequency = Channel starting frequency + 5 MHz * (nch - 1)"
-    }),
-    "MaxPacketFailure": dict({
-        "name": "MaxPacketFailure",
-        "validation_function": validation.is_integer,
-        "value": 2,
-        "type": Attribute.INTEGER,
-        "help": "Maximum number of failed packets before link will be closed"
-    }),
-    "AddCreditThreshold": dict({
-        "name": "AddCreditThreshold",
-        "validation_function": validation.is_integer,
-        "value": 10,
-        "type": Attribute.INTEGER,
-        "help": "Add credit threshold"
-    }),
-    "Basic": dict({
-        "name": "Basic",
-        "validation_function": validation.is_bool,
-        "value": False,
-        "type": Attribute.BOOL,
-        "help": "If true the RRAA-BASIC algorithm will be used, otherwise the RRAA wil be used"
-    }),
-    "UcdInterval": dict({
-        "name": "UcdInterval",
-        "validation_function": validation.is_time,
-        "value": "3000000000ns",
-        "type": Attribute.STRING,
-        "help": "Time between transmission of UCD messages. Maximum value is 10s."
-    }),
-    "DestinationOnly": dict({
-        "name": "DestinationOnly",
-        "validation_function": validation.is_bool,
-        "value": False,
-        "type": Attribute.BOOL,
-        "help": "Indicates only the destination may respond to this RREQ."
-    }),
-    "Local": dict({
-        "name": "Local",
-        "validation_function": ns3_validation.is_address,
-        "value": None,
-        "type": Attribute.STRING,
-        "help": "The Address on which to Bind the rx socket."
-    }),
-    "NumberOfNodes": dict({
-        "name": "NumberOfNodes",
-        "validation_function": validation.is_integer,
-        "value": 10,
-        "type": Attribute.INTEGER,
-        "help": "Number of non-gateway nodes in this gateway's neighborhood"
-    }),
-    "MaxPause": dict({
-        "name": "MaxPause",
-        "validation_function": validation.is_double,
-        "value": 0.0,
-        "type": Attribute.DOUBLE,
-        "help": "Maximum pause value, [s]"
-    }),
-    "MaxBeaconLoss": dict({
-        "name": "MaxBeaconLoss",
-        "validation_function": validation.is_integer,
-        "value": 2,
-        "type": Attribute.INTEGER,
-        "help": "Maximum number of lost beacons before link will be closed"
-    }),
-    "MaxY": dict({
-        "name": "MaxY",
-        "validation_function": validation.is_double,
-        "value": 1.0,
-        "type": Attribute.DOUBLE,
-        "help": "Maximum Y value of traveling region, [m]"
-    }),
-    "MaxReservations": dict({
-        "name": "MaxReservations",
-        "validation_function": validation.is_integer,
-        "value": 10,
-        "type": Attribute.INTEGER,
-        "help": "Maximum number of reservations to accept per cycle"
-    }),
-    "OnTime": dict({
-        "name": "OnTime",
-        "validation_function": validation.is_string,
-        "value": "Constant:1",
-        "type": Attribute.STRING,
-        "help": "A RandomVariable used to pick the duration of the 'On' state."
-    }),
-    "RxGainPhy1": dict({
-        "name": "RxGainPhy1",
-        "validation_function": validation.is_double,
-        "value": 0.0,
-        "type": Attribute.DOUBLE,
-        "help": "Gain added to incoming signal at receiver of Phy1"
-    }),
-    "Gateway": dict({
-        "name": "Gateway",
-        "validation_function": validation.is_ip4_address,
-        "value": None,
-        "type": Attribute.STRING,
-        "help": "The IP address of the default gateway to assign to the host machine, when in ConfigureLocal mode."
-    }),
-    "GridWidth": dict({
-        "name": "GridWidth",
-        "validation_function": validation.is_integer,
-        "value": 10,
-        "type": Attribute.INTEGER,
-        "help": "The number of objects layed out on a line."
-    }),
-    "NormalVelocity": dict({
-        "name": "NormalVelocity",
-        "validation_function": validation.is_string,
-        "value": "Normal:0:1:10",
-        "type": Attribute.STRING,
-        "help": "A gaussian random variable used to calculate the next velocity value."
-    }),
-    "ReferenceDistance": dict({
-        "name": "ReferenceDistance",
-        "validation_function": validation.is_double,
-        "value": 1.0,
-        "type": Attribute.DOUBLE,
-        "help": "The distance at which the reference loss is calculated (m)"
-    }),
-    "m1": dict({
-        "name": "m1",
-        "validation_function": validation.is_double,
-        "value": 0.75,
-        "type": Attribute.DOUBLE,
-        "help": "m1 for distances smaller than Distance2. Default is 0.75."
-    }),
-    "m0": dict({
-        "name": "m0",
-        "validation_function": validation.is_double,
-        "value": 1.5,
-        "type": Attribute.DOUBLE,
-        "help": "m0 for distances smaller than Distance1. Default is 1.5."
-    }),
-    "BroadcastInterval": dict({
-        "name": "BroadcastInterval",
-        "validation_function": validation.is_time,
-        "value": "5000000000ns",
-        "type": Attribute.STRING,
-        "help": "How often we must send broadcast packets"
-    }),
-    "Variable": dict({
-        "name": "Variable",
-        "validation_function": validation.is_string,
-        "value": "Uniform:0:1",
-        "type": Attribute.STRING,
-        "help": "The random variable which generates random delays (s)."
-    }),
-    "MacAddress": dict({
-        "name": "MacAddress",
-        "validation_function": validation.is_string,
-        "value": "ff:ff:ff:ff:ff:ff",
-        "type": Attribute.STRING,
-        "help": "The MAC address to assign to the tap device, when in ConfigureLocal mode. This address will override the discovered MAC address of the simulated device."
-    }),
-    "MaxBeaconShiftValue": dict({
-        "name": "MaxBeaconShiftValue",
-        "validation_function": validation.is_integer,
-        "value": 15,
-        "type": Attribute.INTEGER,
-        "help": "Maximum number of TUs for beacon shifting"
-    }),
-    "MeanDirection": dict({
-        "name": "MeanDirection",
-        "validation_function": validation.is_string,
-        "value": "Uniform:0:6.28319",
-        "type": Attribute.STRING,
-        "help": "A random variable used to assign the average direction."
-    }),
-    "NextHopWait": dict({
-        "name": "NextHopWait",
-        "validation_function": validation.is_time,
-        "value": "50000000ns",
-        "type": Attribute.STRING,
-        "help": "Period of our waiting for the neighbour's RREP_ACK = 10 ms + NodeTraversalTime"
-    }),
-    "EnableBeaconCollisionAvoidance": dict({
-        "name": "EnableBeaconCollisionAvoidance",
-        "validation_function": validation.is_bool,
-        "value": True,
-        "type": Attribute.BOOL,
-        "help": "Enable/Disable Beacon collision avoidance."
-    }),
-    "TimeoutBuffer": dict({
-        "name": "TimeoutBuffer",
-        "validation_function": validation.is_integer,
-        "value": 2,
-        "type": Attribute.INTEGER,
-        "help": "Its purpose is to provide a buffer for the timeout so that if the RREP is delayed due to congestion, a timeout is less likely to occur while the RREP is still en route back to the source."
-    }),
-    "PeriodicEnergyUpdateInterval": dict({
-        "name": "PeriodicEnergyUpdateInterval",
-        "validation_function": validation.is_time,
-        "value": "1000000000ns",
-        "type": Attribute.STRING,
-        "help": "Time between two consecutive periodic energy updates."
-    }),
-    "RxCurrentA": dict({
-        "name": "RxCurrentA",
-        "validation_function": validation.is_double,
-        "value": 0.019699999999999999,
-        "type": Attribute.DOUBLE,
-        "help": "The radio Rx current in Ampere."
-    }),
-    "LocalIpv6": dict({
-        "name": "LocalIpv6",
-        "validation_function": validation.is_string,
-        "value": "0000:0000:0000:0000:0000:0000:0000:0000",
-        "type": Attribute.STRING,
-        "help": "Local Ipv6Address of the sender"
-    }),
-    "Remote": dict({
-        "name": "Remote",
-        "validation_function": ns3_validation.is_address,
-        "value": None,
-        "type": Attribute.STRING,
-        "help": "The address of the destination"
-    }),
-    "SSAntennaHeight": dict({
-        "name": "SSAntennaHeight",
-        "validation_function": validation.is_double,
-        "value": 3.0,
-        "type": Attribute.DOUBLE,
-        "help": " SS Antenna Height (default is 3m)."
-    }),
-    "MeanVelocity": dict({
-        "name": "MeanVelocity",
-        "validation_function": validation.is_string,
-        "value": "Uniform:0:1",
-        "type": Attribute.STRING,
-        "help": "A random variable used to assign the average velocity."
-    }),
-    "NumberOfRates": dict({
-        "name": "NumberOfRates",
-        "validation_function": validation.is_integer,
-        "value": 0,
-        "type": Attribute.INTEGER,
-        "help": "Number of rate divisions supported by each PHY"
-    }),
-    "BSAntennaHeight": dict({
-        "name": "BSAntennaHeight",
-        "validation_function": validation.is_double,
-        "value": 50.0,
-        "type": Attribute.DOUBLE,
-        "help": " BS Antenna Height (default is 50m)."
-    }),
-    "Interval": dict({
-        "name": "Interval",
-        "validation_function": validation.is_time,
-        "value": "1000000000ns",
-        "type": Attribute.STRING,
-        "help": "The time to wait between packets"
-    }),
-    "CcaMode1Threshold": dict({
-        "name": "CcaMode1Threshold",
-        "validation_function": validation.is_double,
-        "value": -99.0,
-        "type": Attribute.DOUBLE,
-        "help": "The energy of a received signal should be higher than this threshold (dbm) to allow the PHY layer to declare CCA BUSY state"
-    }),
-    "Mtu": dict({
-        "name": "Mtu",
-        "validation_function": validation.is_integer,
-        "value": 1500,
-        "type": Attribute.INTEGER,
-        "help": "The MAC-level Maximum Transmission Unit"
-    }),
-    "pmtlFor12mbps": dict({
-        "name": "pmtlFor12mbps",
-        "validation_function": validation.is_double,
-        "value": 0.2868,
-        "type": Attribute.DOUBLE,
-        "help": "Pmtl parameter for 12 Mbs data mode"
-    }),
-    "MaxRtsWnd": dict({
-        "name": "MaxRtsWnd",
-        "validation_function": validation.is_integer,
-        "value": 40,
-        "type": Attribute.INTEGER,
-        "help": "Maximum value for Rts window of Aarf-CD"
-    }),
-    "HoldingTimeout": dict({
-        "name": "HoldingTimeout",
-        "validation_function": validation.is_time,
-        "value": "40960000ns",
-        "type": Attribute.STRING,
-        "help": "Holding timeout"
-    }),
-    "AssocRequestTimeout": dict({
-        "name": "AssocRequestTimeout",
-        "validation_function": validation.is_time,
-        "value": "500000000ns",
-        "type": Attribute.STRING,
-        "help": "The interval between two consecutive assoc request attempts."
-    }),
-    "Timeout": dict({
-        "name": "Timeout",
-        "validation_function": validation.is_time,
-        "value": "50000000ns",
-        "type": Attribute.STRING,
-        "help": "Timeout for the RRAA BASIC loss estimaton block (s)"
-    }),
-    "Dot11MeshHWMPmaxPREQretries": dict({
-        "name": "Dot11MeshHWMPmaxPREQretries",
-        "validation_function": validation.is_integer,
-        "value": 3,
-        "type": Attribute.INTEGER,
-        "help": "Maximum number of retries before we suppose the destination to be unreachable"
-    }),
-    "Z": dict({
-        "name": "Z",
-        "validation_function": validation.is_string,
-        "value": "Uniform:0:1",
-        "type": Attribute.STRING,
-        "help": "A random variable which represents the z coordinate of a position in a random box."
-    }),
-    "CW": dict({
-        "name": "CW",
-        "validation_function": validation.is_integer,
-        "value": 10,
-        "type": Attribute.INTEGER,
-        "help": "The MAC parameter CW"
-    }),
-    "MaxPacketNumber": dict({
-        "name": "MaxPacketNumber",
-        "validation_function": validation.is_integer,
-        "value": 400,
-        "type": Attribute.INTEGER,
-        "help": "If a packet arrives when there are already this number of packets, it is dropped."
-    }),
-    "RemoteIpv6": dict({
-        "name": "RemoteIpv6",
-        "validation_function": validation.is_string,
-        "value": "0000:0000:0000:0000:0000:0000:0000:0000",
-        "type": Attribute.STRING,
-        "help": "The Ipv6Address of the outbound packets"
-    }),
-    "RttEstimatorFactory": dict({
-        "name": "RttEstimatorFactory",
-        "validation_function": validation.is_string,
-        "value": "ns3::RttMeanDeviation[]",
-        "type": Attribute.STRING,
-        "help": "How RttEstimator objects are created."
-    }),
-    "TxPower": dict({
-        "name": "TxPower",
-        "validation_function": validation.is_double,
-        "value": 190.0,
-        "type": Attribute.DOUBLE,
-        "help": "Transmission output power in dB"
-    }),
-    "pmtlFor36mbps": dict({
-        "name": "pmtlFor36mbps",
-        "validation_function": validation.is_double,
-        "value": 0.33629999999999999,
-        "type": Attribute.DOUBLE,
-        "help": "Pmtl parameter for 36 Mbs data mode"
-    }),
-    "MinRtsWnd": dict({
-        "name": "MinRtsWnd",
-        "validation_function": validation.is_integer,
-        "value": 1,
-        "type": Attribute.INTEGER,
-        "help": "Minimum value for Rts window of Aarf-CD"
-    }),
-    "Frequency": dict({
-        "name": "Frequency",
-        "validation_function": validation.is_double,
-        "value": 2300000000.0,
-        "type": Attribute.DOUBLE,
-        "help": "The Frequency  (default is 2.3 GHz)."
-    }),
-    "Willingness": dict({
-        "name": "Willingness",
-        "validation_function": validation.is_enum,
-        "value": "default",
-        "allowed": ["never",
-     "low",
-     "default",
-     "high",
-     "always"],
-        "type": Attribute.ENUM,
-        "help": "Willingness of a node to carry and forward traffic for other nodes."
-    }),
-    "DoFlag": dict({
-        "name": "DoFlag",
-        "validation_function": validation.is_bool,
-        "value": False,
-        "type": Attribute.BOOL,
-        "help": "Destination only HWMP flag"
-    }),
-    "BlockAckThreshold": dict({
-        "name": "BlockAckThreshold",
-        "validation_function": validation.is_integer,
-        "value": 0,
-        "type": Attribute.INTEGER,
-        "help": "If number of packets in this queue reaches this value, block ack mechanism is used. If this value is 0, block ack is never used."
-    }),
-    "TimerK": dict({
-        "name": "TimerK",
-        "validation_function": validation.is_double,
-        "value": 2.0,
-        "type": Attribute.DOUBLE,
-        "help": "Multiplication factor for the timer threshold in the AARF algorithm."
-    }),
-    "Period": dict({
-        "name": "Period",
-        "validation_function": validation.is_time,
-        "value": "1000000000ns",
-        "type": Attribute.STRING,
-        "help": "the period (=1/frequency)"
-    }),
-    "Library": dict({
-        "name": "Library",
-        "validation_function": validation.is_string,
-        "value": "liblinux2.6.26.so",
-        "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-        "type": Attribute.STRING,
-        "help": "Set the linux library to be used to create the stack"
-    }),
-    "DcdInterval": dict({
-        "name": "DcdInterval",
-        "validation_function": validation.is_time,
-        "value": "3000000000ns",
-        "type": Attribute.STRING,
-        "help": "Time between transmission of DCD messages. Maximum value is 10s."
-    }),
-    "SpreadCoef": dict({
-        "name": "SpreadCoef",
-        "validation_function": validation.is_double,
-        "value": 1.5,
-        "type": Attribute.DOUBLE,
-        "help": "Spreading coefficient used in calculation of Thorp's approximation"
-    }),
-    "ewndFor36mbps": dict({
-        "name": "ewndFor36mbps",
-        "validation_function": validation.is_integer,
-        "value": 40,
-        "type": Attribute.INTEGER,
-        "help": "ewnd parameter for 36 Mbs data mode"
-    }),
-    "MaxTtl": dict({
-        "name": "MaxTtl",
-        "validation_function": validation.is_integer,
-        "value": 32,
-        "type": Attribute.INTEGER,
-        "help": "Initial value of Time To Live field"
-    }),
-    "MinDistance": dict({
-        "name": "MinDistance",
-        "validation_function": validation.is_double,
-        "value": 0.5,
-        "type": Attribute.DOUBLE,
-        "help": "The distance under which the propagation model refuses to give results (m)"
-    }),
-    "RxNoiseFigure": dict({
-        "name": "RxNoiseFigure",
-        "validation_function": validation.is_double,
-        "value": 7.0,
-        "type": Attribute.DOUBLE,
-        "help": "Loss (dB) in the Signal-to-Noise-Ratio due to non-idealities in the receiver. According to Wikipedia (http://en.wikipedia.org/wiki/Noise_figure), this is 'the difference in decibels (dB) between the noise output of the actual receiver to the noise output of an  ideal receiver with the same overall gain and bandwidth when the receivers  are connected to sources at the standard noise temperature T0 (usually 290 K)'."
-    }),
-    "DopplerFreq": dict({
-        "name": "DopplerFreq",
-        "validation_function": validation.is_double,
-        "value": 0.0,
-        "type": Attribute.DOUBLE,
-        "help": "The doppler frequency in Hz (f_d = v / lambda = v * f / c), the default is 0)"
-    }),
-    "RetryTimeout": dict({
-        "name": "RetryTimeout",
-        "validation_function": validation.is_time,
-        "value": "40960000ns",
-        "type": Attribute.STRING,
-        "help": "Retry timeout"
-    }),
-    "ControlMode": dict({
-        "name": "ControlMode",
-        "validation_function": validation.is_string,
-        "value": "OfdmRate6Mbps",
-        "type": Attribute.STRING,
-        "help": "The transmission mode to use for every control packet transmission."
-    }),
-    "Size": dict({
-        "name": "Size",
-        "validation_function": validation.is_integer,
-        "value": 56,
-        "type": Attribute.INTEGER,
-        "help": "The number of data bytes to be sent, real packet will be 8 (ICMP) + 20 (IP) bytes longer."
-    }),
-    "ErrorRate": dict({
-        "name": "ErrorRate",
-        "validation_function": validation.is_double,
-        "value": 0.0,
-        "type": Attribute.DOUBLE,
-        "help": "The error rate."
-    }),
-    "PacketLength": dict({
-        "name": "PacketLength",
-        "validation_function": validation.is_double,
-        "value": 1200.0,
-        "type": Attribute.DOUBLE,
-        "help": "The packet length used for calculating mode TxTime"
-    }),
-    "MaxCost": dict({
-        "name": "MaxCost",
-        "validation_function": validation.is_integer,
-        "value": 32,
-        "type": Attribute.INTEGER,
-        "help": "Cost threshold after which packet will be dropped"
-    }),
-    "SegmentSize": dict({
-        "name": "SegmentSize",
-        "validation_function": validation.is_double,
-        "value": 6000.0,
-        "type": Attribute.DOUBLE,
-        "help": "The largest allowable segment size packet"
-    }),
-    "poriFor18mbps": dict({
-        "name": "poriFor18mbps",
-        "validation_function": validation.is_double,
-        "value": 0.13250000000000001,
-        "type": Attribute.DOUBLE,
-        "help": "Pori parameter for 18 Mbs data mode"
-    }),
-    "UnicastPerrThreshold": dict({
-        "name": "UnicastPerrThreshold",
-        "validation_function": validation.is_integer,
-        "value": 32,
-        "type": Attribute.INTEGER,
-        "help": "Maximum number of PERR receivers, when we send a PERR as a chain of unicasts"
-    }),
-    "EnableHello": dict({
-        "name": "EnableHello",
-        "validation_function": validation.is_bool,
-        "value": True,
-        "type": Attribute.BOOL,
-        "help": "Indicates whether a hello messages enable."
-    }),
-    "BeaconGeneration": dict({
-        "name": "BeaconGeneration",
-        "validation_function": validation.is_bool,
-        "value": True,
-        "type": Attribute.BOOL,
-        "help": "Whether or not beacons are generated."
-    }),
-    "MaxUcdInterval": dict({
-        "name": "MaxUcdInterval",
-        "validation_function": validation.is_time,
-        "value": "10000000000ns",
-        "type": Attribute.STRING,
-        "help": "Maximum time between transmission of UCD messages. Maximum is 10s"
-    }),
-    "Dot11MeshHWMPperrMinInterval": dict({
-        "name": "Dot11MeshHWMPperrMinInterval",
-        "validation_function": validation.is_time,
-        "value": "102400000ns",
-        "type": Attribute.STRING,
-        "help": "Minimal interval between to successive PREQs"
-    }),
-    "Delay": dict({
-        "name": "Delay",
-        "validation_function": validation.is_time,
-        "value": "0ns",
-        "type": Attribute.STRING,
-        "help": "Transmission delay through the channel"
-    }),
-    "SIFS": dict({
-        "name": "SIFS",
-        "validation_function": validation.is_time,
-        "value": "200000000ns",
-        "type": Attribute.STRING,
-        "help": "Spacing to give between frames (this should match gateway)"
-    }),
-    "MaxRange": dict({
-        "name": "MaxRange",
-        "validation_function": validation.is_double,
-        "value": 250.0,
-        "type": Attribute.DOUBLE,
-        "help": "Maximum Transmission Range (meters)"
-    }),
-    "LostDlMapInterval": dict({
-        "name": "LostDlMapInterval",
-        "validation_function": validation.is_time,
-        "value": "500000000ns",
-        "type": Attribute.STRING,
-        "help": "Time since last received DL-MAP message before downlink synchronization is considered lost. Maximum is 600ms"
-    }),
-    "IntervalT2": dict({
-        "name": "IntervalT2",
-        "validation_function": validation.is_time,
-        "value": "10000000000ns",
-        "type": Attribute.STRING,
-        "help": "Wait for broadcast ranging timeout, i.e., wait for initial ranging opportunity. Maximum is 5*Ranging interval"
-    }),
-    "TurnOffRtsAfterRateDecrease": dict({
-        "name": "TurnOffRtsAfterRateDecrease",
-        "validation_function": validation.is_bool,
-        "value": True,
-        "type": Attribute.BOOL,
-        "help": "If true the RTS mechanism will be turned off when the rate will be decreased"
-    }),
-    "MaxContentionRangingRetries": dict({
-        "name": "MaxContentionRangingRetries",
-        "validation_function": validation.is_integer,
-        "value": 16,
-        "type": Attribute.INTEGER,
-        "help": "Number of retries on contention Ranging Requests"
-    }),
-    "DAD": dict({
-        "name": "DAD",
-        "validation_function": validation.is_bool,
-        "value": True,
-        "type": Attribute.BOOL,
-        "help": "Always do DAD check."
-    }),
-    "RemotePort": dict({
-        "name": "RemotePort",
-        "validation_function": validation.is_integer,
-        "value": 0,
-        "type": Attribute.INTEGER,
-        "help": "The destination port of the outbound packets"
-    }),
-    "Distance0": dict({
-        "name": "Distance0",
-        "validation_function": validation.is_double,
-        "value": 1.0,
-        "type": Attribute.DOUBLE,
-        "help": "Beginning of the first (near) distance field"
-    }),
-    "FlowInterruptionsMinTime": dict({
-        "name": "FlowInterruptionsMinTime",
-        "validation_function": validation.is_time,
-        "value": "500000000ns",
-        "type": Attribute.STRING,
-        "help": "The minimum inter-arrival time that is considered a flow interruption."
-    }),
-    "PacketSize": dict({
-        "name": "PacketSize",
-        "validation_function": validation.is_integer,
-        "value": 512,
-        "type": Attribute.INTEGER,
-        "help": "The size of packets sent in on state"
-    }),
-    "LookAroundRate": dict({
-        "name": "LookAroundRate",
-        "validation_function": validation.is_double,
-        "value": 10.0,
-        "type": Attribute.DOUBLE,
-        "help": "the percentage to try other rates"
-    }),
-    "NumberOfHops": dict({
-        "name": "NumberOfHops",
-        "validation_function": validation.is_integer,
-        "value": 13,
-        "type": Attribute.INTEGER,
-        "help": "Number of frequencies in hopping pattern"
-    }),
-    "Dot11MeshHWMPpathToRootInterval": dict({
-        "name": "Dot11MeshHWMPpathToRootInterval",
-        "validation_function": validation.is_time,
-        "value": "2048000000ns",
-        "type": Attribute.STRING,
-        "help": "Interval between two successive proactive PREQs"
-    }),
-    "ProbeRequestTimeout": dict({
-        "name": "ProbeRequestTimeout",
-        "validation_function": validation.is_time,
-        "value": "50000000ns",
-        "type": Attribute.STRING,
-        "help": "The interval between two consecutive probe request attempts."
-    }),
-    "RreqRateLimit": dict({
-        "name": "RreqRateLimit",
-        "validation_function": validation.is_integer,
-        "value": 10,
-        "type": Attribute.INTEGER,
-        "help": "Maximum number of RREQ per second."
-    }),
-    "RangReqOppSize": dict({
-        "name": "RangReqOppSize",
-        "validation_function": validation.is_integer,
-        "value": 8,
-        "type": Attribute.INTEGER,
-        "help": "The ranging opportunity size in symbols"
-    }),
-    "BwReqOppSize": dict({
-        "name": "BwReqOppSize",
-        "validation_function": validation.is_integer,
-        "value": 2,
-        "type": Attribute.INTEGER,
-        "help": "The bandwidth request opportunity size in symbols"
-    }),
-    "Rho": dict({
-        "name": "Rho",
-        "validation_function": validation.is_string,
-        "value": "Uniform:0:200",
-        "type": Attribute.STRING,
-        "help": "A random variable which represents the radius of a position in a random disc."
-    }),
-    "Address": dict({
-        "name": "Address",
-        "validation_function": validation.is_string,
-        "value": "ff:ff:ff:ff:ff:ff",
-        "type": Attribute.STRING,
-        "help": "The MAC address of this device."
-    }),
-    "RetryStep": dict({
-        "name": "RetryStep",
-        "validation_function": validation.is_double,
-        "value": 0.01,
-        "type": Attribute.DOUBLE,
-        "help": "Retry rate increment"
-    }),
-    "m2": dict({
-        "name": "m2",
-        "validation_function": validation.is_double,
-        "value": 0.75,
-        "type": Attribute.DOUBLE,
-        "help": "m2 for distances greater than Distance2. Default is 0.75."
-    }),
-    "Distance": dict({
-        "name": "Distance",
-        "validation_function": validation.is_double,
-        "value": 1.0,
-        "type": Attribute.DOUBLE,
-        "help": "Change current direction and speed after moving for this distance."
-    }),
-    "InterframeGap": dict({
-        "name": "InterframeGap",
-        "validation_function": validation.is_time,
-        "value": "0ns",
-        "type": Attribute.STRING,
-        "help": "The time to wait between packet (frame) transmissions"
-    }),
-    "EnableBroadcast": dict({
-        "name": "EnableBroadcast",
-        "validation_function": validation.is_bool,
-        "value": True,
-        "type": Attribute.BOOL,
-        "help": "Indicates whether a broadcast data packets forwarding enable."
-    }),
-    "HelloInterval": dict({
-        "name": "HelloInterval",
-        "validation_function": validation.is_time,
-        "value": "2000000000ns",
-        "type": Attribute.STRING,
-        "help": "HELLO messages emission interval."
-    }),
-    "RemoteAddress": dict({
-        "name": "RemoteAddress",
-        "validation_function": validation.is_ip4_address,
-        "value": None,
-        "type": Attribute.STRING,
-        "help": "The destination Ipv4Address of the outbound packets"
-    }),
-    "Rss": dict({
-        "name": "Rss",
-        "validation_function": validation.is_double,
-        "value": -150.0,
-        "type": Attribute.DOUBLE,
-        "help": "The fixed receiver Rss."
-    }),
-    "EWMA": dict({
-        "name": "EWMA",
-        "validation_function": validation.is_double,
-        "value": 75.0,
-        "type": Attribute.DOUBLE,
-        "help": "EWMA level"
-    }),
-    "FailureRatio": dict({
-        "name": "FailureRatio",
-        "validation_function": validation.is_double,
-        "value": 0.33333299999999999,
-        "type": Attribute.DOUBLE,
-        "help": "Ratio of minimum erroneous transmissions needed to switch to a lower rate"
-    }),
-    "Bounds": dict({
-        "name": "Bounds",
-        "validation_function": validation.is_string,
-        "value": "-100|100|-100|100|0|100",
-        "type": Attribute.STRING,
-        "help": "Bounds of the area to cruise."
-    }),
-    "pmtlFor18mbps": dict({
-        "name": "pmtlFor18mbps",
-        "validation_function": validation.is_double,
-        "value": 0.37219999999999998,
-        "type": Attribute.DOUBLE,
-        "help": "Pmtl parameter for 18 Mbs data mode"
-    }),
-    "MinX": dict({
-        "name": "MinX",
-        "validation_function": validation.is_double,
-        "value": 1.0,
-        "type": Attribute.DOUBLE,
-        "help": "The x coordinate where the grid starts."
-    }),
-    "TotalRate": dict({
-        "name": "TotalRate",
-        "validation_function": validation.is_integer,
-        "value": 4096,
-        "type": Attribute.INTEGER,
-        "help": "Total available channel rate in bps (for a single channel, without splitting reservation channel)"
-    }),
-    "Exponent2": dict({
-        "name": "Exponent2",
-        "validation_function": validation.is_double,
-        "value": 3.7999999999999998,
-        "type": Attribute.DOUBLE,
-        "help": "The exponent for the third field."
-    }),
-    "MaxDelay": dict({
-        "name": "MaxDelay",
-        "validation_function": validation.is_time,
-        "value": "10000000000ns",
-        "type": Attribute.STRING,
-        "help": "If a packet stays longer than this delay in the queue, it is dropped."
-    }),
-    "MaxQueueSize": dict({
-        "name": "MaxQueueSize",
-        "validation_function": validation.is_integer,
-        "value": 255,
-        "type": Attribute.INTEGER,
-        "help": "Maximum number of packets we can store when resolving route"
-    }),
-    "Mode": dict({
-        "name": "Mode",
-        "validation_function": validation.is_enum,
-        "value": "Distance",
-        "allowed": ["Distance",
-     "Time"],
-        "type": Attribute.ENUM,
-        "help": "The mode indicates the condition used to change the current speed and direction"
-    }),
-    "rho": dict({
-        "name": "rho",
-        "validation_function": validation.is_double,
-        "value": 0.0,
-        "type": Attribute.DOUBLE,
-        "help": "The radius of the disc"
-    }),
-    "ProbeThreshold": dict({
-        "name": "ProbeThreshold",
-        "validation_function": validation.is_integer,
-        "value": 1,
-        "type": Attribute.INTEGER,
-        "help": "The number of consecutive transmissions failure to activate the RTS probe."
-    }),
-    "Y": dict({
-        "name": "Y",
-        "validation_function": validation.is_double,
-        "value": 0.0,
-        "type": Attribute.DOUBLE,
-        "help": "The y coordinate of the center of the  disc."
-    }),
-    "poriFor6mbps": dict({
-        "name": "poriFor6mbps",
-        "validation_function": validation.is_double,
-        "value": 0.5,
-        "type": Attribute.DOUBLE,
-        "help": "Pori parameter for 6 Mbs data mode"
-    }),
-    "Root": dict({
-        "name": "Root",
-        "validation_function": validation.is_string,
-        "value": "ff:ff:ff:ff:ff:ff",
-        "type": Attribute.STRING,
-        "help": "The MAC address of root mesh point."
-    }),
-    "RxQueueSize": dict({
-        "name": "RxQueueSize",
-        "validation_function": validation.is_integer,
-        "value": 1000,
-        "type": Attribute.INTEGER,
-        "help": "Maximum size of the read queue.  This value limits number of packets that have been read from the network into a memory buffer but have not yet been processed by the simulator."
-    }),
-    "IntervalT8": dict({
-        "name": "IntervalT8",
-        "validation_function": validation.is_time,
-        "value": "50000000ns",
-        "type": Attribute.STRING,
-        "help": "Wait for DSA/DSC Acknowledge timeout. Maximum 300ms."
-    }),
-    "NetDiameter": dict({
-        "name": "NetDiameter",
-        "validation_function": validation.is_integer,
-        "value": 35,
-        "type": Attribute.INTEGER,
-        "help": "Net diameter measures the maximum possible number of hops between two nodes in the network"
-    }),
-    "Dot11sMeshHeaderLength": dict({
-        "name": "Dot11sMeshHeaderLength",
-        "validation_function": validation.is_integer,
-        "value": 6,
-        "type": Attribute.INTEGER,
-        "help": "Length of the mesh header"
-    }),
-    "JitterBinWidth": dict({
-        "name": "JitterBinWidth",
-        "validation_function": validation.is_double,
-        "value": 0.001,
-        "type": Attribute.DOUBLE,
-        "help": "The width used in the jitter histogram."
-    }),
-    "IntervalT7": dict({
-        "name": "IntervalT7",
-        "validation_function": validation.is_time,
-        "value": "100000000ns",
-        "type": Attribute.STRING,
-        "help": "wait for DSA/DSC/DSD Response timeout. Maximum is 1s"
-    }),
-    "Verbose": dict({
-        "name": "Verbose",
-        "validation_function": validation.is_bool,
-        "value": False,
-        "type": Attribute.BOOL,
-        "help": "Produce usual output."
-    }),
-    "IntervalT1": dict({
-        "name": "IntervalT1",
-        "validation_function": validation.is_time,
-        "value": "50000000000ns",
-        "type": Attribute.STRING,
-        "help": "Wait for DCD timeout. Maximum is 5*maxDcdInterval"
-    }),
-    "DefaultLoss": dict({
-        "name": "DefaultLoss",
-        "validation_function": validation.is_double,
-        "value": 1.7976900000000001e+308,
-        "type": Attribute.DOUBLE,
-        "help": "The default value for propagation loss, dB."
-    }),
-    "Loss": dict({
-        "name": "Loss",
-        "validation_function": validation.is_double,
-        "value": 1.7976900000000001e+308,
-        "flags": Attribute.NoDefaultValue,
-        "type": Attribute.DOUBLE,
-        "help": "The default value for propagation loss, dB."
-    }),
-   "Symmetric": dict({
-        "name": "Symmetric",
-        "validation_function": validation.is_bool,
-        "value": True,
-        "flags": Attribute.NoDefaultValue,
-        "type": Attribute.BOOL,
-        "help": "Symmetry between in the propagation loss model between nodes."
-    }),
-    "IntervalT3": dict({
-        "name": "IntervalT3",
-        "validation_function": validation.is_time,
-        "value": "200000000ns",
-        "type": Attribute.STRING,
-        "help": "ranging Response reception timeout following the transmission of a ranging request. Maximum is 200ms"
-    }),
-    "MaxPackets": dict({
-        "name": "MaxPackets",
-        "validation_function": validation.is_integer,
-        "value": 100,
-        "type": Attribute.INTEGER,
-        "help": "The maximum number of packets accepted by this DropTailQueue."
-    }),
-    "EnableLearning": dict({
-        "name": "EnableLearning",
-        "validation_function": validation.is_bool,
-        "value": True,
-        "type": Attribute.BOOL,
-        "help": "Enable the learning mode of the Learning Bridge"
-    }),
-    "Rate": dict({
-        "name": "Rate",
-        "validation_function": validation.is_string,
-        "value": "1000000bps",
-        "type": Attribute.STRING,
-        "help": "The PHY rate used by this device"
-    }),
-    "RetryRate": dict({
-        "name": "RetryRate",
-        "validation_function": validation.is_double,
-        "value": 0.20000000000000001,
-        "type": Attribute.DOUBLE,
-        "help": "Number of retry attempts per second (of RTS/GWPING)"
-    }),
-    "Threshold": dict({
-        "name": "Threshold",
-        "validation_function": validation.is_double,
-        "value": 8.0,
-        "type": Attribute.DOUBLE,
-        "help": "SINR cutoff for good packet reception"
-    }),
-    "SuccessThreshold": dict({
-        "name": "SuccessThreshold",
-        "validation_function": validation.is_integer,
-        "value": 10,
-        "type": Attribute.INTEGER,
-        "help": "The minimum number of sucessfull transmissions to try a new rate."
-    }),
-    "Speed": dict({
-        "name": "Speed",
-        "validation_function": validation.is_double,
-        "value": 300000000.0,
-        "type": Attribute.DOUBLE,
-        "help": "The speed (m/s)"
-    }),
-    "RndSpeed": dict({
-        "name": "Speed",
-        "validation_function": validation.is_string,
-        "value": "Uniform:1:2",
-        "type": Attribute.STRING,
-        "help": "Random variable to control the speed (m/s)."
-    }),
-    "Port": dict({
-        "name": "Port",
-        "validation_function": validation.is_integer,
-        "value": 9,
-        "type": Attribute.INTEGER,
-        "help": "Port on which we listen for incoming packets."
-    }),
-    "NoisePowerSpectralDensity": dict({
-        "name": "NoisePowerSpectralDensity",
-        "validation_function": validation.is_double,
-        "value": 4.1400000000000002e-21,
-        "type": Attribute.DOUBLE,
-        "help": "the power spectral density of the measuring instrument noise, in Watt/Hz. Mostly useful to make spectrograms look more similar to those obtained by real devices. Defaults to the value for thermal noise at 300K."
-    }),
-    "RaiseThreshold": dict({
-        "name": "RaiseThreshold",
-        "validation_function": validation.is_integer,
-        "value": 10,
-        "type": Attribute.INTEGER,
-        "help": "Attempt to raise the rate if we hit that threshold"
-     }),
-    "ProtocolNumber": dict({
-        "name": "ProtocolNumber",
-        "validation_function": validation.is_integer,
-        "value": 0,
-        "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-        "type": Attribute.INTEGER,
-        "help": "The Ipv4 protocol number."
-     }),
-    "Position": dict({
-        "name": "Position",
-        "validation_function": validation.is_string,
-        "value": "0:0:0",
-        "type": Attribute.STRING,
-        "help": "The current position of the mobility model."
-     }),
-    "Velocity": dict({
-        "name": "Velocity",
-        "validation_function": validation.is_string,
-        "value": "0:0:0", 
-        "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-        "type": Attribute.STRING,
-        "help": "The current velocity of the mobility model."
-     }),
-    "StartTime": dict({
-        "name": "StartTime",
-        "validation_function": validation.is_string,
-        "value": "0ns", 
-        "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-        "type": Attribute.STRING,
-        "help": "Time at which the application will start"
-     }),
-     "StopTime": dict({
-        "name": "StopTime",
-        "validation_function": validation.is_string,
-        "value": "0ns", 
-        "type": Attribute.STRING,
-        "help": "Time at which the application will stop"
-     }),
-    "IsLowLatency": dict({
-        "name": "IsLowLatency",
-        "validation_function": validation.is_bool,
-        "value": True, 
-        "type": Attribute.BOOL,
-        "help": "If true, we attempt to modelize a so-called low-latency device: a device where decisions about tx parameters can be made on a per-packet basis and feedback about the transmission of each packet is obtained before sending the next. Otherwise, we modelize a  high-latency device, that is a device where we cannot update our decision about tx parameters after every packet transmission."
-     }),
-     "MaxSsrc": dict({
-        "name": "MaxSsrc",
-        "validation_function": validation.is_integer,
-        "value": 7,
-        "type": Attribute.INTEGER,
-        "help": "The maximum number of retransmission attempts for an RTS. This value will not have any effect on some rate control algorithms."
-    }),
-    "MaxSlrc": dict({
-        "name": "MaxSlrc",
-        "validation_function": validation.is_integer,
-        "value": 7,
-        "type": Attribute.INTEGER,
-        "help": "The maximum number of retransmission attempts for a DATA packet. This value will not have any effect on some rate control algorithms."
-    }),
-    "NonUnicastMode": dict({
-        "name": "NonUnicastMode",
-        "validation_function": validation.is_string,
-        "value": "Invalid-WifiMode",
-        "type": Attribute.STRING,
-        "help": "Wifi mode used for non-unicast transmissions."
-    }),
-    "RtsCtsThreshold": dict({
-        "name": "RtsCtsThreshold",
-        "validation_function": validation.is_integer,
-        "value": 2346,
-        "type": Attribute.INTEGER,
-        "help": "If  the size of the data packet + LLC header + MAC header + FCS trailer is bigger than this value, we use an RTS/CTS handshake before sending the data, as per IEEE Std. 802.11-2007, Section 9.2.6. This value will not have any effect on some rate control algorithms."
-    }),
-    "FragmentationThreshold": dict({
-        "name": "FragmentationThreshold",
-        "validation_function": validation.is_integer,
-        "value": 2346,
-        "type": Attribute.INTEGER,
-        "help": "If the size of the data packet + LLC header + MAC header + FCS trailer is biggerthan this value, we fragment it such that the size of the fragments are equal or smaller than this value, as per IEEE Std. 802.11-2007, Section 9.4. This value will not have any effect on some rate control algorithms."
-    }),
-    "Ssid": dict({
-        "name": "Ssid",
-        "validation_function": validation.is_string,
-        "value": "default",
-        "type": Attribute.STRING,
-        "help": "The ssid we want to belong to."
-    }),
-    "AckTimeout": dict({
-        "name": "AckTimeout",
-        "validation_function": validation.is_time,
-        "value": "75000ns",
-        "type": Attribute.STRING,
-        "help": "When this timeout expires, the DATA/ACK handshake has failed."
-    }),
-    "Sifs": dict({
-        "name": "Sifs",
-        "validation_function": validation.is_time,
-        "value": "16000ns",
-        "type": Attribute.STRING,
-        "help": "The value of the SIFS constant."
-    }),
-    "MinCw": dict({
-        "name": "MinCw",
-        "validation_function": validation.is_integer,
-        "value": 15,
-        "type": Attribute.INTEGER,
-        "help": "The minimum value of the contention window."
-    }),
-    "IsEnabled": dict({
-        "name": "IsEnabled",
-        "validation_function": validation.is_bool,
-        "value": True, 
-        "type": Attribute.BOOL,
-        "help": "Whether this ErrorModel is enabled or not."
-    }),
-    "CompressedBlockAckTimeout": dict({
-        "name": "CompressedBlockAckTimeout",
-        "validation_function": validation.is_time,
-        "value": "99000ns",
-        "type": Attribute.STRING,
-        "help": "When this timeout expires, the COMPRESSED_BLOCK_ACK_REQ/COMPRESSED_BLOCK_ACK handshake has failed."
-    }),
-    "MaxCw": dict({
-        "name": "MaxCw",
-        "validation_function": validation.is_integer,
-        "value": 1023, 
-        "type": Attribute.INTEGER,
-        "help": "The maximum value of the contention window."
-    }),
-    "RTG": dict({
-        "name": "RTG",
-        "validation_function": validation.is_integer,
-        "value": 0, 
-        "type": Attribute.INTEGER,
-        "help": "receive/transmit transition gap."
-    }),
-    "TTG": dict({
-        "name": "TTG",
-        "validation_function": validation.is_integer,
-        "value": 0, 
-        "type": Attribute.INTEGER,
-        "help": "transmit/receive transition gap."
-    }),
-    "MinRTO": dict({
-        "name": "MinRTO",
-        "validation_function": validation.is_time,
-        "value": "200000000ns",
-        "type": Attribute.STRING,
-        "help": "Minimum retransmit timeout value"
-    }),
-    "Pifs": dict({
-        "name": "Pifs",
-        "validation_function": validation.is_time,
-        "value": "25000ns",
-        "type": Attribute.STRING,
-        "help": "The value of the PIFS constant."
-    }),
-    "InitialEstimation": dict({
-        "name": "InitialEstimation",
-        "validation_function": validation.is_time,
-        "value": "1000000000ns",
-        "type": Attribute.STRING,
-        "help": "XXX"
-    }),
-    "BasicBlockAckTimeout": dict({
-        "name": "BasicBlockAckTimeout",
-        "validation_function": validation.is_time,
-        "value": "281000ns",
-        "type": Attribute.STRING,
-        "help": "When this timeout expires, the BASIC_BLOCK_ACK_REQ/BASIC_BLOCK_ACK handshake has failed."
-    }),
-    "MaxMultiplier": dict({
-        "name": "MaxMultiplier",
-        "validation_function": validation.is_double,
-        "value": 64.0,
-        "type": Attribute.DOUBLE,
-        "help": "XXX"
-    }),
-    "Aifsn": dict({
-        "name": "Aifsn",
-        "validation_function": validation.is_integer,
-        "value": 2, 
-        "type": Attribute.INTEGER,
-        "help": "The AIFSN: the default value conforms to simple DCA."
-    }),
-    "OptionNumber": dict({
-        "name": "OptionNumber",
-        "validation_function": validation.is_integer,
-        "value": 0,
-        "type": Attribute.INTEGER,
-        "help": "The IPv6 option number."
-    }),
-    "Slot": dict({
-        "name": "Slot",
-        "validation_function": validation.is_time,
-        "value": "9000ns",
-        "type": Attribute.STRING,
-        "help": "The duration of a Slot."
-    }),
-    "IpForward": dict({
-        "name": "IpForward",
-        "validation_function": validation.is_bool,
-        "value": True, 
-        "type": Attribute.BOOL,
-        "help": "Globally enable or disable IP forwarding for all current and future Ipv4 devices."
-    }),
-    "WeakEsModel": dict({
-        "name": "WeakEsModel",
-        "validation_function": validation.is_bool,
-        "value": True, 
-        "type": Attribute.BOOL,
-        "help": "RFC1122 term for whether host accepts datagram with a dest. address on another interface"
-    }),
-    "MaxPropagationDelay": dict({
-        "name": "MaxPropagationDelay",
-        "validation_function": validation.is_time,
-        "value": "3333ns",
-        "type": Attribute.STRING,
-        "help": "The maximum propagation delay. Unused for now."
-    }),
-    "ExtensionNumber": dict({
-        "name": "ExtensionNumber",
-        "validation_function": validation.is_integer,
-        "value": 0, 
-        "type": Attribute.INTEGER,
-        "help": "The IPv6 extension number."
-    }),
-    "EifsNoDifs": dict({
-        "name": "EifsNoDifs",
-        "validation_function": validation.is_time,
-        "value": "60000ns",
-        "type": Attribute.STRING,
-        "help": "The value of EIFS-DIFS"
-    }),
-    "CtsTimeout": dict({
-        "name": "CtsTimeout",
-        "validation_function": validation.is_time,
-        "value": "75000ns",
-        "type": Attribute.STRING,
-        "help": "When this timeout expires, the RTS/CTS handshake has failed."
-    }),
-    "Standard": dict({
-        "name": "Standard",
-        "validation_function": validation.is_string,
-        "value": "WIFI_PHY_STANDARD_80211a",
-        "flags": Attribute.ExecReadOnly | \
-                Attribute.ExecImmutable | \
-                Attribute.NoDefaultValue |
-                Attribute.Metadata,
-        "type": Attribute.ENUM,
-        "allowed": wifi_standards.keys(),
-        "help": "Wifi PHY standard"
-    }),
-    "ClassifierSrcAddress": dict({
-        "name": "SrcAddress",
-        "validation_function": validation.is_string, # TODO:! Address + Netref
-        "value": "",
-        "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable | Attribute.Metadata,
-        "type": Attribute.STRING,
-        "help": "The source ip address for the IpcsClassifierRecord"
-    }),
-    "ClassifierSrcMask": dict({
-        "name": "SrcMask",
-        "validation_function": validation.is_string, # TODO:! NetworkMask
-        "value": "",
-        "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable | Attribute.Metadata,
-        "type": Attribute.STRING,
-        "help": "The mask to apply on the source ip address for the IpcsClassifierRecord"
-    }),
-    "ClassifierDstAddress": dict({
-        "name": "DstAddress",
-        "validation_function": validation.is_string, # TODO:! Address + Netref
-        "value": "",
-        "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable | Attribute.Metadata,
-        "type": Attribute.STRING,
-        "help": "The destination ip address for the IpcsClassifierRecord"
-    }),
-    "ClassifierDstMask": dict({
-        "name": "DstMask",
-        "validation_function": validation.is_string, # TODO:! NetworkMask
-        "value": "",
-        "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable | Attribute.Metadata,
-        "type": Attribute.STRING,
-        "help": "The mask to apply on the destination ip address for the IpcsClassifierRecord"
-    }),
-    "ClassifierSrcPortLow": dict({
-        "name": "SrcPortLow",
-        "validation_function": validation.is_integer,
-        "value": 0,
-        "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable | Attribute.Metadata,
-        "type": Attribute.INTEGER,
-        "help": "The lower boundary of the source port range for the IpcsClassifierRecord"
-    }),
-    "ClassifierSrcPortHigh": dict({
-        "name": "SrcPortHigh",
-        "validation_function": validation.is_integer,
-        "value": 65000,
-        "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable | Attribute.Metadata,
-        "type": Attribute.INTEGER,
-        "help": "The higher boundary of the source port range for the IpcsClassifierRecord"
-    }),
-    "ClassifierDstPortLow": dict({
-        "name": "DstPortLow",
-        "validation_function": validation.is_integer,
-        "value": 0,
-        "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable | Attribute.Metadata,
-        "type": Attribute.INTEGER,
-        "help": "The lower boundary of the destination port range for the IpcsClassifierRecord"
-    }),
-    "ClassifierDstPortHigh": dict({
-        "name": "DstPortHigh",
-        "validation_function": validation.is_integer,
-        "value": 65000,
-        "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable | Attribute.Metadata,
-        "type": Attribute.INTEGER,
-        "help": "The higher boundary of the destination port range for the IpcsClassifierRecord"
-    }),
-    "ClassifierProtocol": dict({
-        "name": "Protocol",
-        "validation_function": validation.is_string,
-        "value": "UdpL4Protocol",
-        "allowed": l4_protocols.keys(),
-        "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable | Attribute.Metadata,
-        "type": Attribute.ENUM,
-        "help": "The L4 protocol for the IpcsClassifierRecord"
-    }),
-    "ClassifierPriority": dict({
-        "name": "Priority",
-        "validation_function": validation.is_integer,
-        "value": 1,
-        "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable | Attribute.Metadata,
-        "type": Attribute.INTEGER,
-        "help": "The priority of the IpcsClassifierRecord"
-    }),
-    "ServiceFlowDirection": dict({
-        "name": "Direction",
-        "validation_function": validation.is_string,
-        "value": "SF_DIRECTION_UP",
-        "allowed": service_flow_direction.keys(),
-        "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable | Attribute.Metadata,
-        "type": Attribute.ENUM,
-        "help": "Service flow direction as described by the IEEE-802.16 standard"
-    }),
-    "ServiceFlowSchedulingType": dict({
-        "name": "SchedulingType",
-        "validation_function": validation.is_string,
-        "value": "SF_TYPE_RTPS",
-        "allowed": service_flow_scheduling_type.keys(),
-        "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable | Attribute.Metadata,
-        "type": Attribute.ENUM,
-        "help": "Service flow scheduling type",
-    }),
-   "WaypointList": dict({
-        "name": "WaypointList",
-        "validation_function": validation.is_string, # TODO: SPECIAL VALIDATION FUNC
-        "value": "",
-        "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable | Attribute.Metadata,
-        "type": Attribute.STRING,
-        "help": "Comma separated list of waypoints in format t:x:y:z. Ex: 0s:0:0:0, 1s:1:0:0"
-    }),
-    "tun_cipher_fdnd" : dict({
-        "name" : "tun_cipher", 
-        "help" : "Tunnel cryptography not supported",
-        "type" : Attribute.ENUM,
-        "value" : "PLAIN",
-        "allowed" : [
-            "PLAIN",
-        ],
-        "flags" : Attribute.ExecImmutable | Attribute.Metadata,
-        "validation_function" : validation.is_enum,
-        }),
-    "Up" : dict({
-        "name" : "Up", 
-        "help" : "Flag to enable or disable interface",
-        "type" : Attribute.BOOL,
-        "value" : True,
-        "validation_function" : validation.is_bool,
-        }),
-    "QosSupported" : dict({
-        "name" : "QosSupported", 
-        "help" : "Flag to enable or disable QoS support at wifi MAC layer",
-        "type" : Attribute.BOOL,
-        "value" : False,
-        "validation_function" : validation.is_bool,
-        }),
-})
diff --git a/src/nepi/testbeds/ns3/connection_metadata.py b/src/nepi/testbeds/ns3/connection_metadata.py
deleted file mode 100644 (file)
index 865e4f8..0000000
+++ /dev/null
@@ -1,870 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID
-import functools
-from nepi.util.constants import CONNECTION_DELAY
-from nepi.util.tunchannel_impl import \
-    crossconnect_tunchannel_peer_init, \
-    crossconnect_tunchannel_peer_compl
-
-### Connection functions ####
-
-def connect_dummy(testbed_instance, guid1, guid2):
-    pass
-
-def connect_node_device(testbed_instance, node_guid, device_guid):
-    node = testbed_instance._elements[node_guid]
-    device = testbed_instance._elements[device_guid]
-    node.AddDevice(device)
-
-def connect_queue_device(testbed_instance, queue_guid, device_guid):
-    queue = testbed_instance._elements[queue_guid]
-    device = testbed_instance._elements[device_guid]
-    device.SetQueue(queue)
-
-def connect_manager_device(testbed_instance, manager_guid, device_guid):
-    manager = testbed_instance._elements[manager_guid]
-    device = testbed_instance._elements[device_guid]
-    device.SetRemoteStationManager(manager)
-
-def connect_phy_device(testbed_instance, phy_guid, device_guid):
-    phy = testbed_instance._elements[phy_guid]
-    if hasattr(phy, "GetErrorRateModel") and phy.GetErrorRateModel() == None:
-        return CONNECTION_DELAY
-    device = testbed_instance._elements[device_guid]
-    device.SetPhy(phy)
-    phy.SetDevice(device)
-    # search for the node asociated with the device
-    node_guid = testbed_instance.get_connected(device_guid, "node", "devs")
-    if len(node_guid) == 0:
-        raise RuntimeError("Can't instantiate interface %d outside netns \
-                node" % device_guid)
-    node = testbed_instance.elements[node_guid[0]]
-    phy.SetMobility(node)
-
-def connect_mac_device(testbed_instance, mac_guid, device_guid):
-    mac = testbed_instance._elements[mac_guid]
-    device = testbed_instance._elements[device_guid]
-    device.SetMac(mac)
-
-def connect_error_model_device(testbed_instance, model_guid, device_guid):
-    model = testbed_instance._elements[model_guid]
-    device = testbed_instance._elements[device_guid]
-    device.SetReceiveErrorModel(model)
-
-def connect_error_model_phy(testbed_instance, err_guid, phy_guid):
-    err = testbed_instance._elements[err_guid]
-    phy = testbed_instance._elements[phy_guid]
-    phy.SetErrorRateModel(err)
-
-def connect_channel_device(testbed_instance, channel_guid, device_guid):
-    channel = testbed_instance._elements[channel_guid]
-    device = testbed_instance._elements[device_guid]
-    device.Attach(channel)
-
-def connect_simple_channel_device(testbed_instance, channel_guid, device_guid):
-    channel = testbed_instance._elements[channel_guid]
-    device = testbed_instance._elements[device_guid]
-    device.SetChannel(channel)
-
-def connect_loss_channel(testbed_instance, loss_guid, channel_guid):
-    loss = testbed_instance._elements[loss_guid]
-    channel = testbed_instance._elements[channel_guid]
-    channel.SetPropagationLossModel(loss)
-
-def connect_next_loss(testbed_instance, prev_guid, next_guid):
-    prev = testbed_instance._elements[prev_guid]
-    next = testbed_instance._elements[next_guid]
-    prev.SetNext(next)
-
-def connect_delay_channel(testbed_instance, delay_guid, channel_guid):
-    delay = testbed_instance._elements[delay_guid]
-    channel = testbed_instance._elements[channel_guid]
-    channel.SetPropagationDelayModel(delay)
-
-def connect_node_application(testbed_instance, node_guid, application_guid):
-    node = testbed_instance._elements[node_guid]
-    application = testbed_instance._elements[application_guid]
-    node.AddApplication(application)
-# works for ArpL3Protocol, Ipv4L3Protocol, UdpL4Protocol, TcpL4Protocol,
-# NscTcpL4Protocol, MobilityModel (every subclass), 
-# RoutingProtocol (every subclass)
-
-def connect_node_other(testbed_instance, node_guid, other_guid):
-    node = testbed_instance._elements[node_guid]
-    other = testbed_instance._elements[other_guid]
-    node.AggregateObject(other)
-
-def connect_ss_sflow(testbed_instance, station_guid, sflow_guid):
-    ss = testbed_instance._elements[station_guid]
-    sflow = testbed_instance._elements[sflow_guid]
-    ss.AddServiceFlow(sflow)
-
-#def connect_bs_sflow(testbed_instance, station_guid, sflow_guid):
-#    bs = testbed_instance._elements[station_guid]
-#    sflow = testbed_instance._elements[sflow_guid]
-#    bs.GetServiceFlowManager().AddServiceFlow (sflow)
-
-def connect_bstation_linksched(testbed_instance, bstation_guid, linksched_guid):
-    bstation = testbed_instance._elements[bstation_guid]
-    linksched = testbed_instance._elements[linksched_guid]
-    linksched.SetBs(bstation)
-
-def connect_classifier_sflow(testbed_instance, classifier_guid, sflow_guid):
-    classifier = testbed_instance._elements[classifier_guid]
-    sflow = testbed_instance._elements[sflow_guid]
-    csparam = testbed_instance.ns3.CsParameters(testbed_instance.ns3.CsParameters.ADD, classifier)
-    sflow.SetConvergenceSublayerParam (csparam); 
-
-def connect_fd(testbed_instance, fdnd_guid, cross_data):
-    def recvfd(sock, fdnd):
-        (fd, msg) = passfd.recvfd(sock)
-        # Store a reference to the endpoint to keep the socket alive
-        fdnd.SetFileDescriptor(fd)
-    
-    import threading
-    import passfd
-    import socket
-    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
-    sock.bind("")
-    address = sock.getsockname()
-    # Set tun standard contract attributes
-    testbed_instance.set(fdnd_guid, "tun_addr", address)
-    testbed_instance.set(fdnd_guid, "tun_proto", "fd")
-    testbed_instance.set(fdnd_guid, "tun_port", 0)
-    testbed_instance.set(fdnd_guid, "tun_key", ("\xfa"*32).encode("base64").strip()) # unimportant, fds aren't encrypted
-    fdnd = testbed_instance._elements[fdnd_guid]
-    t = threading.Thread(target=recvfd, args=(sock,fdnd))
-    t.start()
-
-def connect_tunchannel_fd(testbed_instance, tun_guid, fdnd_guid):
-    fdnd = testbed_instance._elements[fdnd_guid]
-    tun = testbed_instance._elements[tun_guid]
-
-    # Create socket pair to connect the FDND and the TunChannel with it
-    import socket
-    sock1, sock2 = socket.socketpair(
-        socket.AF_UNIX, socket.SOCK_SEQPACKET)
-
-    # Store a reference to the endpoint to keep the socket alive
-    fdnd._endpoint_socket = sock1
-    fdnd.SetFileDescriptor(sock1.fileno())
-   
-    # Send the other endpoint to the TUN channel
-    tun.tun_socket = sock2
-    
-    # With this kind of tun_socket, NS3 will expect a PI header
-    # (sockets don't support the TUNGETIFF ioctl, so it will assume
-    # the default presence of PI headers)
-    tun.with_pi = True
-
-
-### Connector information ###
-
-connector_types = dict({
-    "node": dict({
-                "help": "Connector to a ns3::Node object (mandatory)",
-                "name": "node",
-                "max": 1,
-                "min": 1
-            }),
-    "devs": dict({
-                "help": "Connector to network interfaces",
-                "name": "devs",
-                "max": -1,
-                "min": 0
-            }),
-    "dev2": dict({
-                "help": "Connector to exactly two network interfaces (mandatory)",
-                "name": "dev2",
-                "max": 2,
-                "min": 2
-            }),
-    "dev": dict({
-                "help": "Connector to exactly one network interface (mandatory)",
-                "name": "dev",
-                "max": 1,
-                "min": 1
-            }),
-    "apps": dict({
-                "help": "Connector to applications", 
-                "name": "apps",
-                "max": -1,
-                "min": 0
-            }),
-    "protos": dict({
-                "help": "Connector to network stacks and protocols", 
-                "name": "protos",
-                "max": -1,
-                "min": 0
-            }),
-    "chan": dict({
-                "help": "Connector to a channel for the device (mandatory)", 
-                "name": "chan",
-                "max": 1,
-                "min": 1
-            }),
-    "queue": dict({
-                "help": "Connector to a queueing discipline (mandatory)", 
-                "name": "queue",
-                "max": 1,
-                "min": 1
-            }),
-    "err": dict({
-                "help": "Connector to an error model for the device", 
-                "name": "err",
-                "max": 1,
-                "min": 0
-            }),
-    "->fd": dict({
-                "help": "Connector for file descriptor reception for devices with file descriptors",
-                "name": "->fd",
-                "max": 1,
-                "min": 0
-            }),
-    "fd->": dict({
-                "help": "Connector for file descriptor providing for devices with file descriptors",
-                "name": "fd->",
-                "max": 1,
-                "min": 0
-            }),
-    "phy": dict({
-                "help": "Connector to a PHY wifi model", 
-                "name": "phy",
-                "max": 1,
-                "min": 0
-            }),
-    "phys": dict({
-                "help": "Connector to a wifi channel with PHY wifi models", 
-                "name": "phys",
-                "max": -1,
-                "min": 0
-            }),
-    "mac": dict({
-                "help": "Connector to a MAC wifi model", 
-                "name": "mac",
-                "max": 1,
-                "min": 0
-            }),
-    "manager": dict({
-                "help": "Connector to a wifi manager", 
-                "name": "manager",
-                "max": 1,
-                "min": 0
-            }),
-    "delay": dict({
-                "help": "Connector to a delay model", 
-                "name": "delay",
-                "max": 1,
-                "min": 0
-            }),
-    "loss": dict({
-                "help": "Connector to a loss model", 
-                "name": "loss",
-                "max": 1,
-                "min": 0
-            }),
-    "prev": dict({
-                "help": "Connector to the previous loss model", 
-                "name": "prev",
-                "max": 1,
-                "min": 0
-            }),
-    "next": dict({
-                "help": "Connector to the next loss model", 
-                "name": "next",
-                "max": 1,
-                "min": 0
-            }),
-    "mobility": dict({
-                "help": "Connector to a mobility model", 
-                "name": "mobility",
-                "max": 1,
-                "min": 0
-            }),
-    "mobpair": dict({
-                "help": "Connector from MatrixPropagationLossModel to MobilityPair", 
-                "name": "mobpair",
-                "max": -1,
-                "min": 0
-            }),
-   "matrix": dict({
-                "help": "Connector from MobilityPair to MatrixPropagationLossModel", 
-                "name": "matrix",
-                "max": 1,
-                "min": 0
-            }),
-    "mp": dict({
-                "help": "Connector from MobilityModel to MobilityPair", 
-                "name": "mp",
-                "max": -1,
-                "min": 0
-            }),
-    "ma": dict({
-                "help": "Connector to the 'ma' source mobility model, in the context of a mobility loss pair ", 
-                "name": "ma",
-                "max": 1,
-                "min": 0 
-            }),
-    "mb": dict({
-                "help": "Connector to the 'mb' destination mobility model, in the context of a mobility loss pair ", 
-                "name": "mb",
-                "max": 1,
-                "min": 1
-            }),
-    "tcp": dict({
-                "help": "Connector for ip-ip tunneling over TCP link", 
-                "name": "tcp",
-                "max": 1, 
-                "min": 0
-            }),
-    "udp": dict({
-                "help": "Connector for ip-ip tunneling over UDP datagrams", 
-                "name": "udp",
-                "max": 1, 
-                "min": 0
-            }),
-    "sflows": dict({
-                "help": "Connector to service flows",
-                "name": "sflows",
-                "max": -1, 
-                "min": 0
-            }),
-    "uplnk": dict({
-                "help": "Connector to a uplink scheduler",
-                "name": "uplnk",
-                "max": 1, 
-                "min": 0
-            }),
-    "dwnlnk": dict({
-                "help": "Connector to a dowlink scheduler",
-                "name": "dwnlnk",
-                "max": 1, 
-                "min": 0
-            }),
-    "classif": dict({
-                "help": "Connector to a classifier recod",
-                "name": "classif",
-                "max": 1, 
-                "min": 0
-            }),
-    "sflow": dict({
-                "help": "Connector to a service flow",
-                "name": "sflow",
-                "max": 1, 
-                "min": 0
-            }),
-    })
-
-connections = [
-    dict({
-            "from": ( "ns3", "ns3::Node", "devs" ),
-            "to":   ( "ns3", "ns3::BridgeNetDevice", "node" ),
-            "init_code": connect_node_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::Node", "devs" ),
-            "to":   ( "ns3", "ns3::CsmaNetDevice", "node" ),
-            "init_code": connect_node_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::Node", "devs" ),
-            "to":   ( "ns3", "ns3::EmuNetDevice", "node" ),
-            "init_code": connect_node_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::Node", "devs" ),
-            "to":   ( "ns3", "ns3::PointToPointNetDevice", "node" ),
-            "init_code": connect_node_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::Node", "devs" ),
-            "to":   ( "ns3", "ns3::SimpleNetDevice", "node" ),
-            "init_code": connect_node_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::Node", "devs" ),
-            "to":   ( "ns3", "ns3::FdNetDevice", "node" ),
-            "init_code": connect_node_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::Node", "devs" ),
-            "to":   ( "ns3", "ns3::WifiNetDevice", "node" ),
-            "init_code": connect_node_device,   
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::Node", "devs" ),
-            "to":   ( "ns3", "ns3::SubscriberStationNetDevice", "node" ),
-            "init_code": connect_node_device,   
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::Node", "devs" ),
-            "to":   ( "ns3", "ns3::BaseStationNetDevice", "node" ),
-            "init_code": connect_node_device,   
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::DropTailQueue", "dev" ),
-            "to":   ( "ns3", "ns3::CsmaNetDevice", "queue" ),
-            "init_code": connect_queue_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::DropTailQueue", "dev" ),
-            "to":   ( "ns3", "ns3::EmuNetDevice", "queue" ),
-            "init_code": connect_queue_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::DropTailQueue", "dev" ),
-            "to":   ( "ns3", "ns3::PointToPointNetDevice", "queue" ),
-            "init_code": connect_queue_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::ArfWifiManager", "dev" ),
-            "to":   ( "ns3", "ns3::WifiNetDevice", "manager" ),  
-            "init_code": connect_manager_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::ConstantRateWifiManager", "dev" ),
-            "to":   ( "ns3", "ns3::WifiNetDevice", "manager" ),  
-            "init_code": connect_manager_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::AarfcdWifiManager", "dev" ),
-            "to":   ( "ns3", "ns3::WifiNetDevice", "manager" ),  
-            "init_code": connect_manager_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::AarfWifiManager", "dev" ),
-            "to":   ( "ns3", "ns3::WifiNetDevice", "manager" ),  
-            "init_code": connect_manager_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::AmrrWifiManager", "dev" ),
-            "to":   ( "ns3", "ns3::WifiNetDevice", "manager" ),  
-            "init_code": connect_manager_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::CaraWifiManager", "dev" ),
-            "to":   ( "ns3", "ns3::WifiNetDevice", "manager" ),  
-            "init_code": connect_manager_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::IdealWifiManager", "dev" ),
-            "to":   ( "ns3", "ns3::WifiNetDevice", "manager" ),  
-            "init_code": connect_manager_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::MinstrelWifiManager", "dev" ),
-            "to":   ( "ns3", "ns3::WifiNetDevice", "manager" ),  
-            "init_code": connect_manager_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::OnoeWifiManager", "dev" ),
-            "to":   ( "ns3", "ns3::WifiNetDevice", "manager" ),  
-            "init_code": connect_manager_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::RraaWifiManager", "dev" ),
-            "to":   ( "ns3", "ns3::WifiNetDevice", "manager" ),  
-            "init_code": connect_manager_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::YansWifiPhy", "dev" ),
-            "to":   ( "ns3", "ns3::WifiNetDevice", "phy" ),  
-            "init_code": connect_phy_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::SimpleOfdmWimaxPhy", "dev" ),
-            "to":   ( "ns3", "ns3::SubscriberStationNetDevice", "phy" ),  
-            "init_code": connect_phy_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::SimpleOfdmWimaxPhy", "dev" ),
-            "to":   ( "ns3", "ns3::BaseStationNetDevice", "phy" ),  
-            "init_code": connect_phy_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::ApWifiMac", "dev" ),
-            "to":   ( "ns3", "ns3::WifiNetDevice", "mac" ),
-            "init_code": connect_mac_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::StaWifiMac", "dev" ),
-            "to":   ( "ns3", "ns3::WifiNetDevice", "mac" ),
-            "init_code": connect_mac_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::AdhocWifiMac", "dev" ),
-            "to":   ( "ns3", "ns3::WifiNetDevice", "mac" ),
-            "init_code": connect_mac_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::RateErrorModel", "dev" ),
-            "to":   ( "ns3", "ns3::CsmaNetDevice", "err" ),
-            "init_code": connect_error_model_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::RateErrorModel", "dev" ),
-            "to":   ( "ns3", "ns3::PointToPointNetDevice", "err" ),
-            "init_code": connect_error_model_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::ListErrorModel", "dev" ),
-            "to":   ( "ns3", "ns3::CsmaNetDevice", "err" ),
-            "init_code": connect_error_model_device,
-            "can_cross": False
-    }),
-    dict({
-            "from": ( "ns3", "ns3::ListErrorModel", "dev" ),
-            "to":   ( "ns3", "ns3::PointToPointNetDevice", "err" ),
-            "init_code": connect_error_model_device,
-            "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::NistErrorRateModel", "phy" ),        
-        "to":   ( "ns3", "ns3::YansWifiPhy", "err" ),
-        "init_code": connect_error_model_phy,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::CsmaChannel", "devs" ),
-        "to":   ( "ns3", "ns3::CsmaNetDevice", "chan" ),
-        "init_code": connect_channel_device,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::PointToPointChannel", "dev2" ),
-        "to":   ( "ns3", "ns3::PointToPointNetDevice", "chan" ),
-        "init_code": connect_channel_device,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::SimpleChannel", "devs" ),
-        "to":   ( "ns3", "ns3::SimpleNetDevice", "chan" ),
-        "init_code": connect_simple_channel_device,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::SimpleOfdmWimaxChannel", "devs" ),
-        "to":   ( "ns3", "ns3::SubscriberStationNetDevice", "chan" ),
-        "init_code": connect_channel_device,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::SimpleOfdmWimaxChannel", "devs" ),
-        "to":   ( "ns3", "ns3::BaseStationNetDevice", "chan" ),
-        "init_code": connect_channel_device,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::YansWifiChannel", "phys" ),
-        "to":   ( "ns3", "ns3::YansWifiPhy", "chan" ),  
-        "init_code": connect_simple_channel_device,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::YansWifiChannel", "phys" ),
-        "to":   ( "ns3", "ns3::YansWifiPhy", "chan" ),  
-        "init_code": connect_simple_channel_device,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::LogDistancePropagationLossModel", "prev" ),
-        "to":   ( "ns3", "ns3::YansWifiChannel", "loss" ),  
-        "init_code": connect_loss_channel,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::LogDistancePropagationLossModel", "prev" ),
-        "to":   ( "ns3", "ns3::LogDistancePropagationLossModel", "next" ),  
-        "init_code": connect_next_loss,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::ConstantSpeedPropagationDelayModel", "chan" ),
-        "to":   ( "ns3", "ns3::YansWifiChannel", "delay" ),  
-        "init_code": connect_delay_channel,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Node", "apps" ),
-        "to":   ( "ns3", "ns3::OnOffApplication", "node" ),
-        "init_code": connect_node_application,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Node", "apps" ),
-        "to":   ( "ns3", "ns3::PacketSink", "node" ),
-        "init_code": connect_node_application,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Node", "apps" ),
-        "to":   ( "ns3", "ns3::UdpEchoClient", "node" ),
-        "init_code": connect_node_application,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Node", "apps" ),
-        "to":   ( "ns3", "ns3::UdpEchoServer", "node" ),
-        "init_code": connect_node_application,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Node", "apps" ),
-        "to":   ( "ns3", "ns3::UdpClient", "node" ),
-        "init_code": connect_node_application,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Node", "apps" ),
-        "to":   ( "ns3", "ns3::UdpServer", "node" ),
-        "init_code": connect_node_application,
-        "can_cross": False
-    }),    dict({
-        "from": ( "ns3", "ns3::Node", "apps" ),
-        "to":   ( "ns3", "ns3::V4Ping", "node" ),
-        "init_code": connect_node_application,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Node", "protos" ),
-        "to":   ( "ns3", "ns3::ArpL3Protocol", "node" ),
-        "init_code": connect_node_other,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Node", "protos" ),
-        "to":   ( "ns3", "ns3::Icmpv4L4Protocol", "node" ),
-        "init_code": connect_node_other,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Node", "protos" ),
-        "to":   ( "ns3", "ns3::Icmpv6L4Protocol", "node" ),
-        "init_code": connect_node_other,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Node", "protos" ),
-        "to":   ( "ns3", "ns3::Ipv4L3Protocol", "node" ),
-        "init_code": connect_node_other,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Node", "protos" ),
-        "to":   ( "ns3", "ns3::Ipv6L3Protocol", "node" ),
-        "init_code": connect_node_other,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Node", "protos" ),
-        "to":   ( "ns3", "ns3::UdpL4Protocol", "node" ),
-        "init_code": connect_node_other,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Node", "protos" ),
-        "to":   ( "ns3", "ns3::TcpL4Protocol", "node" ),
-        "init_code": connect_node_other,
-        "can_cross": False
-    }),
-
-
-    dict({
-        "from": ( "ns3", "ns3::Nepi::MobilityPair", "matrix" ),
-        "to":   ( "ns3", "ns3::MatrixPropagationLossModel", "mobpair" ),
-        "init_code": connect_dummy,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Nepi::MobilityPair", "ma" ),
-        "to":   ( "ns3", "ns3::ConstantVelocityMobilityModel", "mp" ),
-        "init_code": connect_dummy,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Nepi::MobilityPair", "mb" ),
-        "to":   ( "ns3", "ns3::ConstantVelocityMobilityModel", "mp" ),
-        "init_code": connect_dummy,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Nepi::MobilityPair", "ma" ),
-        "to":   ( "ns3", "ns3::ConstantAccelerationMobilityModel", "mp" ),
-        "init_code": connect_dummy,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Nepi::MobilityPair", "mb" ),
-        "to":   ( "ns3", "ns3::ConstantAccelerationMobilityModel", "mp" ),
-        "init_code": connect_dummy,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Nepi::MobilityPair", "ma" ),
-        "to":   ( "ns3", "ns3::ConstantPositionMobilityModel", "mp" ),
-        "init_code": connect_dummy,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Nepi::MobilityPair", "mb" ),
-        "to":   ( "ns3", "ns3::ConstantPositionMobilityModel", "mp" ),
-        "init_code": connect_dummy,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::MatrixPropagationLossModel", "chan" ),  
-        "to":   ( "ns3", "ns3::YansWifiChannel", "loss" ),
-        "init_code": connect_loss_channel,
-        "can_cross": False
-    }),
-
-
-
-    dict({
-        "from": ( "ns3", "ns3::Node", "mobility" ),
-        "to":   ( "ns3", "ns3::ConstantAccelerationMobilityModel", "node" ),
-        "init_code": connect_node_other,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Node", "mobility" ),
-        "to":   ( "ns3", "ns3::ConstantPositionMobilityModel", "node" ),
-        "init_code": connect_node_other,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Node", "mobility" ),
-        "to":   ( "ns3", "ns3::ConstantVelocityMobilityModel", "node" ),
-        "init_code": connect_node_other,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Node", "mobility" ),
-        "to":   ( "ns3", "ns3::HierarchicalMobilityModel", "node" ),
-        "init_code": connect_node_other,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Node", "mobility" ),
-        "to":   ( "ns3", "ns3::RandomDirection2dMobilityModel", "node" ),
-        "init_code": connect_node_other,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Node", "mobility" ),
-        "to":   ( "ns3", "ns3::RandomWalk2dMobilityModel", "node" ),
-        "init_code": connect_node_other,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Node", "mobility" ),
-        "to":   ( "ns3", "ns3::RandomWaypointMobilityModel", "node" ),
-        "init_code": connect_node_other,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Node", "mobility" ),
-        "to":   ( "ns3", "ns3::WaypointMobilityModel", "node" ),
-        "init_code": connect_node_other,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::SubscriberStationNetDevice", "sflows" ),
-        "to":   ( "ns3", "ns3::ServiceFlow", "dev" ),
-        "init_code": connect_ss_sflow,
-        "can_cross": False
-    }),
-#    dict({
-#        "from": ( "ns3", "ns3::BaseStationNetDevice", "sflows" ),
-#        "to":   ( "ns3", "ns3::ServiceFlow", "dev" ),
-#        "init_code": connect_bs_sflow,
-#        "can_cross": False
-#    }),
-    dict({
-        "from": ( "ns3", "ns3::BaseStationNetDevice", "uplnk" ),
-        "to":   ( "ns3", "ns3::UplinkSchedulerSimple", "dev" ),
-        "init_code": connect_bstation_linksched,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::BaseStationNetDevice", "uplnk" ),
-        "to":   ( "ns3", "ns3::UplinkSchedulerRtps", "dev" ),
-        "init_code": connect_bstation_linksched,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::BaseStationNetDevice", "dwnlnk" ),
-        "to":   ( "ns3", "ns3::BSSchedulerSimple", "dev" ),
-        "init_code": connect_bstation_linksched,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::BaseStationNetDevice", "dwnlnk" ),
-        "to":   ( "ns3", "ns3::BSSchedulerRtps", "dev" ),
-        "init_code": connect_bstation_linksched,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::IpcsClassifierRecord", "sflow" ),
-        "to":   ( "ns3", "ns3::ServiceFlow", "classif" ),
-        "init_code": connect_classifier_sflow,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::FdNetDevice", "->fd" ),
-        "to":   ( None, None, "fd->" ),
-        "init_code": connect_fd,
-        "can_cross": True
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Nepi::TunChannel", "fd->" ),
-        "to":   ( "ns3", "ns3::FdNetDevice", "->fd" ),
-        "init_code": connect_tunchannel_fd,
-        "can_cross": False
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Nepi::TunChannel", "tcp"),
-        "to":   (None, None, "tcp"),
-        "init_code": functools.partial(crossconnect_tunchannel_peer_init,"tcp"),
-        "compl_code": functools.partial(crossconnect_tunchannel_peer_compl,"tcp"),
-        "can_cross": True
-    }),
-    dict({
-        "from": ( "ns3", "ns3::Nepi::TunChannel", "udp"),
-        "to":   (None, None, "udp"),
-        "init_code": functools.partial(crossconnect_tunchannel_peer_init,"udp"),
-        "compl_code": functools.partial(crossconnect_tunchannel_peer_compl,"udp"),
-        "can_cross": True
-    }),
-]
diff --git a/src/nepi/testbeds/ns3/constants.py b/src/nepi/testbeds/ns3/constants.py
deleted file mode 100644 (file)
index 27fb726..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-# -*- coding: utf-8 -*-
-
-TESTBED_ID = "ns3"
-TESTBED_VERSION = "3.11"
-
diff --git a/src/nepi/testbeds/ns3/execute.py b/src/nepi/testbeds/ns3/execute.py
deleted file mode 100644 (file)
index 7de7bde..0000000
+++ /dev/null
@@ -1,357 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from util import  _get_ipv4_protocol_guid, _get_node_guid, _get_dev_number
-from nepi.core import testbed_impl
-from nepi.core.attributes import Attribute
-from constants import TESTBED_ID, TESTBED_VERSION
-from nepi.util.constants import TIME_NOW, TestbedStatus as TS
-import os
-import sys
-import threading
-import random
-import socket
-import weakref
-
-def load_ns3_module():
-    import sys
-    if 'ns3' in sys.modules:
-        return
-
-    import ctypes
-    import imp
-    import re
-
-    bindings = os.environ["NEPI_NS3BINDINGS"] \
-               if "NEPI_NS3BINDINGS" in os.environ else None
-    libdir = os.environ["NEPI_NS3LIBRARY"] \
-               if "NEPI_NS3LIBRARY" in os.environ else None
-
-    if libdir:
-        files = os.listdir(libdir)
-        regex = re.compile("(.*\.so)$")
-        libs = [m.group(1) for filename in files for m in [regex.search(filename)] if m]
-
-        libscp = list(libs)
-        while len(libs) > 0:
-            for lib in libscp:
-                libfile = os.path.join(libdir, lib)
-                try:
-                    ctypes.CDLL(libfile, ctypes.RTLD_GLOBAL)
-                    libs.remove(lib)
-                except:
-                    pass
-            # if did not load any libraries in the last iteration
-            if len(libscp) == len(libs):
-                raise RuntimeError("Imposible to load shared libraries %s" % str(libs))
-            libscp = list(libs)
-
-    if bindings:
-        sys.path.append(bindings)
-
-    import ns3_bindings_import as mod
-    sys.modules["ns3"] = mod
-
-class TestbedController(testbed_impl.TestbedController):
-    from nepi.util.tunchannel_impl import TunChannel
-    
-    LOCAL_FACTORIES = {
-        'ns3::Nepi::TunChannel' : TunChannel,
-    }
-   
-    DUMMY_FACTORIES = ['ns3::Nepi::MobilityPair']
-
-    LOCAL_TYPES = tuple(LOCAL_FACTORIES.values())
-
-    def __init__(self):
-        super(TestbedController, self).__init__(TESTBED_ID, TESTBED_VERSION)
-        self._ns3 = None
-        self._home_directory = None
-        self._traces = dict()
-        self._simulator_thread = None
-        self._condition = None
-
-    @property
-    def home_directory(self):
-        return self._home_directory
-
-    @property
-    def ns3(self):
-        return self._ns3
-
-    def do_setup(self):
-        self._home_directory = self._attributes.\
-            get_attribute_value("homeDirectory")
-        self._ns3 = self._configure_ns3_module()
-        
-        # create home...
-        home = os.path.normpath(self.home_directory)
-        if not os.path.exists(home):
-            os.makedirs(home, 0755)
-        
-        super(TestbedController, self).do_setup()
-
-    def start(self):
-        super(TestbedController, self).start()
-        self._condition = threading.Condition()
-        self._simulator_thread = threading.Thread(target = self._simulator_run,
-                args = [self._condition])
-        self._simulator_thread.setDaemon(True)
-        self._simulator_thread.start()
-
-    def stop(self, time = TIME_NOW):
-        super(TestbedController, self).stop(time)
-        self._stop_simulation(time)
-
-    def set(self, guid, name, value, time = TIME_NOW):
-        super(TestbedController, self).set(guid, name, value, time)
-
-        # TODO: take on account schedule time for the task
-        factory_id = self._create[guid]
-        if factory_id in self.DUMMY_FACTORIES:
-            return 
-
-        factory = self._factories[factory_id]
-        element = self._elements[guid]
-        if factory_id in self.LOCAL_FACTORIES:
-            setattr(element, name, value)
-        elif not factory.box_attributes.is_attribute_metadata(name):
-            if name == "Up":
-                ipv4_guid =  _get_ipv4_protocol_guid(self, guid)
-                if not ipv4_guid in self._elements:
-                    return
-                ipv4 = self._elements[ipv4_guid]
-                if value == False:
-                    nint = ipv4.GetNInterfaces()
-                    for i in xrange(0, nint):
-                        ipv4.SetDown(i)
-                else:
-                    nint = ipv4.GetNInterfaces()
-                    for i in xrange(0, nint):
-                        ipv4.SetUp(i)
-            else:
-                ns3_value = self._to_ns3_value(guid, name, value)
-                self._set_attribute(name, ns3_value, element)
-
-    def get(self, guid, name, time = TIME_NOW):
-        value = super(TestbedController, self).get(guid, name, time)
-
-        # TODO: take on account schedule time for the task
-        factory_id = self._create[guid]
-        if factory_id in self.DUMMY_FACTORIES:
-            return 
-
-        factory = self._factories[factory_id]
-        element = self._elements[guid]
-        if factory_id in self.LOCAL_FACTORIES:
-            if hasattr(element, name):
-                return getattr(element, name)
-            else:
-                return value
-        else: 
-            if name == "Up":
-                ipv4_guid =  _get_ipv4_protocol_guid(self, guid)
-                if not ipv4_guid in self._elements:
-                    return True
-                ipv4 = self._elements[ipv4_guid]
-                nint = ipv4.GetNInterfaces()
-                value = True
-                for i in xrange(0, nint):
-                    value = ipv4.IsUp(i)
-                    if not value: break
-                return value
-
-        if factory.box_attributes.is_attribute_metadata(name):
-            return value
-
-        TypeId = self.ns3.TypeId()
-        typeid = TypeId.LookupByName(factory_id)
-        info = TypeId.AttributeInformation()
-        if not typeid or not typeid.LookupAttributeByName(name, info):
-            raise AttributeError("Invalid attribute %s for element type %d" % \
-                (name, guid))
-        checker = info.checker
-        ns3_value = checker.Create() 
-        self._get_attribute(name, ns3_value, element)
-        value = ns3_value.SerializeToString(checker)
-        attr_type = factory.box_attributes.get_attribute_type(name)
-
-        if attr_type == Attribute.INTEGER:
-            return int(value)
-        if attr_type == Attribute.DOUBLE:
-            return float(value)
-        if attr_type == Attribute.BOOL:
-            return value == "true"
-        return value
-
-    def action(self, time, guid, action):
-        raise NotImplementedError
-
-    def trace_filepath(self, guid, trace_id):
-        filename = self._traces[guid][trace_id]
-        return os.path.join(self.home_directory, filename)
-
-    def trace_filename(self, guid, trace_id):
-        return self._traces[guid][trace_id]
-
-    def follow_trace(self, guid, trace_id, filename):
-        if not guid in self._traces:
-            self._traces[guid] = dict()
-        self._traces[guid][trace_id] = filename
-
-    def shutdown(self):
-        for element in self._elements.itervalues():
-            if isinstance(element, self.LOCAL_TYPES):
-                # graceful shutdown of locally-implemented objects
-                element.cleanup()
-        if self.ns3:
-            if not self.ns3.Simulator.IsFinished():
-                self.stop()
-            
-            # TODO!!!! SHOULD WAIT UNTIL THE THREAD FINISHES
-            if self._simulator_thread:
-                self._simulator_thread.join()
-            
-            self.ns3.Simulator.Destroy()
-        
-        self._elements.clear()
-        
-        self._ns3 = None
-        sys.stdout.flush()
-        sys.stderr.flush()
-
-    def _simulator_run(self, condition):
-        # Run simulation
-        self.ns3.Simulator.Run()
-        # Signal condition on simulation end to notify waiting threads
-        condition.acquire()
-        condition.notifyAll()
-        condition.release()
-
-    def _schedule_event(self, condition, func, *args):
-        """Schedules event on running experiment"""
-        def execute_event(contextId, condition, has_event_occurred, func, *args):
-            # exec func
-            try:
-                func(*args)
-            finally:
-                # flag event occured
-                has_event_occurred[0] = True
-                # notify condition indicating attribute was set
-                condition.acquire()
-                condition.notifyAll()
-                condition.release()
-
-        # contextId is defined as general context
-        contextId = long(0xffffffff)
-        # delay 0 means that the event is expected to execute inmediately
-        delay = self.ns3.Seconds(0)
-        # flag to indicate that the event occured
-        # because bool is an inmutable object in python, in order to create a
-        # bool flag, a list is used as wrapper
-        has_event_occurred = [False]
-        condition.acquire()
-        try:
-            if not self.ns3.Simulator.IsFinished():
-                self.ns3.Simulator.ScheduleWithContext(contextId, delay, execute_event,
-                     condition, has_event_occurred, func, *args)
-                while not has_event_occurred[0] and not self.ns3.Simulator.IsFinished():
-                    condition.wait()
-        finally:
-            condition.release()
-
-    def _set_attribute(self, name, ns3_value, element):
-        if self.status() == TS.STATUS_STARTED:
-            # schedule the event in the Simulator
-            self._schedule_event(self._condition, self._set_ns3_attribute, 
-                    name, ns3_value, element)
-        else:
-            self._set_ns3_attribute(name, ns3_value, element)
-
-    def _get_attribute(self, name, ns3_value, element):
-        if self.status() == TS.STATUS_STARTED:
-            # schedule the event in the Simulator
-            self._schedule_event(self._condition, self._get_ns3_attribute, 
-                    name, ns3_value, element)
-        else:
-            self._get_ns3_attribute(name, ns3_value, element)
-
-    def _set_ns3_attribute(self, name, ns3_value, element):
-        element.SetAttribute(name, ns3_value)
-
-    def _get_ns3_attribute(self, name, ns3_value, element):
-        element.GetAttribute(name, ns3_value)
-
-    def _stop_simulation(self, time):
-        if self.status() == TS.STATUS_STARTED:
-            # schedule the event in the Simulator
-            self._schedule_event(self._condition, self._stop_ns3_simulation, 
-                    time)
-        else:
-            self._stop_ns3_simulation(time)
-
-    def _stop_ns3_simulation(self, time = TIME_NOW):
-        if not self.ns3:
-            return
-        if time == TIME_NOW:
-            self.ns3.Simulator.Stop()
-        else:
-            self.ns3.Simulator.Stop(self.ns3.Time(time))
-
-    def _to_ns3_value(self, guid, name, value):
-        factory_id = self._create[guid]
-        TypeId = self.ns3.TypeId()
-        typeid = TypeId.LookupByName(factory_id)
-        info = TypeId.AttributeInformation()
-        if not typeid.LookupAttributeByName(name, info):
-            raise RuntimeError("Attribute %s doesn't belong to element %s" \
-                   % (name, factory_id))
-        str_value = str(value)
-        if isinstance(value, bool):
-            str_value = str_value.lower()
-        checker = info.checker
-        ns3_value = checker.Create()
-        ns3_value.DeserializeFromString(str_value, checker)
-        return ns3_value
-
-    def _configure_ns3_module(self):
-        simu_impl_type = self._attributes.get_attribute_value(
-                "SimulatorImplementationType")
-        sched_impl_type = self._attributes.get_attribute_value(
-                "SchedulerType")
-        checksum = self._attributes.get_attribute_value("ChecksumEnabled")
-        stop_time = self._attributes.get_attribute_value("StopTime")
-
-        load_ns3_module()
-
-        import ns3 as mod
-        if simu_impl_type:
-            value = mod.StringValue(simu_impl_type)
-            mod.GlobalValue.Bind ("SimulatorImplementationType", value)
-        if sched_impl_type:
-            value = mod.StringValue(sched_impl_type)
-            mod.GlobalValue.Bind ("SchedulerType", value)
-        if checksum:
-            value = mod.BooleanValue(checksum)
-            mod.GlobalValue.Bind ("ChecksumEnabled", value)
-        if stop_time:
-            value = mod.Time(stop_time)
-            mod.Simulator.Stop (value)
-        return mod
-
-    def _get_construct_parameters(self, guid):
-        params = self._get_parameters(guid)
-        construct_params = dict()
-        factory_id = self._create[guid]
-        TypeId = self.ns3.TypeId()
-        typeid = TypeId.LookupByName(factory_id)
-        for name, value in params.iteritems():
-            info = self.ns3.TypeId.AttributeInformation()
-            found = typeid.LookupAttributeByName(name, info)
-            if found and \
-                (info.flags & TypeId.ATTR_CONSTRUCT == TypeId.ATTR_CONSTRUCT):
-                construct_params[name] = value
-        return construct_params
-
-
-
diff --git a/src/nepi/testbeds/ns3/factories_metadata.py b/src/nepi/testbeds/ns3/factories_metadata.py
deleted file mode 100644 (file)
index a7921a8..0000000
+++ /dev/null
@@ -1,2642 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from util import  _get_ipv4_protocol_guid, _get_node_guid, _get_dev_number
-from nepi.util import tags
-from nepi.util.constants import AF_INET, ApplicationStatus as AS, \
-        FactoryCategories as FC
-from nepi.util.tunchannel_impl import \
-    preconfigure_tunchannel, postconfigure_tunchannel, \
-    prestart_tunchannel, create_tunchannel
-import re
-
-wifi_standards = dict({
-    "WIFI_PHY_STANDARD_holland": 5,
-    "WIFI_PHY_STANDARD_80211p_SCH": 7,
-    "WIFI_PHY_STANDARD_80211_5Mhz": 4,
-    "WIFI_PHY_UNKNOWN": 8,
-    "WIFI_PHY_STANDARD_80211_10Mhz": 3,
-    "WIFI_PHY_STANDARD_80211g": 2,
-    "WIFI_PHY_STANDARD_80211p_CCH": 6,
-    "WIFI_PHY_STANDARD_80211a": 0,
-    "WIFI_PHY_STANDARD_80211b": 1
-})
-
-l4_protocols = dict({
-    "Icmpv4L4Protocol": 1,
-    "UdpL4Protocol": 17,
-    "TcpL4Protocol": 6,
-})
-
-service_flow_direction = dict({
-    "SF_DIRECTION_UP": 1,
-    "SF_DIRECTION_DOWN": 0,
-})
-
-service_flow_scheduling_type = dict ({
-    "SF_TYPE_NONE": 0,
-    "SF_TYPE_UNDEF": 1, 
-    "SF_TYPE_BE": 2,
-    "SF_TYPE_NRTPS": 3,
-    "SF_TYPE_RTPS": 4,
-    "SF_TYPE_UGS": 6, 
-    "SF_TYPE_ALL": 255
-})
-
-def _follow_trace(testbed_instance, guid, trace_id, filename):
-    testbed_instance.follow_trace(guid, trace_id, filename)
-    filepath = testbed_instance.trace_filepath(guid, trace_id)
-    return filepath
-
-### create traces functions ###
-
-def p2pascii_trace(testbed_instance, guid, trace_id):
-    node_guid = _get_node_guid(testbed_instance, guid)
-    interface_number = _get_dev_number(testbed_instance, guid)
-    element = testbed_instance._elements[guid]
-    filename = "trace-p2p-node-%d-dev-%d.tr" % (node_guid, interface_number)
-    filepath = _follow_trace(testbed_instance, guid, trace_id, filename)
-    helper = testbed_instance.ns3.PointToPointHelper()
-    asciiHelper = testbed_instance.ns3.AsciiTraceHelper()
-    stream = asciiHelper.CreateFileStream(filepath)
-    helper.EnableAscii(stream, element)
-
-def p2ppcap_trace(testbed_instance, guid, trace_id):
-    node_guid = _get_node_guid(testbed_instance, guid)
-    interface_number = _get_dev_number(testbed_instance, guid)
-    element = testbed_instance._elements[guid]
-    filename = "trace-p2p-node-%d-dev-%d.pcap" % (node_guid, interface_number)
-    filepath = _follow_trace(testbed_instance, guid, trace_id, filename)
-    helper = testbed_instance.ns3.PointToPointHelper()
-    helper.EnablePcap(filepath, element, explicitFilename = True)
-
-def _csmapcap_trace(testbed_instance, guid, trace_id, promisc):
-    node_guid = _get_node_guid(testbed_instance, guid)
-    interface_number = _get_dev_number(testbed_instance, guid)
-    element = testbed_instance._elements[guid]
-    filename = "trace-csma-node-%d-dev-%d.pcap" % (node_guid, interface_number)
-    filepath = _follow_trace(testbed_instance, guid, trace_id, filename)
-    helper = testbed_instance.ns3.CsmaHelper()
-    helper.EnablePcap(filepath, element, promiscuous = promisc, 
-            explicitFilename = True)
-
-def csmapcap_trace(testbed_instance, guid, trace_id):
-    promisc = False
-    _csmapcap_trace(testbed_instance, guid, trace_id, promisc)
-
-def csmapcap_promisc_trace(testbed_instance, guid, trace_id):
-    promisc = True
-    _csmapcap_trace(testbed_instance, guid, trace_id, promisc)
-
-def fdpcap_trace(testbed_instance, guid, trace_id):
-    node_guid = _get_node_guid(testbed_instance, guid)
-    interface_number = _get_dev_number(testbed_instance, guid)
-    element = testbed_instance._elements[guid]
-    filename = "trace-fd-node-%d-dev-%d.pcap" % (node_guid, interface_number)
-    filepath = _follow_trace(testbed_instance, guid, trace_id, filename)
-    helper = testbed_instance.ns3.FdNetDeviceHelper()
-    helper.EnablePcap(filepath, element, explicitFilename = True)
-
-def fdascii_trace(testbed_instance, guid, trace_id):
-    node_guid = _get_node_guid(testbed_instance, guid)
-    interface_number = _get_dev_number(testbed_instance, guid)
-    element = testbed_instance._elements[guid]
-    filename = "trace-fd-node-%d-dev-%d.tr" % (node_guid, interface_number)
-    filepath = _follow_trace(testbed_instance, guid, trace_id, filename)
-    helper = testbed_instance.ns3.FdNetDeviceHelper()
-    asciiHelper = testbed_instance.ns3.AsciiTraceHelper()
-    stream = asciiHelper.CreateFileStream(filepath)
-    helper.EnableAscii(stream, element)
-
-def yanswifipcap_trace(testbed_instance, guid, trace_id):
-    dev_guid = testbed_instance.get_connected(guid, "dev", "phy")[0]
-    node_guid = _get_node_guid(testbed_instance, dev_guid)
-    interface_number = _get_dev_number(testbed_instance, dev_guid)
-    element = testbed_instance._elements[dev_guid]
-    filename = "trace-yanswifi-node-%d-dev-%d.pcap" % (node_guid, interface_number)
-    filepath = _follow_trace(testbed_instance, guid, trace_id, filename)
-    helper = testbed_instance.ns3.YansWifiPhyHelper()
-    helper.EnablePcap(filepath, element, explicitFilename = True)
-
-def wimaxascii_trace(testbed_instance, guid, trace_id):
-    node_guid = _get_node_guid(testbed_instance, guid)
-    interface_number = _get_dev_number(testbed_instance, guid)
-    element = testbed_instance._elements[guid]
-    filename = "trace-wimax-node-%d-dev-%d.tr" % (node_guid, interface_number)
-    filepath = _follow_trace(testbed_instance, guid, trace_id, filename)
-    helper = testbed_instance.ns3.WimaxHelper()
-    asciiHelper = testbed_instance.ns3.AsciiTraceHelper()
-    stream = asciiHelper.CreateFileStream (filepath)
-    helper.EnableAscii(stream, element)
-    #helper.EnableLogComponents()
-
-def wimaxpcap_trace(testbed_instance, guid, trace_id):
-    node_guid = _get_node_guid(testbed_instance, guid)
-    interface_number = _get_dev_number(testbed_instance, guid)
-    element = testbed_instance._elements[guid]
-    filename = "trace-wimax-node-%d-dev-%d.pcap" % (node_guid, interface_number)
-    filepath = _follow_trace(testbed_instance, guid, trace_id, filename)
-    helper = testbed_instance.ns3.WimaxHelper()
-    helper.EnablePcap(filepath, element, explicitFilename = True)
-
-def rtt_trace(testbed_instance, guid, trace_id):
-    element = testbed_instance._elements[guid]
-    helper = testbed_instance.ns3.ScalarTraceHelper()
-    prefix = "trace-app-%d" % (guid, )
-    filename = helper.GetFilenameFromSource(prefix, element, trace_id)
-    filepath = _follow_trace(testbed_instance, guid, trace_id, filename)
-    prefix = filepath[:filepath.find(prefix)+len(prefix)]
-    helper.EnableTrace(element, trace_id, prefix, "T")
-
-trace_functions = dict({
-    "P2PPcapTrace": p2ppcap_trace,
-    "P2PAsciiTrace": p2pascii_trace,
-    "CsmaPcapTrace": csmapcap_trace,
-    "CsmaPcapPromiscTrace": csmapcap_promisc_trace,
-    "FdPcapTrace": fdpcap_trace,
-    "FdAsciiTrace": fdascii_trace,
-    "YansWifiPhyPcapTrace": yanswifipcap_trace,
-    "WimaxPcapTrace": wimaxpcap_trace,
-    "WimaxAsciiTrace": wimaxascii_trace,
-    "Rtt": rtt_trace,
-    })
-
-### Creation functions ###
-
-def create_element(testbed_instance, guid):
-    element_factory = testbed_instance.ns3.ObjectFactory()
-    factory_id = testbed_instance._create[guid]
-    element_factory.SetTypeId(factory_id) 
-    construct_parameters = testbed_instance._get_construct_parameters(guid)
-    for name, value in construct_parameters.iteritems():
-        ns3_value = testbed_instance._to_ns3_value(guid, name, value)
-        element_factory.Set(name, ns3_value)
-    element = element_factory.Create()
-    testbed_instance._elements[guid] = element
-
-def create_node(testbed_instance, guid):
-    create_element(testbed_instance, guid)
-    element = testbed_instance._elements[guid]
-    element.AggregateObject(testbed_instance.ns3.PacketSocketFactory())
-
-def create_wifi_phy(testbed_instance, guid):
-    create_element(testbed_instance, guid)
-    element = testbed_instance._elements[guid]
-    parameters = testbed_instance._get_parameters(guid)
-    standard = parameters.get("Standard")
-    if not standard:
-        raise RuntimeError("No wifi standard set for %d" % guid)
-    element.ConfigureStandard(wifi_standards[standard])
-
-def create_wifi_mac(testbed_instance, guid):
-    create_element(testbed_instance, guid)
-    element = testbed_instance._elements[guid]
-    parameters = testbed_instance._get_parameters(guid)
-    standard = parameters.get("Standard")
-    if not standard:
-        raise RuntimeError("No wifi standard set for %d" % guid)
-    element.ConfigureStandard(wifi_standards[standard])
-    qos = parameters.get("QosSupported")
-    # BUG: There seems to be an inheritance problem with the Python Bindings and SetQosSupported.
-    #      It seems to be onbly defined for regular-wifi-mac.h and not for its children...
-    #      Report this!
-    # element.SetQosSupported(qos)
-
-def create_waypoint_mobility(testbed_instance, guid):
-    create_element(testbed_instance, guid)
-    element = testbed_instance._elements[guid]
-    parameters = testbed_instance._get_parameters(guid)
-    ns3 = testbed_instance.ns3
-    waypoints = parameters.get("WaypointList", "")
-    waypoints = re.sub(" |\(|\)", "", waypoints)
-    for swp in waypoints.split(","):
-        dwp = swp.split(":")
-        t = str(dwp[0])
-        time = ns3.Time(t)
-        pos = ns3.Vector(float(dwp[1]), float(dwp[2]), float(dwp[3]))
-        waypoint = ns3.Waypoint(time, pos)
-        element.AddWaypoint(waypoint)
-
-def create_ipv4protocol(testbed_instance, guid):
-    create_element(testbed_instance, guid)
-    element = testbed_instance._elements[guid]
-    list_routing = testbed_instance.ns3.Ipv4ListRouting()
-    element.SetRoutingProtocol(list_routing)
-    static_routing = testbed_instance.ns3.Ipv4StaticRouting()
-    list_routing.AddRoutingProtocol(static_routing, 1)
-
-def create_element_no_constructor(testbed_instance, guid):
-    """ Create function for ns3 classes for which 
-        TypeId.HasConstructor == False"""
-    factory_id = testbed_instance._create[guid]
-    factory_name = factory_id.replace("ns3::", "")
-    constructor = getattr(testbed_instance.ns3, factory_name)
-    element = constructor()
-    testbed_instance._elements[guid] = element
-
-def create_base_station(testbed_instance, guid):
-    node_guid = _get_node_guid(testbed_instance, guid)
-    node = testbed_instance._elements[node_guid]
-    phy_guids = testbed_instance.get_connected(guid, "phy", "dev")
-    if len(phy_guids) == 0:
-        raise RuntimeError("No PHY was found for station %d" % guid)
-    phy = testbed_instance._elements[phy_guids[0]]
-    uplnk_guids = testbed_instance.get_connected(guid, "uplnk", "dev")
-    if len(uplnk_guids) == 0:
-        raise RuntimeError("No uplink scheduler was found for station %d" % guid)
-    uplnk = testbed_instance._elements[uplnk_guids[0]]
-    dwnlnk_guids = testbed_instance.get_connected(guid, "dwnlnk", "dev")
-    if len(dwnlnk_guids) == 0:
-        raise RuntimeError("No downlink scheduler was found for station %d" % guid)
-    dwnlnk = testbed_instance._elements[dwnlnk_guids[0]]
-    element = testbed_instance.ns3.BaseStationNetDevice(node, phy, uplnk, dwnlnk)
-    testbed_instance._elements[guid] = element
-
-def create_subscriber_station(testbed_instance, guid):
-    node_guid = _get_node_guid(testbed_instance, guid)
-    node = testbed_instance._elements[node_guid]
-    phy_guids = testbed_instance.get_connected(guid, "phy", "dev")
-    if len(phy_guids) == 0:
-        raise RuntimeError("No PHY was found for station %d" % guid)
-    phy = testbed_instance._elements[phy_guids[0]]
-    element = testbed_instance.ns3.SubscriberStationNetDevice(node, phy)
-    element.SetModulationType(testbed_instance.ns3.WimaxPhy.MODULATION_TYPE_QAM16_12)
-    testbed_instance._elements[guid] = element
-
-def create_wimax_channel(testbed_instance, guid):
-    element = testbed_instance.ns3.SimpleOfdmWimaxChannel(testbed_instance.ns3.SimpleOfdmWimaxChannel.COST231_PROPAGATION)
-    testbed_instance._elements[guid] = element
-
-def create_wimax_phy(testbed_instance, guid):
-    element = testbed_instance.ns3.SimpleOfdmWimaxPhy()
-    testbed_instance._elements[guid] = element
-
-def create_service_flow(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    direction = parameters.get("Direction")
-    if direction == None:
-        raise RuntimeError("No SchedulingType was found for service flow %d" % guid)
-    sched = parameters.get("SchedulingType")
-    if sched == None:
-        raise RuntimeError("No SchedulingType was found for service flow %d" % guid)
-    ServiceFlow = testbed_instance.ns3.ServiceFlow
-    direction = service_flow_direction[direction]
-    sched = service_flow_scheduling_type[sched]
-    element = ServiceFlow(direction)
-    element.SetCsSpecification(ServiceFlow.IPV4)
-    element.SetServiceSchedulingType(sched) 
-    element.SetMaxSustainedTrafficRate(100)
-    element.SetMinReservedTrafficRate(1000000)
-    element.SetMinTolerableTrafficRate(1000000)
-    element.SetMaximumLatency(100)
-    element.SetMaxTrafficBurst(2000)
-    element.SetTrafficPriority(1)
-    element.SetUnsolicitedGrantInterval(1)
-    element.SetMaxSustainedTrafficRate(70)
-    element.SetToleratedJitter(10)
-    element.SetSduSize(49)
-    element.SetRequestTransmissionPolicy(0)
-    testbed_instance._elements[guid] = element
-
-def create_ipcs_classifier_record(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    src_address = parameters.get("SrcAddress")
-    if src_address == None:
-        raise RuntimeError("No SrcAddress was found for classifier %d" % guid)
-    src_address = testbed_instance.ns3.Ipv4Address(src_address)
-    src_mask = parameters.get("SrcMask")
-    if src_mask == None:
-        raise RuntimeError("No SrcMask was found for classifier %d" % guid)
-    src_mask = testbed_instance.ns3.Ipv4Mask(src_mask)
-    dst_address = parameters.get("DstAddress")
-    if dst_address == None:
-        raise RuntimeError("No Dstddress was found for classifier %d" % guid)
-    dst_address = testbed_instance.ns3.Ipv4Address(dst_address)
-    dst_mask = parameters.get("DstMask")
-    if dst_mask == None:
-        raise RuntimeError("No DstMask was found for classifier %d" % guid)
-    dst_mask = testbed_instance.ns3.Ipv4Mask(dst_mask)
-    src_port_low = parameters.get("SrcPortLow")
-    if src_port_low == None:
-        raise RuntimeError("No SrcPortLow was found for classifier %d" % guid)
-    src_port_high = parameters.get("SrcPortHigh")
-    if src_port_high == None:
-        raise RuntimeError("No SrcPortHigh was found for classifier %d" % guid)
-    dst_port_low = parameters.get("DstPortLow")
-    if dst_port_low == None:
-        raise RuntimeError("No DstPortLow was found for classifier %d" % guid)
-    dst_port_high = parameters.get("DstPortHigh")
-    if dst_port_high == None:
-        raise RuntimeError("No DstPortHigh was found for classifier %d" % guid)
-    protocol = parameters.get("Protocol")
-    if protocol == None or protocol not in l4_protocols:
-        raise RuntimeError("No Protocol was found for classifier %d" % guid)
-    priority = parameters.get("Priority")
-    if priority == None:
-        raise RuntimeError("No Priority was found for classifier %d" % guid)
-    element = testbed_instance.ns3.IpcsClassifierRecord(src_address, src_mask,
-        dst_address, dst_mask, src_port_low, src_port_high, dst_port_low, 
-        dst_port_high, l4_protocols[protocol], priority)
-    testbed_instance._elements[guid] = element
-
-def create_matrix_propagation(testbed_instance, guid):
-    create_element(testbed_instance, guid)
-    element = testbed_instance._elements[guid]
-    mp_guids = testbed_instance.get_connected(guid, "mobpair", "matrix")
-    for mpg in mp_guids:
-        mas = testbed_instance.get_connected(mpg, "ma", "mp")
-        if len(mas) != 1:
-            raise RuntimeError("Wrong number of source mobility models for MobilityPair %d" % guid)
-        mbs = testbed_instance.get_connected(mpg, "mb", "mp")
-        if len(mbs) != 1:
-            raise RuntimeError("Wrong number of destination mobility models for MobilityPair %d" % guid)
-        parameters = testbed_instance._get_parameters(mpg)
-        loss = parameters.get("Loss")
-        symmetric = parameters.get("Symmetric")
-        mas_elem = testbed_instance._elements[mas[0]]
-        mbs_elem = testbed_instance._elements[mbs[0]]
-        element.SetLoss(mas_elem, mbs_elem, loss, symmetric)
-
-### Start/Stop functions ###
-
-def start_application(testbed_instance, guid):
-    element = testbed_instance.elements[guid]
-    # BUG: without doing this explicit call it doesn't start!!!
-    # Shouldn't be enough to set the StartTime?
-    element.Start()
-
-def stop_application(testbed_instance, guid):
-    element = testbed_instance.elements[guid]
-    now = testbed_instance.ns3.Simulator.Now()
-    element.SetStopTime(now)
-
-### Status functions ###
-
-def status_application(testbed_instance, guid):
-    if guid not in testbed_instance.elements.keys():
-        raise RuntimeError("Can't get status on guid %d" % guid )
-    now = testbed_instance.ns3.Simulator.Now()
-    if now.IsZero():
-        return AS.STATUS_NOT_STARTED
-    app = testbed_instance.elements[guid]
-    parameters = testbed_instance._get_parameters(guid)
-    start_value = parameters.get("StartTime")
-    if start_value != None:
-        start_time = testbed_instance.ns3.Time(start_value)
-        if now.Compare(start_time) < 0:
-            return AS.STATUS_NOT_STARTED
-    stop_value = parameters.get("StopTime")
-    if stop_value != None:
-        stop_time = testbed_instance.ns3.Time(stop_value)
-        if now.Compare(stop_time) < 0:
-            return AS.STATUS_RUNNING
-        else:
-            return AS.STATUS_FINISHED
-    return AS.STATUS_UNDETERMINED
-
-### Configure functions ###
-
-def configure_traces(testbed_instance, guid):
-    traces = testbed_instance._get_traces(guid)
-    for trace_id in traces:
-        trace_func = trace_functions[trace_id]
-        trace_func(testbed_instance, guid, trace_id)
-
-def configure_element(testbed_instance, guid):
-    configure_traces(testbed_instance, guid)
-
-def configure_device(testbed_instance, guid):
-    configure_traces(testbed_instance, guid)
-    element = testbed_instance._elements[guid]
-
-    parameters = testbed_instance._get_parameters(guid)
-    address = parameters.get("macAddress")
-    if address:
-        macaddr = testbed_instance.ns3.Mac48Address(address)
-    else:
-        macaddr = testbed_instance.ns3.Mac48Address.Allocate()
-    element.SetAddress(macaddr)
-
-    if not guid in testbed_instance._add_address:
-        return
-    # search for the node asociated with the device
-    node_guid = _get_node_guid(testbed_instance, guid)
-    node = testbed_instance.elements[node_guid]
-    # search for the Ipv4L3Protocol asociated with the device
-    ipv4_guid = _get_ipv4_protocol_guid(testbed_instance, node_guid)
-    ipv4 = testbed_instance._elements[ipv4_guid]
-    ns3 = testbed_instance.ns3
-    # add addresses 
-    addresses = testbed_instance._add_address[guid]
-    for address in addresses:
-        (address, netprefix, broadcast) = address
-        # TODO: missing IPV6 addresses!!
-        ifindex = ipv4.AddInterface(element)
-        inaddr = ns3.Ipv4InterfaceAddress(ns3.Ipv4Address(address),
-                ns3.Ipv4Mask("/%d" % netprefix))
-        ipv4.AddAddress(ifindex, inaddr)
-        ipv4.SetMetric(ifindex, 1)
-        ipv4.SetUp(ifindex)
-
-def _add_static_route(ns3, static_routing, 
-        address, netprefix, nexthop_address, ifindex):
-    if netprefix == 0:
-        # Default route: 0.0.0.0/0
-        static_routing.SetDefaultRoute(nexthop_address, ifindex) 
-    elif netprefix == 32:
-        # Host route: x.y.z.w/32
-        static_routing.AddHostRouteTo(address, nexthop_address, ifindex) 
-    else:
-        # Network route: x.y.z.w/n
-        mask = ns3.Ipv4Mask("/%d" % netprefix) 
-        static_routing.AddNetworkRouteTo(address, mask, nexthop_address, 
-                ifindex) 
-
-def _add_static_route_if(ns3, static_routing, address, netprefix, ifindex):
-    if netprefix == 0:
-        # Default route: 0.0.0.0/0
-        static_routing.SetDefaultRoute(ifindex) 
-    elif netprefix == 32:
-        # Host route: x.y.z.w/32
-        static_routing.AddHostRouteTo(address, ifindex) 
-    else:
-        # Network route: x.y.z.w/n
-        mask = ns3.Ipv4Mask("/%d" % netprefix) 
-        static_routing.AddNetworkRouteTo(address, mask, ifindex) 
-
-def configure_node(testbed_instance, guid):
-    configure_traces(testbed_instance, guid)
-
-    element = testbed_instance._elements[guid]
-    if not guid in testbed_instance._add_route:
-        return
-    # search for the Ipv4L3Protocol asociated with the device
-    ipv4_guid = _get_ipv4_protocol_guid(testbed_instance, guid)
-    ipv4 = testbed_instance._elements[ipv4_guid]
-    list_routing = ipv4.GetRoutingProtocol()
-    (static_routing, priority) = list_routing.GetRoutingProtocol(0)
-    ns3 = testbed_instance.ns3
-    routes = testbed_instance._add_route[guid]
-    for route in routes:
-        (destination, netprefix, nexthop, metric, device) = route
-        address = ns3.Ipv4Address(destination)
-        if nexthop:
-            nexthop_address = ns3.Ipv4Address(nexthop)
-            ifindex = -1
-            # TODO: HACKISH way of getting the ifindex... improve this
-            nifaces = ipv4.GetNInterfaces()
-            for ifidx in xrange(nifaces):
-                iface = ipv4.GetInterface(ifidx)
-                naddress = iface.GetNAddresses()
-                for addridx in xrange(naddress):
-                    ifaddr = iface.GetAddress(addridx)
-                    ifmask = ifaddr.GetMask()
-                    ifindex = ipv4.GetInterfaceForPrefix(nexthop_address, ifmask)
-                    if ifindex == ifidx:
-                        break
-                if ifindex == ifidx:
-                    break
-            if ifindex < 0:
-                # Check previous ptp routes
-                for chaindest, chainprefix, chainhop, metric, device in routes:
-                    if chaindest == nexthop and chainprefix == 32:
-                        chainhop_address = ns3.Ipv4Address(chainhop)
-                        for ifidx in xrange(nifaces):
-                            iface = ipv4.GetInterface(ifidx)
-                            naddress = iface.GetNAddresses()
-                            for addridx in xrange(naddress):
-                                ifaddr = iface.GetAddress(addridx)
-                                ifmask = ifaddr.GetMask()
-                                ifindex = ipv4.GetInterfaceForPrefix(chainhop_address, ifmask)
-                                if ifindex == ifidx:
-                                    break
-            if ifindex < 0:
-                raise RuntimeError, "Cannot associate interface for routing entry:" \
-                    "%s/%s -> %s. At node %s" % (destination, netprefix, nexthop, guid)
-            _add_static_route(ns3, static_routing, 
-                address, netprefix, nexthop_address, ifindex)
-        else:
-            mask = ns3.Ipv4Mask("/%d" % netprefix) 
-            ifindex = ipv4.GetInterfaceForPrefix(address, mask)
-            if ifindex < 0:
-                raise RuntimeError, "Cannot associate interface for routing entry:" \
-                    "%s/%s -> %s. At node %s" % (destination, netprefix, nexthop, guid)
-            _add_static_route_if(ns3, static_routing, 
-                address, netprefix, nexthop_address, ifindex)
-
-def configure_station(testbed_instance, guid):
-    configure_device(testbed_instance, guid)
-    element = testbed_instance._elements[guid]
-    element.Start()
-
-###  Factories  ###
-
-factories_create_order = ["ns3::BasicEnergySource",
-    "ns3::WifiRadioEnergyModel",
-    "ns3::BSSchedulerRtps",
-    "ns3::BSSchedulerSimple",
-    "ns3::UdpTraceClient",
-    "ns3::UdpServer",
-    "ns3::UdpClient",
-    "ns3::FlowMonitor",
-    "ns3::Radvd",
-    "ns3::Ping6",
-    "ns3::flame::FlameProtocol",
-    "ns3::flame::FlameRtable",
-    "ns3::dot11s::AirtimeLinkMetricCalculator",
-    "ns3::dot11s::HwmpProtocol",
-    "ns3::dot11s::HwmpRtable",
-    "ns3::dot11s::PeerManagementProtocol",
-    "ns3::dot11s::PeerLink",
-    "ns3::MeshWifiInterfaceMac",
-    "ns3::MeshPointDevice",
-    "ns3::UanMacRcGw",
-    "ns3::UanMacRc",
-    "ns3::UanPhyCalcSinrDual",
-    "ns3::UanPhyPerGenDefault",
-    "ns3::UanPhyDual",
-    "ns3::UanPropModelThorp",
-    "ns3::UanMacCw",
-    "ns3::UanNoiseModelDefault",
-    "ns3::UanMacAloha",
-    "ns3::UanPropModelIdeal",
-    "ns3::UanTransducerHd",
-    "ns3::UanPhyCalcSinrDefault",
-    "ns3::UanPhyGen",
-    "ns3::UanPhyCalcSinrFhFsk",
-    "ns3::UanPhyPerUmodem",
-    "ns3::UanChannel",
-    "ns3::V4Ping",
-    "ns3::AthstatsWifiTraceSink",
-    "ns3::FlameStack",
-    "ns3::Dot11sStack",
-    "ns3::NonCommunicatingNetDevice",
-    "ns3::HalfDuplexIdealPhy",
-    "ns3::AlohaNoackNetDevice",
-    "ns3::SpectrumAnalyzer",
-    "ns3::WaveformGenerator",
-    "ns3::MultiModelSpectrumChannel",
-    "ns3::SingleModelSpectrumChannel",
-    "ns3::MsduStandardAggregator",
-    "ns3::EdcaTxopN",
-    "ns3::StaWifiMac",
-    "ns3::ApWifiMac",
-    "ns3::MinstrelWifiManager",
-    "ns3::CaraWifiManager",
-    "ns3::AarfcdWifiManager",
-    "ns3::OnoeWifiManager",
-    "ns3::AmrrWifiManager",
-    "ns3::ConstantRateWifiManager",
-    "ns3::IdealWifiManager",
-    "ns3::AarfWifiManager",
-    "ns3::ArfWifiManager",
-    "ns3::WifiNetDevice",
-    "ns3::AdhocWifiMac",
-    "ns3::DcaTxop",
-    "ns3::WifiMacQueue",
-    "ns3::YansWifiChannel",
-    "ns3::YansWifiPhy",
-    "ns3::NistErrorRateModel",
-    "ns3::YansErrorRateModel",
-    "ns3::WaypointMobilityModel",
-    "ns3::ConstantAccelerationMobilityModel",
-    "ns3::RandomDirection2dMobilityModel",
-    "ns3::RandomWalk2dMobilityModel",
-    "ns3::SteadyStateRandomWaypointMobilityModel",
-    "ns3::RandomWaypointMobilityModel",
-    "ns3::GaussMarkovMobilityModel",
-    "ns3::ConstantVelocityMobilityModel",
-    "ns3::ConstantPositionMobilityModel",
-    "ns3::ListPositionAllocator",
-    "ns3::GridPositionAllocator",
-    "ns3::RandomRectanglePositionAllocator",
-    "ns3::RandomBoxPositionAllocator",
-    "ns3::RandomDiscPositionAllocator",
-    "ns3::UniformDiscPositionAllocator",
-    "ns3::HierarchicalMobilityModel",
-    "ns3::aodv::RoutingProtocol",
-    "ns3::olsr::RoutingProtocol",
-    "ns3::UdpEchoClient",
-    "ns3::PacketSink",
-    "ns3::OnOffApplication",
-    "ns3::VirtualNetDevice",
-    "ns3::FdNetDevice",
-    "ns3::Nepi::TunChannel",
-    "ns3::TapBridge",
-    "ns3::BridgeChannel",
-    "ns3::BridgeNetDevice",
-    "ns3::EmuNetDevice",
-    "ns3::CsmaChannel",
-    "ns3::CsmaNetDevice",
-    "ns3::PointToPointRemoteChannel",
-    "ns3::PointToPointChannel",
-    "ns3::PointToPointNetDevice",
-    "ns3::NscTcpL4Protocol",
-    "ns3::Icmpv6L4Protocol",
-    "ns3::Ipv6OptionPad1",
-    "ns3::Ipv6OptionPadn",
-    "ns3::Ipv6OptionJumbogram",
-    "ns3::Ipv6OptionRouterAlert",
-    "ns3::Ipv6ExtensionHopByHop",
-    "ns3::Ipv6ExtensionDestination",
-    "ns3::Ipv6ExtensionFragment",
-    "ns3::Ipv6ExtensionRouting",
-    "ns3::Ipv6ExtensionLooseRouting",
-    "ns3::Ipv6ExtensionESP",
-    "ns3::Ipv6ExtensionAH",
-    "ns3::Ipv6L3Protocol",
-    "ns3::LoopbackNetDevice",
-    "ns3::Icmpv4L4Protocol",
-    "ns3::RttMeanDeviation",
-    "ns3::ArpL3Protocol",
-    "ns3::TcpL4Protocol",
-    "ns3::UdpL4Protocol",
-    "ns3::Ipv4L3Protocol",
-    "ns3::SimpleNetDevice",
-    "ns3::SimpleChannel",
-    "ns3::PacketSocket",
-    "ns3::DropTailQueue",
-    "ns3::Node",
-    "ns3::FriisSpectrumPropagationLossModel",
-    "ns3::Cost231PropagationLossModel",
-    "ns3::JakesPropagationLossModel",
-    "ns3::RandomPropagationLossModel",
-    "ns3::FriisPropagationLossModel",
-    "ns3::TwoRayGroundPropagationLossModel",
-    "ns3::LogDistancePropagationLossModel",
-    "ns3::ThreeLogDistancePropagationLossModel",
-    "ns3::NakagamiPropagationLossModel",
-    "ns3::FixedRssLossModel",
-    "ns3::MatrixPropagationLossModel",
-    "ns3::Nepi::MobilityPair",
-    "ns3::RangePropagationLossModel",
-    "ns3::RandomPropagationDelayModel",
-    "ns3::ConstantSpeedPropagationDelayModel",
-    "ns3::RateErrorModel",
-    "ns3::ListErrorModel",
-    "ns3::ReceiveListErrorModel",
-    "ns3::PacketBurst",
-    "ns3::EnergySourceContainer",
-    "ns3::BSSchedulerRtps",
-    "ns3::BSSchedulerSimple",
-    "ns3::SimpleOfdmWimaxChannel",
-    "ns3::SimpleOfdmWimaxPhy",
-    "ns3::UplinkSchedulerMBQoS",
-    "ns3::UplinkSchedulerRtps",
-    "ns3::UplinkSchedulerSimple",
-    "ns3::IpcsClassifierRecord",
-    "ns3::ServiceFlow",
-    "ns3::BaseStationNetDevice",
-    "ns3::SubscriberStationNetDevice",
- ]
-
-factories_configure_order = ["ns3::BasicEnergySource",
-    "ns3::WifiRadioEnergyModel",
-    "ns3::BSSchedulerRtps",
-    "ns3::BSSchedulerSimple",
-    "ns3::UdpTraceClient",
-    "ns3::UdpServer",
-    "ns3::UdpClient",
-    "ns3::FlowMonitor",
-    "ns3::Radvd",
-    "ns3::Ping6",
-    "ns3::flame::FlameProtocol",
-    "ns3::flame::FlameRtable",
-    "ns3::dot11s::AirtimeLinkMetricCalculator",
-    "ns3::dot11s::HwmpProtocol",
-    "ns3::dot11s::HwmpRtable",
-    "ns3::dot11s::PeerManagementProtocol",
-    "ns3::dot11s::PeerLink",
-    "ns3::MeshWifiInterfaceMac",
-    "ns3::MeshPointDevice",
-    "ns3::UanMacRcGw",
-    "ns3::UanMacRc",
-    "ns3::UanPhyCalcSinrDual",
-    "ns3::UanPhyPerGenDefault",
-    "ns3::UanPhyDual",
-    "ns3::UanPropModelThorp",
-    "ns3::UanMacCw",
-    "ns3::UanNoiseModelDefault",
-    "ns3::UanMacAloha",
-    "ns3::UanPropModelIdeal",
-    "ns3::UanTransducerHd",
-    "ns3::UanPhyCalcSinrDefault",
-    "ns3::UanPhyGen",
-    "ns3::UanPhyCalcSinrFhFsk",
-    "ns3::UanPhyPerUmodem",
-    "ns3::UanChannel",
-    "ns3::V4Ping",
-    "ns3::AthstatsWifiTraceSink",
-    "ns3::FlameStack",
-    "ns3::Dot11sStack",
-    "ns3::NonCommunicatingNetDevice",
-    "ns3::HalfDuplexIdealPhy",
-    "ns3::AlohaNoackNetDevice",
-    "ns3::SpectrumAnalyzer",
-    "ns3::WaveformGenerator",
-    "ns3::MultiModelSpectrumChannel",
-    "ns3::SingleModelSpectrumChannel",
-    "ns3::MsduStandardAggregator",
-    "ns3::EdcaTxopN",
-    "ns3::StaWifiMac",
-    "ns3::ApWifiMac",
-    "ns3::MinstrelWifiManager",
-    "ns3::CaraWifiManager",
-    "ns3::AarfcdWifiManager",
-    "ns3::OnoeWifiManager",
-    "ns3::AmrrWifiManager",
-    "ns3::ConstantRateWifiManager",
-    "ns3::IdealWifiManager",
-    "ns3::AarfWifiManager",
-    "ns3::ArfWifiManager",
-    "ns3::WifiNetDevice",
-    "ns3::AdhocWifiMac",
-    "ns3::DcaTxop",
-    "ns3::WifiMacQueue",
-    "ns3::YansWifiChannel",
-    "ns3::YansWifiPhy",
-    "ns3::NistErrorRateModel",
-    "ns3::YansErrorRateModel",
-    "ns3::WaypointMobilityModel",
-    "ns3::ConstantAccelerationMobilityModel",
-    "ns3::RandomDirection2dMobilityModel",
-    "ns3::RandomWalk2dMobilityModel",
-    "ns3::SteadyStateRandomWaypointMobilityModel",
-    "ns3::RandomWaypointMobilityModel",
-    "ns3::GaussMarkovMobilityModel",
-    "ns3::ConstantVelocityMobilityModel",
-    "ns3::ConstantPositionMobilityModel",
-    "ns3::ListPositionAllocator",
-    "ns3::GridPositionAllocator",
-    "ns3::RandomRectanglePositionAllocator",
-    "ns3::RandomBoxPositionAllocator",
-    "ns3::RandomDiscPositionAllocator",
-    "ns3::UniformDiscPositionAllocator",
-    "ns3::HierarchicalMobilityModel",
-    "ns3::aodv::RoutingProtocol",
-    "ns3::olsr::RoutingProtocol",
-    "ns3::UdpEchoServer",
-    "ns3::UdpEchoClient",
-    "ns3::PacketSink",
-    "ns3::OnOffApplication",
-    "ns3::VirtualNetDevice",
-    "ns3::FdNetDevice",
-    "ns3::Nepi::TunChannel",
-    "ns3::TapBridge",
-    "ns3::BridgeChannel",
-    "ns3::BridgeNetDevice",
-    "ns3::EmuNetDevice",
-    "ns3::CsmaChannel",
-    "ns3::CsmaNetDevice",
-    "ns3::PointToPointRemoteChannel",
-    "ns3::PointToPointChannel",
-    "ns3::PointToPointNetDevice",
-    "ns3::BaseStationNetDevice",
-    "ns3::SubscriberStationNetDevice",
-    "ns3::NscTcpL4Protocol",
-    "ns3::Icmpv6L4Protocol",
-    "ns3::Ipv6OptionPad1",
-    "ns3::Ipv6OptionPadn",
-    "ns3::Ipv6OptionJumbogram",
-    "ns3::Ipv6OptionRouterAlert",
-    "ns3::Ipv6ExtensionHopByHop",
-    "ns3::Ipv6ExtensionDestination",
-    "ns3::Ipv6ExtensionFragment",
-    "ns3::Ipv6ExtensionRouting",
-    "ns3::Ipv6ExtensionLooseRouting",
-    "ns3::Ipv6ExtensionESP",
-    "ns3::Ipv6ExtensionAH",
-    "ns3::Ipv6L3Protocol",
-    "ns3::LoopbackNetDevice",
-    "ns3::Icmpv4L4Protocol",
-    "ns3::RttMeanDeviation",
-    "ns3::ArpL3Protocol",
-    "ns3::TcpL4Protocol",
-    "ns3::UdpL4Protocol",
-    "ns3::Ipv4L3Protocol",
-    "ns3::SimpleNetDevice",
-    "ns3::SimpleChannel",
-    "ns3::PacketSocket",
-    "ns3::DropTailQueue",
-    "ns3::Node",
-    "ns3::FriisSpectrumPropagationLossModel",
-    "ns3::Cost231PropagationLossModel",
-    "ns3::JakesPropagationLossModel",
-    "ns3::RandomPropagationLossModel",
-    "ns3::FriisPropagationLossModel",
-    "ns3::TwoRayGroundPropagationLossModel",
-    "ns3::LogDistancePropagationLossModel",
-    "ns3::ThreeLogDistancePropagationLossModel",
-    "ns3::NakagamiPropagationLossModel",
-    "ns3::FixedRssLossModel",
-    "ns3::MatrixPropagationLossModel",
-    "ns3::Nepi::MobilityPair",
-    "ns3::RangePropagationLossModel",
-    "ns3::RandomPropagationDelayModel",
-    "ns3::ConstantSpeedPropagationDelayModel",
-    "ns3::RateErrorModel",
-    "ns3::ListErrorModel",
-    "ns3::ReceiveListErrorModel",
-    "ns3::PacketBurst",
-    "ns3::EnergySourceContainer",
-    "ns3::BSSchedulerRtps",
-    "ns3::BSSchedulerSimple",
-    "ns3::SimpleOfdmWimaxChannel",
-    "ns3::SimpleOfdmWimaxPhy",
-    "ns3::UplinkSchedulerMBQoS",
-    "ns3::UplinkSchedulerRtps",
-    "ns3::UplinkSchedulerSimple",
-    "ns3::IpcsClassifierRecord",
-    "ns3::ServiceFlow",
- ]
-
-
-factories_info = dict({
-    "ns3::Ping6": dict({
-        "category": FC.CATEGORY_APPLICATIONS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "stop_function": stop_application,
-        "start_function": start_application,
-        "status_function": status_application,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["MaxPackets",
-            "Interval",
-            "RemoteIpv6",
-            "LocalIpv6",
-            "PacketSize",
-            "StartTime",
-            "StopTime"],
-        "tags": [tags.APPLICATION],
-    }),
-     "ns3::UdpL4Protocol": dict({
-        "category": FC.CATEGORY_PROTOCOLS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node"],
-        "box_attributes": ["ProtocolNumber"],
-        "tags": [tags.PROTOCOL],
-    }),
-     "ns3::RandomDiscPositionAllocator": dict({
-        "category": FC.CATEGORY_MOBILITY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Theta",
-            "Rho",
-            "X",
-            "Y"],
-        "tags": [tags.MOBILE],
-    }),
-     "ns3::Node": dict({
-        "category": FC.CATEGORY_NODES,
-        "create_function": create_node,
-        "configure_function": configure_node,
-        "help": "",
-        "connector_types": ["devs", "apps", "protos", "mobility"],
-        "box_attributes": ["Up"],
-        "tags": [tags.NODE, tags.ALLOW_ROUTES],
-    }),
-     "ns3::GridPositionAllocator": dict({
-        "category": FC.CATEGORY_MOBILITY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["GridWidth",
-            "MinX",
-            "MinY",
-            "DeltaX",
-            "DeltaY",
-            "LayoutType"],
-    }),
-     "ns3::TapBridge": dict({
-        "category": FC.CATEGORY_DEVICES,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Mtu",
-            "DeviceName",
-            "Gateway",
-            "IpAddress",
-            "MacAddress",
-            "Netmask",
-            "Start",
-            "Stop"],
-        "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-    }),
-     "ns3::FlowMonitor": dict({
-        "category": FC.CATEGORY_SERVICE_FLOWS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["MaxPerHopDelay",
-            "DelayBinWidth",
-            "JitterBinWidth",
-            "PacketSizeBinWidth",
-            "FlowInterruptionsBinWidth",
-            "FlowInterruptionsMinTime"],
-    }),
-     "ns3::ConstantVelocityMobilityModel": dict({
-        "category": FC.CATEGORY_MOBILITY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node", "mp"],
-        "box_attributes": ["Position",
-           "Velocity"],
-        "tags": [tags.MOBILE],
-    }),
-     "ns3::V4Ping": dict({
-        "category": FC.CATEGORY_APPLICATIONS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node"],
-        "stop_function": stop_application,
-        "start_function": start_application,
-        "status_function": status_application,
-        "box_attributes": ["Remote",
-            "Verbose",
-            "Interval",
-            "Size",
-            "StartTime",
-            "StopTime"],
-        "traces": ["rtt"],
-        "tags": [tags.APPLICATION],
-    }),
-     "ns3::dot11s::PeerLink": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["RetryTimeout",
-            "HoldingTimeout",
-            "ConfirmTimeout",
-            "MaxRetries",
-            "MaxBeaconLoss",
-            "MaxPacketFailure"],
-    }),
-     "ns3::PointToPointNetDevice": dict({
-        "category": FC.CATEGORY_DEVICES,
-        "create_function": create_element,
-        "configure_function": configure_device,
-        "help": "",
-        "connector_types": ["node", "err", "queue", "chan"],
-        "box_attributes": ["Mtu",
-            "Address",
-            "DataRate",
-            "InterframeGap"],
-        "traces": ["p2ppcap", "p2pascii"],
-        "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-    }),
-     "ns3::NakagamiPropagationLossModel": dict({
-        "category": FC.CATEGORY_LOSS_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Distance1",
-            "Distance2",
-            "m0",
-            "m1",
-            "m2"],
-    }),
-     "ns3::AarfWifiManager": dict({
-        "category": FC.CATEGORY_MANAGERS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["dev"],
-        "box_attributes": ["SuccessK",
-            "TimerK",
-            "MaxSuccessThreshold",
-            "MinTimerThreshold",
-            "MinSuccessThreshold",
-            "IsLowLatency",
-            "MaxSsrc",
-            "MaxSlrc",
-            "RtsCtsThreshold",
-            "FragmentationThreshold",
-            "NonUnicastMode"],
-    }),
-     "ns3::Ipv6OptionJumbogram": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["OptionNumber"],
-    }),
-     "ns3::TwoRayGroundPropagationLossModel": dict({
-        "category": FC.CATEGORY_LOSS_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Lambda",
-            "SystemLoss",
-            "MinDistance",
-            "HeightAboveZ"],
-    }),
-     "ns3::OnOffApplication": dict({
-        "category": FC.CATEGORY_APPLICATIONS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node"],
-        "stop_function": stop_application,
-        "start_function": start_application,
-        "status_function": status_application,
-        "box_attributes": ["DataRate",
-            "PacketSize",
-            "Remote",
-            "OnTime",
-            "OffTime",
-            "MaxBytes",
-            "Protocol",
-            "StartTime",
-            "StopTime"],
-        "tags": [tags.APPLICATION],
-    }),
-     "ns3::AdhocWifiMac": dict({
-        "category": FC.CATEGORY_MAC_MODELS,
-        "create_function": create_wifi_mac,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["dev"],
-        "box_attributes": ["CtsTimeout",
-            "AckTimeout",
-            "BasicBlockAckTimeout",
-            "CompressedBlockAckTimeout",
-            "Sifs",
-            "EifsNoDifs",
-            "Slot",
-            "Pifs",
-            "MaxPropagationDelay",
-            "Ssid",
-            "Standard",
-            "QosSupported"],
-    }),
-     "ns3::ConstantAccelerationMobilityModel": dict({
-        "category": FC.CATEGORY_MOBILITY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node", "mp"],
-        "box_attributes": ["Position",
-            "Velocity"],
-        "tags": [tags.MOBILE],
-    }),
-     "ns3::GaussMarkovMobilityModel": dict({
-        "category": FC.CATEGORY_MOBILITY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Bounds",
-            "TimeStep",
-            "Alpha",
-            "MeanVelocity",
-            "MeanDirection",
-            "MeanPitch",
-            "NormalVelocity",
-            "NormalDirection",
-            "NormalPitch",
-            "Position",
-            "Velocity"],
-        "tags": [tags.MOBILE],
-    }),
-     "ns3::dot11s::HwmpProtocol": dict({
-        "category": FC.CATEGORY_PROTOCOLS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["RandomStart",
-            "MaxQueueSize",
-            "Dot11MeshHWMPmaxPREQretries",
-            "Dot11MeshHWMPnetDiameterTraversalTime",
-            "Dot11MeshHWMPpreqMinInterval",
-            "Dot11MeshHWMPperrMinInterval",
-            "Dot11MeshHWMPactiveRootTimeout",
-            "Dot11MeshHWMPactivePathTimeout",
-            "Dot11MeshHWMPpathToRootInterval",
-            "Dot11MeshHWMPrannInterval",
-            "MaxTtl",
-            "UnicastPerrThreshold",
-            "UnicastPreqThreshold",
-            "UnicastDataThreshold",
-            "DoFlag",
-            "RfFlag"],
-    }),
-     "ns3::NscTcpL4Protocol": dict({
-        "category": FC.CATEGORY_PROTOCOLS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Library",
-          "ProtocolNumber"],
-    }),
-     "ns3::dot11s::AirtimeLinkMetricCalculator": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Dot11sMeshHeaderLength"],
-    }),
-     "ns3::UanMacCw": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["CW",
-           "SlotTime"],
-    }),
-     "ns3::AthstatsWifiTraceSink": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Interval"],
-    }),
-     "ns3::FlameStack": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": [],
-    }),
-     "ns3::UanMacRc": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["RetryRate",
-            "MaxFrames",
-            "QueueLimit",
-            "SIFS",
-            "NumberOfRates",
-            "MinRetryRate",
-            "RetryStep",
-            "NumberOfRetryRates",
-            "MaxPropDelay"],
-    }),
-     "ns3::WaypointMobilityModel": dict({
-        "category": FC.CATEGORY_MOBILITY_MODELS,
-        "create_function": create_waypoint_mobility,
-        "configure_function": configure_element,
-        "help": "Waypoint-based mobility model.",
-        "connector_types": ["node"],
-        "box_attributes": ["WaypointsLeft",
-            "Position",
-            "Velocity",
-            "WaypointList"],
-        "tags": [tags.MOBILE],
-    }),
-     "ns3::FdNetDevice": dict({
-        "category": FC.CATEGORY_DEVICES,
-        "create_function": create_element,
-        "configure_function": configure_device,
-        "help": "Network interface associated to a file descriptor",
-        "connector_types": ["node", "->fd"],
-        "box_attributes": ["Address", 
-            "tun_proto", "tun_addr", "tun_port", "tun_key", "tun_cipher_fdnd"],
-        "traces": ["fdpcap", "fdascii"],
-        "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-    }),
-     "ns3::Nepi::TunChannel": dict({
-        "category": FC.CATEGORY_TUNNELS,
-        "create_function": create_tunchannel,
-        "preconfigure_function": preconfigure_tunchannel,
-        "configure_function": postconfigure_tunchannel,
-        "prestart_function": prestart_tunchannel,
-        "help": "Channel to forward FdNetDevice data to "
-                "other TAP interfaces supporting the NEPI tunneling protocol.",
-        "connector_types": ["fd->", "udp", "tcp"],
-        "allow_addresses": False,
-        "box_attributes": ["tun_proto", "tun_addr", "tun_port", "tun_key","tun_cipher"],
-        "tags": [tags.TUNNEL],
-    }),
-     "ns3::CsmaNetDevice": dict({
-        "category": FC.CATEGORY_DEVICES,
-        "create_function": create_element,
-        "configure_function": configure_device,
-        "help": "CSMA (carrier sense, multiple access) interface",
-        "connector_types": ["node", "chan", "err", "queue"],
-        "box_attributes": ["Address",
-            "Mtu",
-            "SendEnable",
-            "ReceiveEnable"],
-        "traces": ["csmapcap", "csmapcap_promisc"],
-        "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-    }),
-     "ns3::UanPropModelThorp": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["SpreadCoef"],
-    }),
-     "ns3::Icmpv6L4Protocol": dict({
-        "category": FC.CATEGORY_PROTOCOLS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node"],
-        "box_attributes": ["DAD",
-            "ProtocolNumber"],
-    }),
-     "ns3::SimpleNetDevice": dict({
-        "category": FC.CATEGORY_DEVICES,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node", "chan"],
-        "box_attributes": [],
-        "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-    }),
-     "ns3::FriisPropagationLossModel": dict({
-        "category": FC.CATEGORY_LOSS_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Lambda",
-            "SystemLoss",
-            "MinDistance"],
-    }),
-     "ns3::Ipv6OptionRouterAlert": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["OptionNumber"],
-    }),
-     "ns3::UniformDiscPositionAllocator": dict({
-        "category": FC.CATEGORY_MOBILITY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["rho",
-            "X",
-            "Y"],
-        "tags": [tags.MOBILE],
-    }),
-     "ns3::RandomBoxPositionAllocator": dict({
-        "category": FC.CATEGORY_MOBILITY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["X",
-            "Y",
-            "Z"],
-        "tags": [tags.MOBILE],
-    }),
-     "ns3::Ipv6ExtensionDestination": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["ExtensionNumber"],
-    }),
-     "ns3::LoopbackNetDevice": dict({
-        "category": FC.CATEGORY_DEVICES,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": [],
-        "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-    }),
-     "ns3::ConstantSpeedPropagationDelayModel": dict({
-        "category": FC.CATEGORY_DELAY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["chan"],
-        "box_attributes": ["Speed"],
-    }),
-     "ns3::Ipv6ExtensionHopByHop": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["ExtensionNumber"],
-    }),
-     "ns3::BridgeChannel": dict({
-        "category": FC.CATEGORY_CHANNELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": [],
-    }),
-     "ns3::Radvd": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["StartTime",
-            "StopTime"],
-    }),
-     "ns3::PacketSocket": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["RcvBufSize"],
-    }),
-     "ns3::flame::FlameProtocol": dict({
-        "category": FC.CATEGORY_PROTOCOLS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["BroadcastInterval",
-            "MaxCost"],
-    }),
-     "ns3::Cost231PropagationLossModel": dict({
-        "category": FC.CATEGORY_LOSS_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Lambda",
-            "Frequency",
-            "BSAntennaHeight",
-            "SSAntennaHeight",
-            "MinDistance"],
-    }),
-     "ns3::Ipv6ExtensionESP": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["ExtensionNumber"],
-    }),
-     "ns3::CaraWifiManager": dict({
-        "category": FC.CATEGORY_MANAGERS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["dev"],
-        "box_attributes": ["ProbeThreshold",
-            "FailureThreshold",
-            "SuccessThreshold",
-            "Timeout",
-            "IsLowLatency",
-            "MaxSsrc",
-            "MaxSlrc",
-            "RtsCtsThreshold",
-            "FragmentationThreshold",
-            "NonUnicastMode"],
-    
-    }),
-     "ns3::RttMeanDeviation": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Gain",
-            "MaxMultiplier",
-            "InitialEstimation",
-            "MinRTO"],
-    }),
-     "ns3::Icmpv4L4Protocol": dict({
-        "category": FC.CATEGORY_PROTOCOLS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node"],
-        "box_attributes": ["ProtocolNumber"],
-    }),
-     "ns3::WaveformGenerator": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Period",
-            "DutyCycle"],
-    }),
-     "ns3::YansWifiChannel": dict({
-        "category": FC.CATEGORY_CHANNELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["phys", "delay", "loss"],
-        "box_attributes": [],
-    }),
-     "ns3::SimpleChannel": dict({
-        "category": FC.CATEGORY_CHANNELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["devs"],
-        "box_attributes": [],
-    }),
-     "ns3::Ipv6ExtensionFragment": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["ExtensionNumber"],
-    }),
-     "ns3::Dot11sStack": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Root"],
-    }),
-     "ns3::FriisSpectrumPropagationLossModel": dict({
-        "category": FC.CATEGORY_LOSS_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": [],
-    }),
-     "ns3::RandomRectanglePositionAllocator": dict({
-        "category": FC.CATEGORY_MOBILITY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["X",
-           "Y"],
-        "tags": [tags.MOBILE],
-    }),
-     "ns3::HierarchicalMobilityModel": dict({
-        "category": FC.CATEGORY_MOBILITY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node"],
-        "box_attributes": ["Position",
-            "Velocity"],
-        "tags": [tags.MOBILE],
-    }),
-     "ns3::ThreeLogDistancePropagationLossModel": dict({
-        "category": FC.CATEGORY_LOSS_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Distance0",
-            "Distance1",
-            "Distance2",
-            "Exponent0",
-            "Exponent1",
-            "Exponent2",
-            "ReferenceLoss"],
-    }),
-     "ns3::UanNoiseModelDefault": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Wind",
-            "Shipping"],
-    }),
-     "ns3::dot11s::HwmpRtable": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": [],
-    }),
-     "ns3::PacketBurst": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": [],
-    }),
-     "ns3::RandomPropagationDelayModel": dict({
-        "category": FC.CATEGORY_DELAY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Variable"],
-    }),
-     "ns3::ArpL3Protocol": dict({
-        "category": FC.CATEGORY_PROTOCOLS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node"],
-        "box_attributes": [],
-    }),
-     "ns3::SteadyStateRandomWaypointMobilityModel": dict({
-        "category": FC.CATEGORY_MOBILITY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["MinSpeed",
-            "MaxSpeed",
-            "MinPause",
-            "MaxPause",
-            "MinX",
-            "MaxX",
-            "MinY",
-            "MaxY",
-            "Position",
-            "Velocity"],
-        "tags": [tags.MOBILE],
-    }),
-     "ns3::BaseStationNetDevice": dict({
-        "category": FC.CATEGORY_DEVICES,
-        "create_function": create_base_station,
-        "configure_function": configure_station,
-        "help": "Base station for wireless mobile network",
-        "connector_types": ["node", "chan", "phy", "uplnk", "dwnlnk"],
-        "box_attributes": ["InitialRangInterval",
-            "DcdInterval",
-            "UcdInterval",
-            "IntervalT8",
-            "RangReqOppSize",
-            "BwReqOppSize",
-            "MaxRangCorrectionRetries",
-            "Mtu",
-            "RTG",
-            "TTG"],
-        "traces": ["wimaxpcap", "wimaxascii"],
-        "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-    }),
-     "ns3::UdpServer": dict({
-        "category": FC.CATEGORY_APPLICATIONS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node"],
-        "stop_function": stop_application,
-        "start_function": start_application,
-        "status_function": status_application,
-        "box_attributes": ["Port",
-            "PacketWindowSize",
-            "StartTime",
-            "StopTime"],
-    }),
-     "ns3::AarfcdWifiManager": dict({
-        "category": FC.CATEGORY_MANAGERS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["dev"],
-        "box_attributes": ["SuccessK",
-            "TimerK",
-            "MaxSuccessThreshold",
-            "MinTimerThreshold",
-            "MinSuccessThreshold",
-            "MinRtsWnd",
-            "MaxRtsWnd",
-            "TurnOffRtsAfterRateDecrease",
-            "TurnOnRtsAfterRateIncrease",
-            "IsLowLatency",
-            "MaxSsrc",
-            "MaxSlrc",
-            "RtsCtsThreshold",
-            "FragmentationThreshold",
-            "NonUnicastMode"],
-    }),
-     "ns3::UanTransducerHd": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": [],
-    }),
-     "ns3::LogDistancePropagationLossModel": dict({
-        "category": FC.CATEGORY_LOSS_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["prev", "next"],
-        "box_attributes": ["Exponent",
-            "ReferenceDistance",
-            "ReferenceLoss"],
-    }),
-     "ns3::EmuNetDevice": dict({
-        "category": FC.CATEGORY_DEVICES,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node", "queue"],
-        "box_attributes": ["Mtu",
-            "Address",
-            "DeviceName",
-            "Start",
-            "Stop",
-            "RxQueueSize"],
-        "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-    }),
-     "ns3::Ipv6ExtensionLooseRouting": dict({
-        "category": FC.CATEGORY_ROUTING,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["ExtensionNumber"],
-    }),
-     "ns3::RandomWaypointMobilityModel": dict({
-        "category": FC.CATEGORY_MOBILITY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node"],
-        "box_attributes": ["Speed",
-            "Pause",
-            "Position",
-            "Velocity"],
-        "tags": [tags.MOBILE],
-    }),
-     "ns3::RangePropagationLossModel": dict({
-        "category": FC.CATEGORY_LOSS_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["MaxRange"],
-    }),
-     "ns3::AlohaNoackNetDevice": dict({
-        "category": FC.CATEGORY_DEVICES,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Address",
-            "Mtu"],
-        "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-    }),
-     "ns3::MatrixPropagationLossModel": dict({
-        "category": FC.CATEGORY_LOSS_MODELS,
-        "create_function": create_matrix_propagation,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["mobpair", "chan"],
-        "box_attributes": ["DefaultLoss"],
-    }),
-    "ns3::Nepi::MobilityPair": dict({
-        "category": FC.CATEGORY_LOSS_MODELS,
-        "help": "",
-        "connector_types": ["matrix", "ma", "mb"],
-        "box_attributes": ["Loss", "Symmetric"],
-    }),
-     "ns3::WifiNetDevice": dict({
-        "category": FC.CATEGORY_DEVICES,
-        "create_function": create_element,
-        "configure_function": configure_device,
-        "help": "",
-        "connector_types": ["node", "mac", "phy", "manager"],
-        "box_attributes": ["Mtu"],
-        "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-    }),
-     "ns3::CsmaChannel": dict({
-        "category": FC.CATEGORY_CHANNELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["devs"],
-        "box_attributes": ["DataRate",
-            "Delay"],
-    }),
-     "ns3::BridgeNetDevice": dict({
-        "category": FC.CATEGORY_DEVICES,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node"],
-        "box_attributes": ["Mtu",
-           "EnableLearning",
-           "ExpirationTime"],
-        "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-    }),
-     "ns3::Ipv6ExtensionRouting": dict({
-        "category": FC.CATEGORY_ROUTING,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["ExtensionNumber"],
-    }),
-     "ns3::StaWifiMac": dict({
-        "category": FC.CATEGORY_MAC_MODELS,
-        "create_function": create_wifi_mac,
-        "configure_function": configure_element,
-        "help": "Station Wifi MAC Model",
-        "connector_types": ["dev"],
-        "box_attributes": ["ProbeRequestTimeout",
-            "AssocRequestTimeout",
-            "MaxMissedBeacons",
-            "CtsTimeout",
-            "AckTimeout",
-            "BasicBlockAckTimeout",
-            "CompressedBlockAckTimeout",
-            "Sifs",
-            "EifsNoDifs",
-            "Slot",
-            "Pifs",
-            "MaxPropagationDelay",
-            "Ssid",
-            "Standard",
-            "QosSupported"],
-    }),
-     "ns3::UdpEchoClient": dict({
-        "category": FC.CATEGORY_APPLICATIONS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node"],
-        "stop_function": stop_application,
-        "start_function": start_application,
-        "status_function": status_application,
-        "box_attributes": ["MaxPackets",
-            "Interval",
-            "RemoteAddress",
-            "RemotePort",
-            "PacketSize",
-            "StartTime",
-            "StopTime"],
-        "tags": [tags.APPLICATION],
-    }),
-     "ns3::UdpClient": dict({
-        "category": FC.CATEGORY_APPLICATIONS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node"],
-        "stop_function": stop_application,
-        "start_function": start_application,
-        "status_function": status_application,
-        "box_attributes": ["MaxPackets",
-            "Interval",
-            "RemoteAddress",
-            "RemotePort",
-            "PacketSize",
-            "StartTime",
-            "StopTime"],
-        "tags": [tags.APPLICATION],
-    }),
-     "ns3::PointToPointChannel": dict({
-        "category": FC.CATEGORY_CHANNELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["dev2"],
-        "box_attributes": ["Delay"],
-    }),
-     "ns3::Ipv6StaticRouting": dict({
-        "category": FC.CATEGORY_ROUTING,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": [],
-    }),
-     "ns3::DropTailQueue": dict({
-        "category": FC.CATEGORY_QUEUES,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["dev"],
-        "box_attributes": ["MaxPackets",
-           "MaxBytes"],
-    }),
-     "ns3::ConstantPositionMobilityModel": dict({
-        "category": FC.CATEGORY_MOBILITY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node", "mp"],
-        "box_attributes": ["Position",
-            "Velocity"],
-        "tags": [tags.MOBILE],
-    }),
-     "ns3::FixedRssLossModel": dict({
-        "category": FC.CATEGORY_LOSS_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Rss"],
-    }),
-     "ns3::EnergySourceContainer": dict({
-        "category": FC.CATEGORY_ENERGY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": [],
-    }),
-     "ns3::RandomWalk2dMobilityModel": dict({
-        "category": FC.CATEGORY_MOBILITY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node"],
-        "box_attributes": ["Bounds",
-            "Time",
-            "Distance",
-            "Mode",
-            "Direction",
-            "Speed",
-            "Position",
-            "Velocity"],
-        "tags": [tags.MOBILE],
-    }),
-     "ns3::ListPositionAllocator": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": [],
-    }),
-     "ns3::dot11s::PeerManagementProtocol": dict({
-        "category": FC.CATEGORY_PROTOCOLS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["MaxNumberOfPeerLinks",
-            "MaxBeaconShiftValue",
-            "EnableBeaconCollisionAvoidance"],
-    }),
-     "ns3::MeshPointDevice": dict({
-        "category": FC.CATEGORY_DEVICES,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Mtu"],
-        "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-    }),
-     "ns3::BasicEnergySource": dict({
-        "category": FC.CATEGORY_ENERGY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["BasicEnergySourceInitialEnergyJ",
-            "BasicEnergySupplyVoltageV",
-            "PeriodicEnergyUpdateInterval"],
-    }),
-     "ns3::Ipv6OptionPadn": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["OptionNumber"],
-    }),
-     "ns3::ApWifiMac": dict({
-        "category": FC.CATEGORY_MAC_MODELS,
-        "create_function": create_wifi_mac,
-        "configure_function": configure_element,
-        "help": "Access point Wifi MAC Model",
-        "connector_types": ["dev"],
-        "box_attributes": ["BeaconInterval",
-            "BeaconGeneration",
-            "CtsTimeout",
-            "AckTimeout",
-            "BasicBlockAckTimeout",
-            "CompressedBlockAckTimeout",
-            "Sifs",
-            "EifsNoDifs",
-            "Slot",
-            "Pifs",
-            "MaxPropagationDelay",
-            "Ssid",
-            "Standard",
-            "QosSupported"],
-    }),
-     "ns3::YansErrorRateModel": dict({
-        "category": FC.CATEGORY_ERROR_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": [],
-    }),
-     "ns3::WifiMacQueue": dict({
-        "category": FC.CATEGORY_QUEUES,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["MaxPacketNumber",
-           "MaxDelay"],
-    }),
-     "ns3::NonCommunicatingNetDevice": dict({
-        "category": FC.CATEGORY_DEVICES,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": [],
-        "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-    }),
-     "ns3::RateErrorModel": dict({
-        "category": FC.CATEGORY_ERROR_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["ErrorUnit",
-            "ErrorRate",
-            "RanVar",
-            "IsEnabled"],
-    }),
-     "ns3::MeshWifiInterfaceMac": dict({
-        "category": FC.CATEGORY_MAC_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["BeaconInterval",
-            "RandomStart",
-            "BeaconGeneration",
-            "CtsTimeout",
-            "AckTimeout",
-            "BasicBlockAckTimeout",
-            "CompressedBlockAckTimeout",
-            "Sifs",
-            "EifsNoDifs",
-            "Slot",
-            "Pifs",
-            "MaxPropagationDelay",
-            "Ssid"],
-    }),
-     "ns3::UanPhyCalcSinrDual": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": [],
-    }),
-     "ns3::Ipv6ExtensionAH": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["ExtensionNumber"],
-    }),
-     "ns3::SingleModelSpectrumChannel": dict({
-        "category": FC.CATEGORY_CHANNELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": [],
-    }),
-     "ns3::YansWifiPhy": dict({
-        "category": FC.CATEGORY_PHY_MODELS,
-        "create_function": create_wifi_phy,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["dev", "err", "chan"],
-        "box_attributes": ["EnergyDetectionThreshold",
-            "CcaMode1Threshold",
-            "TxGain",
-            "RxGain",
-            "TxPowerLevels",
-            "TxPowerEnd",
-            "TxPowerStart",
-            "RxNoiseFigure",
-            "ChannelSwitchDelay",
-            "ChannelNumber",
-            "Standard"],
-        "traces": ["yanswifipcap"]
-    }),
-     "ns3::WifiRadioEnergyModel": dict({
-        "category": FC.CATEGORY_ENERGY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["TxCurrentA",
-            "RxCurrentA",
-            "IdleCurrentA",
-            "SleepCurrentA"],
-    }),
-     "ns3::EdcaTxopN": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["BlockAckThreshold",
-            "MinCw",
-            "MaxCw",
-            "Aifsn"],
-    }),
-     "ns3::UanPhyPerGenDefault": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Threshold"],
-    }),
-     "ns3::IdealWifiManager": dict({
-        "category": FC.CATEGORY_MANAGERS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["dev"],
-        "box_attributes": ["BerThreshold",
-            "IsLowLatency",
-            "MaxSsrc",
-            "MaxSlrc",
-            "RtsCtsThreshold",
-            "FragmentationThreshold",
-            "NonUnicastMode"],
-    }),
-     "ns3::MultiModelSpectrumChannel": dict({
-        "category": FC.CATEGORY_CHANNELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": [],
-    }),
-     "ns3::HalfDuplexIdealPhy": dict({
-        "category": FC.CATEGORY_PHY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Rate"],
-    }),
-     "ns3::UanPhyCalcSinrDefault": dict({
-        "category": FC.CATEGORY_PHY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": [],
-    }),
-     "ns3::ReceiveListErrorModel": dict({
-        "category": FC.CATEGORY_ERROR_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["IsEnabled"],
-    }),
-     "ns3::SpectrumAnalyzer": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Resolution",
-        "NoisePowerSpectralDensity"],
-    }),
-     "ns3::ConstantRateWifiManager": dict({
-        "category": FC.CATEGORY_MANAGERS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["dev"],
-        "box_attributes": ["DataMode",
-            "ControlMode",
-            "IsLowLatency",
-            "MaxSsrc",
-            "MaxSlrc",
-            "RtsCtsThreshold",
-            "FragmentationThreshold",
-            "NonUnicastMode"],
-    }),
-     "ns3::Ipv6OptionPad1": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["OptionNumber"],
-    }),
-     "ns3::UdpTraceClient": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["RemoteAddress",
-            "RemotePort",
-            "MaxPacketSize",
-            "StartTime",
-            "StopTime"],
-    }),
-     "ns3::RraaWifiManager": dict({
-        "category": FC.CATEGORY_MANAGERS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["dev"],
-        "box_attributes": ["Basic",
-            "Timeout",
-            "ewndFor54mbps",
-            "ewndFor48mbps",
-            "ewndFor36mbps",
-            "ewndFor24mbps",
-            "ewndFor18mbps",
-            "ewndFor12mbps",
-            "ewndFor9mbps",
-            "ewndFor6mbps",
-            "poriFor48mbps",
-            "poriFor36mbps",
-            "poriFor24mbps",
-            "poriFor18mbps",
-            "poriFor12mbps",
-            "poriFor9mbps",
-            "poriFor6mbps",
-            "pmtlFor54mbps",
-            "pmtlFor48mbps",
-            "pmtlFor36mbps",
-            "pmtlFor24mbps",
-            "pmtlFor18mbps",
-            "pmtlFor12mbps",
-            "pmtlFor9mbps",
-            "IsLowLatency",
-            "MaxSsrc",
-            "MaxSlrc",
-            "RtsCtsThreshold",
-            "FragmentationThreshold",
-            "NonUnicastMode"],
-    }),
-     "ns3::RandomPropagationLossModel": dict({
-        "category": FC.CATEGORY_LOSS_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Variable"],
-    }),
-     "ns3::UanChannel": dict({
-        "category": FC.CATEGORY_CHANNELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": [],
-    }),
-     "ns3::MinstrelWifiManager": dict({
-        "category": FC.CATEGORY_MANAGERS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["dev"],
-        "box_attributes": ["UpdateStatistics",
-            "LookAroundRate",
-            "EWMA",
-            "SegmentSize",
-            "SampleColumn",
-            "PacketLength",
-            "IsLowLatency",
-            "MaxSsrc",
-            "MaxSlrc",
-            "RtsCtsThreshold",
-            "FragmentationThreshold",
-            "NonUnicastMode"],
-    }),
-     "ns3::UanPhyDual": dict({
-        "category": FC.CATEGORY_PHY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["CcaThresholdPhy1",
-            "CcaThresholdPhy2",
-            "TxPowerPhy1",
-            "TxPowerPhy2",
-            "RxGainPhy1",
-            "RxGainPhy2",
-            "SupportedModesPhy1",
-            "SupportedModesPhy2"],
-    }),
-     "ns3::ListErrorModel": dict({
-        "category": FC.CATEGORY_ERROR_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["dev"],
-        "box_attributes": ["IsEnabled"],
-    }),
-     "ns3::VirtualNetDevice": dict({
-        "category": FC.CATEGORY_DEVICES,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Mtu"],
-        "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-    }),
-     "ns3::UanPhyGen": dict({
-        "category": FC.CATEGORY_PHY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["CcaThreshold",
-            "RxThreshold",
-            "TxPower",
-            "RxGain",
-            "SupportedModes"],
-    }),
-     "ns3::Ipv6L3Protocol": dict({
-        "category": FC.CATEGORY_PROTOCOLS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node"],
-        "box_attributes": ["DefaultTtl",
-            "IpForward"],
-    }),
-     "ns3::PointToPointRemoteChannel": dict({
-        "category": FC.CATEGORY_CHANNELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Delay"],
-    }),
-     "ns3::UanPhyPerUmodem": dict({
-        "category": FC.CATEGORY_PHY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": [],
-    }),
-     "ns3::OnoeWifiManager": dict({
-        "category": FC.CATEGORY_MANAGERS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["dev"],
-        "box_attributes": ["UpdatePeriod",
-            "RaiseThreshold",
-            "AddCreditThreshold",
-            "IsLowLatency",
-            "MaxSsrc",
-            "MaxSlrc",
-            "RtsCtsThreshold",
-            "FragmentationThreshold",
-            "NonUnicastMode"],
-    }),
-     "ns3::JakesPropagationLossModel": dict({
-        "category": FC.CATEGORY_LOSS_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["NumberOfRaysPerPath",
-            "NumberOfOscillatorsPerRay",
-            "DopplerFreq",
-            "Distribution"],
-    }),
-     "ns3::PacketSink": dict({
-        "category": FC.CATEGORY_APPLICATIONS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node"],
-        "stop_function": stop_application,
-        "start_function": start_application,
-        "status_function": status_application,
-        "box_attributes": ["Local",
-            "Protocol",
-            "StartTime",
-            "StopTime"],
-        "tags": [tags.APPLICATION],
-    }),
-     "ns3::RandomDirection2dMobilityModel": dict({
-        "category": FC.CATEGORY_MOBILITY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node"],
-        "box_attributes": ["Bounds",
-            "RndSpeed",
-            "Pause",
-            "Position",
-            "Velocity"],
-        "tags": [tags.MOBILE],
-    }),
-     "ns3::UanMacAloha": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": [],
-    }),
-     "ns3::MsduStandardAggregator": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["MaxAmsduSize"],
-    }),
-     "ns3::DcaTxop": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["MinCw",
-            "MaxCw",
-            "Aifsn"],
-    }),
-     "ns3::UanPhyCalcSinrFhFsk": dict({
-        "category": FC.CATEGORY_PHY_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["NumberOfHops"],
-    }),
-     "ns3::UanPropModelIdeal": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": [],
-    }),
-     "ns3::UanMacRcGw": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["MaxReservations",
-            "NumberOfRates",
-            "RetryRate",
-            "MaxPropDelay",
-            "SIFS",
-            "NumberOfNodes",
-            "MinRetryRate",
-            "RetryStep",
-            "NumberOfRetryRates",
-            "TotalRate",
-            "RateStep",
-            "FrameSize"],
-    }),
-     "ns3::NistErrorRateModel": dict({
-        "category": FC.CATEGORY_ERROR_MODELS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["phy"],
-        "box_attributes": [],
-    }),
-     "ns3::Ipv4L3Protocol": dict({
-        "category": FC.CATEGORY_PROTOCOLS,
-        "create_function": create_ipv4protocol,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node"],
-        "box_attributes": ["DefaultTtl",
-            "IpForward",
-            "WeakEsModel"],
-    }),
-     "ns3::aodv::RoutingProtocol": dict({
-        "category": FC.CATEGORY_PROTOCOLS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["HelloInterval",
-            "RreqRetries",
-            "RreqRateLimit",
-            "NodeTraversalTime",
-            "NextHopWait",
-            "ActiveRouteTimeout",
-            "MyRouteTimeout",
-            "BlackListTimeout",
-            "DeletePeriod",
-            "TimeoutBuffer",
-            "NetDiameter",
-            "NetTraversalTime",
-            "PathDiscoveryTime",
-            "MaxQueueLen",
-            "MaxQueueTime",
-            "AllowedHelloLoss",
-            "GratuitousReply",
-            "DestinationOnly",
-            "EnableHello",
-            "EnableBroadcast"],
-    }),
-     "ns3::TcpL4Protocol": dict({
-        "category": FC.CATEGORY_PROTOCOLS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node"],
-        "box_attributes": ["RttEstimatorFactory",
-            "ProtocolNumber"],
-    }),
-     "ns3::olsr::RoutingProtocol": dict({
-        "category": FC.CATEGORY_PROTOCOLS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["HelloInterval",
-            "TcInterval",
-            "MidInterval",
-            "HnaInterval",
-            "Willingness"],
-    }),
-     "ns3::UdpEchoServer": dict({
-        "category": FC.CATEGORY_APPLICATIONS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["node"],
-        "stop_function": stop_application,
-        "start_function": start_application,
-        "status_function": status_application,
-        "box_attributes": ["Port",
-           "StartTime",
-           "StopTime"],
-        "tags": [tags.APPLICATION],
-    }),
-     "ns3::AmrrWifiManager": dict({
-        "category": FC.CATEGORY_MANAGERS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["dev"],
-        "box_attributes": ["UpdatePeriod",
-            "FailureRatio",
-            "SuccessRatio",
-            "MaxSuccessThreshold",
-            "MinSuccessThreshold",
-            "IsLowLatency",
-            "MaxSsrc",
-            "MaxSlrc",
-            "RtsCtsThreshold",
-            "FragmentationThreshold",
-            "NonUnicastMode"],
-    }),
-     "ns3::ArfWifiManager": dict({
-        "category": FC.CATEGORY_MANAGERS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": ["dev"],
-        "box_attributes": ["TimerThreshold",
-            "SuccessThreshold",
-            "IsLowLatency",
-            "MaxSsrc",
-            "MaxSlrc",
-            "RtsCtsThreshold",
-            "FragmentationThreshold",
-            "NonUnicastMode"],
-    }),
-     "ns3::SubscriberStationNetDevice": dict({
-        "category": FC.CATEGORY_DEVICES,
-        "create_function": create_subscriber_station,
-        "configure_function": configure_station,
-        "help": "Subscriber station for mobile wireless network",
-        "connector_types": ["node", "chan", "phy", "sflows"],
-        "box_attributes": ["LostDlMapInterval",
-            "LostUlMapInterval",
-            "MaxDcdInterval",
-            "MaxUcdInterval",
-            "IntervalT1",
-            "IntervalT2",
-            "IntervalT3",
-            "IntervalT7",
-            "IntervalT12",
-            "IntervalT20",
-            "IntervalT21",
-            "MaxContentionRangingRetries",
-            "Mtu",
-            "RTG",
-            "TTG"],
-        "traces": ["wimaxpcap", "wimaxascii"],
-        "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-    }),
-    "ns3::flame::FlameRtable": dict({
-        "category": "",
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "",
-        "connector_types": [],
-        "box_attributes": ["Lifetime"],
-    }),
-    "ns3::BSSchedulerRtps": dict({
-        "category": FC.CATEGORY_SERVICE_FLOWS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "Simple downlink scheduler for rtPS flows",
-        "connector_types": ["dev"],
-        "box_attributes": [],
-    }),
-    "ns3::BSSchedulerSimple": dict({
-        "category": FC.CATEGORY_SERVICE_FLOWS,
-        "create_function": create_element,
-        "configure_function": configure_element,
-        "help": "simple downlink scheduler for service flows",
-        "connector_types": ["dev"],
-        "box_attributes": [],
-    }),
-    "ns3::SimpleOfdmWimaxChannel": dict({
-        "category": FC.CATEGORY_CHANNELS,
-        "create_function": create_wimax_channel,
-        "configure_function": configure_element,
-        "help": "Wimax channel",
-        "connector_types": ["devs"],
-        "box_attributes": [],
-    }),
-    "ns3::SimpleOfdmWimaxPhy": dict({
-        "category": FC.CATEGORY_PHY_MODELS,
-        "create_function": create_wimax_phy,
-        "configure_function": configure_element,
-        "help": "Wimax Phy",
-        "connector_types": ["dev"],
-        "box_attributes": [],
-    }),
-    "ns3::UplinkSchedulerSimple": dict({
-        "category": FC.CATEGORY_SERVICE_FLOWS,
-        "create_function": create_element_no_constructor,
-        "configure_function": configure_element,
-        "help": "Simple uplink scheduler for service flows",
-        "connector_types": ["dev"],
-        "box_attributes": [],
-    }),
-    "ns3::UplinkSchedulerRtps": dict({
-        "category": FC.CATEGORY_SERVICE_FLOWS,
-        "create_function": create_element_no_constructor,
-        "configure_function": configure_element,
-        "help": "Simple uplink scheduler for rtPS flows",
-        "connector_types": ["dev"],
-        "box_attributes": [],
-    }),
-    "ns3::IpcsClassifierRecord": dict({
-        "category": FC.CATEGORY_SERVICE_FLOWS,
-        "create_function": create_ipcs_classifier_record,
-        "configure_function": configure_element,
-        "help": "Classifier record for service flow",
-        "connector_types": ["sflow"],
-        "box_attributes": ["ClassifierSrcAddress", 
-            "ClassifierSrcMask", 
-            "ClassifierDstAddress",
-            "ClassifierDstMask",
-            "ClassifierSrcPortLow",
-            "ClassifierSrcPortHigh",
-            "ClassifierDstPortLow",
-            "ClassifierDstPortHigh",
-            "ClassifierProtocol",
-            "ClassifierPriority"],
-    }),   
-    "ns3::ServiceFlow": dict({
-        "category": FC.CATEGORY_SERVICE_FLOWS,
-        "create_function": create_service_flow,
-        "configure_function": configure_element,
-        "help": "Service flow for QoS",
-        "connector_types": ["classif", "dev"],
-        "box_attributes": ["ServiceFlowDirection", 
-            "ServiceFlowSchedulingType"],
-    }),   
-})
-        
diff --git a/src/nepi/testbeds/ns3/metadata.py b/src/nepi/testbeds/ns3/metadata.py
deleted file mode 100644 (file)
index 1766e6a..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID, TESTBED_VERSION
-from nepi.core import metadata
-from nepi.util.constants import DeploymentConfiguration as DC
-
-supported_recovery_policies = [
-        DC.POLICY_FAIL,
-        DC.POLICY_RESTART,
-    ]
-
-class MetadataInfo(metadata.MetadataInfo):
-    @property
-    def connector_types(self):
-        from connection_metadata import connector_types
-        return connector_types
-
-    @property
-    def connections(self):
-        from connection_metadata import connections
-        return connections
-
-    @property
-    def attributes(self):
-        from attributes_metadata import attributes
-        return attributes
-
-    @property
-    def traces(self):
-        from traces_metadata import traces
-        return traces
-
-    @property
-    def create_order(self):
-        from factories_metadata import factories_create_order
-        return factories_create_order
-
-    @property
-    def configure_order(self):
-        from factories_metadata import factories_configure_order
-        return factories_configure_order
-
-    @property
-    def factories_info(self):
-        from factories_metadata import factories_info
-        return factories_info
-
-    @property
-    def testbed_attributes(self):
-        from attributes_metadata import testbed_attributes
-        return testbed_attributes
-
-    @property
-    def testbed_id(self):
-        return TESTBED_ID
-
-    @property
-    def testbed_version(self):
-        return TESTBED_VERSION
-
-    @property
-    def supported_recovery_policies(self):
-        return supported_recovery_policies
-
-
diff --git a/src/nepi/testbeds/ns3/ns3_bindings_import.py b/src/nepi/testbeds/ns3/ns3_bindings_import.py
deleted file mode 100644 (file)
index 8214a45..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-from ns.core import *
-from ns.network import *
-from ns.config_store import *
-from ns.internet import *
-from ns.propagation import *
-from ns.point_to_point import *
-from ns.csma import *
-from ns.emu import *
-from ns.bridge import *
-from ns.tap_bridge import *
-from ns.applications import *
-from ns.nix_vector_routing import *
-from ns.olsr import *
-from ns.aodv import *
-from ns.dsdv import *
-#from ns.click import *
-from ns.mobility import *
-from ns.wifi import *
-#from ns.netanim import *
-from ns.stats import *
-from ns.uan import *
-from ns.spectrum import *
-from ns.mesh import *
-from ns.flow_monitor import *
-from ns.wimax import *
-from ns.lte import *
-from ns.mpi import *
-from ns.topology_read import *
-from ns.energy import *
-from ns.tools import *
-#from ns.visualizer import *
-from ns.point_to_point_layout import *
-from ns.fd_net_device import *
diff --git a/src/nepi/testbeds/ns3/traces_metadata.py b/src/nepi/testbeds/ns3/traces_metadata.py
deleted file mode 100644 (file)
index e8859a8..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-# -*- coding: utf-8 -*-
-
-traces = dict({
-    "p2ppcap": dict({
-                "name": "P2PPcapTrace",
-                "help": "Trace to sniff packets from a P2P network device"
-              }),
-    "p2pascii": dict({
-                "name": "P2PAsciiTrace",
-                "help": "Ascii trace from a P2P network device"
-              }),
-    "csmapcap_promisc": dict({
-                "name": "CsmaPromiscPcapTrace",
-                "help": "Trace to sniff packets from a Csma network device in promiscuous mode"
-              }),
-    "csmapcap": dict({
-                "name": "CsmaPcapTrace",
-                "help": "Trace to sniff packets from a Csma network device"
-              }),
-    "fdpcap": dict({
-                "name": "FdPcapTrace",
-                "help": "Trace to sniff packets from a file descriptor network device"
-              }),
-    "fdascii": dict({
-                "name": "FdAsciiTrace",
-                "help": "Ascii trace from a file descriptor network device"
-              }),
-    "yanswifipcap": dict({
-                "name": "YansWifiPhyPcapTrace",
-                "help": "Trace to sniff packets from a Wifi network device"
-              }),
-    "wimaxpcap": dict({
-                "name": "WimaxPcapTrace",
-                "help": "Trace to sniff packets from a wimax network station"
-              }),
-    "wimaxascii": dict({
-                "name": "WimaxAsciiTrace",
-                "help": "Ascii trace from a wimax network station"
-              }),
-    "rtt": dict({
-                "name": "Rtt",
-                "help": "Gnuplot-able trace of round trip times"
-    })
-})
diff --git a/src/nepi/testbeds/ns3/util.py b/src/nepi/testbeds/ns3/util.py
deleted file mode 100644 (file)
index a5ee731..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-def _get_ipv4_protocol_guid(testbed_instance, node_guid):
-    # search for the Ipv4L3Protocol asociated with the device
-    protos_guids = testbed_instance.get_connected(node_guid, "protos", "node")
-    if len(protos_guids) == 0:
-        raise RuntimeError("No protocols where found for the node %d" % node_guid)
-    ipv4_guid = None
-    for proto_guid in protos_guids:
-        proto_factory_id = testbed_instance._create[proto_guid]
-        if proto_factory_id == "ns3::Ipv4L3Protocol":
-            ipv4_guid = proto_guid
-            break
-    if not ipv4_guid:
-        raise RuntimeError("No Ipv4L3Protocol associated to node %d. Can't add Ipv4 addresses" % node_guid)
-    return ipv4_guid
-
-def _get_node_guid(testbed_instance, guid):
-    # search for the node asociated with the device
-    node_guids = testbed_instance.get_connected(guid, "node", "devs")
-    if len(node_guids) == 0:
-        raise RuntimeError("Can't instantiate interface %d outside node" % guid)
-    node_guid = node_guids[0]
-    return node_guid
-
-def _get_dev_number(testbed_instance, guid):
-    node_guid = _get_node_guid(testbed_instance, guid)
-    dev_guids = testbed_instance.get_connected(node_guid, "devs", "node")
-    interface_number = 0
-    for guid_ in dev_guids:
-        if guid_ == guid:
-            break
-        interface_number += 1
-    return interface_number
-
diff --git a/src/nepi/testbeds/ns3/validation.py b/src/nepi/testbeds/ns3/validation.py
deleted file mode 100644 (file)
index baf8911..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# TODO: Allow netrefs!
-def is_address(attribute, value):
-    if not isinstance(value, str):
-        return False
-    return True
-
diff --git a/src/nepi/testbeds/omf/__init__.py b/src/nepi/testbeds/omf/__init__.py
deleted file mode 100644 (file)
index d3ef5dd..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID, TESTBED_ID
-from execute import TestbedController 
-
diff --git a/src/nepi/testbeds/omf/constants.py b/src/nepi/testbeds/omf/constants.py
deleted file mode 100644 (file)
index dc96b01..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-# -*- coding: utf-8 -*-
-
-TESTBED_ID = "omf"
-TESTBED_VERSION = "5.4"
-
diff --git a/src/nepi/testbeds/omf/execute.py b/src/nepi/testbeds/omf/execute.py
deleted file mode 100644 (file)
index 9736955..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID, TESTBED_VERSION
-from nepi.core import testbed_impl
-from nepi.util.constants import TIME_NOW
-
-from nepi.testbeds.omf.omf_api import OmfAPI
-
-import logging
-import os
-import time
-
-class TestbedController(testbed_impl.TestbedController):
-    def __init__(self):
-        super(TestbedController, self).__init__(TESTBED_ID, TESTBED_VERSION)
-        self._home = None
-        self._api = None
-        self._logger = logging.getLogger('nepi.testbeds.omf')
-    def do_setup(self):
-        debug = self._attributes.get_attribute_value("enableDebug")
-        if debug:
-            self._logger.setLevel(logging.DEBUG)
-
-        # create home
-        self._home = self._attributes.get_attribute_value("homeDirectory")
-        home = os.path.normpath(self._home)
-        if not os.path.exists(home):
-            os.makedirs(home, 0755)
-
-        # initialize OMF xmpp client
-        slice = self._attributes.get_attribute_value("xmppSlice")
-        host = self._attributes.get_attribute_value("xmppHost")
-        port = self._attributes.get_attribute_value("xmppPort")
-        password = self._attributes.get_attribute_value("xmppPassword")
-
-        xmpp_root = self._attributes.get_attribute_value("xmppRoot")
-        self._api = OmfAPI(slice, host, port, password, xmpp_root)
-        super(TestbedController, self).do_setup()
-
-    @property
-    def api(self):
-        return self._api
-
-    def set(self, guid, name, value, time = TIME_NOW):
-        super(TestbedController, self).set(guid, name, value, time)
-        element = self._elements[guid]
-        if element:
-            try:
-                setattr(element, name, value)
-            except:
-                # We ignore these errors while recovering.
-                # Some attributes are immutable, and setting
-                # them is necessary (to recover the state), but
-                # some are not (they throw an exception).
-                if not self.recovering:
-                    raise
-
-    def get(self, guid, name, time = TIME_NOW):
-        value = super(TestbedController, self).get(guid, name, time)
-        element = self._elements.get(guid)
-        try:
-            return getattr(element, name)
-        except (KeyError, AttributeError):
-            return value
-
-    def shutdown(self):
-        if self.api: 
-            self.api.disconnect()
-
diff --git a/src/nepi/testbeds/omf/metadata.py b/src/nepi/testbeds/omf/metadata.py
deleted file mode 100644 (file)
index fc0f246..0000000
+++ /dev/null
@@ -1,445 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import functools
-import random
-import weakref
-
-from constants import TESTBED_ID, TESTBED_VERSION
-from nepi.core import metadata
-from nepi.core.attributes import Attribute
-from nepi.util import tags, validation, ipaddr2
-from nepi.util.constants import ApplicationStatus as AS, \
-        FactoryCategories as FC, DeploymentConfiguration as DC
-
-##############################################################################
-
-class OmfResource(object):
-    def __init__(self, guid, tc):
-        super(OmfResource, self).__init__()
-        self._tc = weakref.ref(tc)
-        self._guid = guid
-
-    @property
-    def tc(self):
-        return self._tc and self._tc()
-
-    @property
-    def guid(self):
-        return self._guid
-
-    def configure(self):
-        pass
-
-    def start(self):
-        pass
-
-    def stop(self):
-        pass
-
-    def status(self):
-        pass
-
-    def shutdown(self):
-        pass
-
-## NODE #######################################################################
-
-class OmfNode(OmfResource):
-    def __init__(self, guid, tc):
-        super(OmfNode, self).__init__(guid, tc)
-        self.hostname = self.tc._get_parameters(guid)['hostname']
-        self.tc.api.enroll_host(self.hostname)
-
-    def configure(self):
-        routes = self.tc._add_route.get(self.guid, [])
-        iface_guids = self.tc.get_connected(self.guid, "devs", "node")
-       
-        for route in routes:
-            (destination, netprefix, nexthop, metric, device) = route
-            netmask = ipaddr2.ipv4_mask2dot(netprefix)
-
-            # Validate that the interface is associated to the node
-            for iface_guid in iface_guids:
-                iface = self.tc.elements.get(iface_guid)
-                if iface.devname == device:
-                    self.tc.api.execute(self.hostname, 
-                        "Id#%s" % str(random.getrandbits(128)), 
-                        "add -net %s netmask %s dev %s" % (destination, netmask, iface.devname), 
-                        "/sbin/route", # path
-                        None, # env
-                     )
-                    break
-
-## APPLICATION ################################################################
-
-class OmfApplication(OmfResource):
-    def __init__(self, guid, tc):
-        super(OmfApplication, self).__init__(guid, tc)
-        node_guids = tc.get_connected(guid, "node", "apps")
-        if len(node_guids) == 0:
-            raise RuntimeError("Can't instantiate interface %d outside node" % guid)
-
-        self._node_guid = node_guids[0] 
-        self.app_id = None
-        self.arguments = None
-        self.path = None
-        self.env = None
-
-    def start(self):
-        node = self.tc.elements.get(self._node_guid)
-        self.tc.api.execute(node.hostname, 
-                self.appId, 
-                self.arguments, 
-                self.path,
-                self.env)
-
-    def stop(self):
-        node = self.tc.elements.get(self._node_guid)
-        self.tc.api.exit(node.hostname, 
-                self.appId) 
-
-    def status(self):
-        if guid not in testbed_instance.elements.keys():
-            return AS.STATUS_NOT_STARTED
-        return AS.STATUS_RUNNING
-        # TODO!!!!
-        #return AS.STATUS_FINISHED
-
-
-## WIFIIFACE ########################################################
-
-class OmfWifiInterface(OmfResource):
-    alias2name = dict({'w0':'wlan0', 'w1':'wlan1'})
-
-    def __init__(self, guid, tc):
-        super(OmfWifiInterface, self).__init__(guid, tc)
-        node_guids = tc.get_connected(guid, "node", "devs")
-        if len(node_guids) == 0:
-            raise RuntimeError("Can't instantiate interface %d outside node" % guid)
-
-        self._node_guid = node_guids[0] 
-        self.alias = self.tc._get_parameters(self.guid)['alias']
-        self.devname = self.alias2name.get(self.alias)
-        self.mode = None
-        self.type = None
-        self.essid = None
-        self.channel = None
-        self.ip = None
-
-    def __setattr__(self, name, value):
-        super(OmfWifiInterface, self).__setattr__(name, value)
-
-        try:
-            if self.mode and self.type and self.essid and self.channel and self.ip:
-                node = self.tc.elements.get(self._node_guid)    
-                for attrname in ["mode", "type", "essid", "channel", "ip"]:
-                    attrval = getattr(self, attrname)
-                    attrname = "net/%s/%s" % (self.alias, attrname)
-                    self._tc().api.configure(node.hostname, attrname, attrval)
-        except AttributeError:
-            # If the attribute is not yet defined, ignore the error
-            pass
-
-# Factories
-NODE = "Node"
-WIFIIFACE = "WifiInterface"
-CHANNEL = "Channel"
-OMFAPPLICATION = "OmfApplication"
-
-def create(factory, testbed_instance, guid):
-    clazz = OmfResource
-    if factory == NODE:
-        clazz = OmfNode
-    elif factory == OMFAPPLICATION:
-        clazz = OmfApplication
-    elif factory == WIFIIFACE:
-        clazz = OmfWifiInterface
-
-    element = clazz(guid, testbed_instance)
-    #import pdb; pdb.set_trace()
-    testbed_instance._elements[guid] = element
-
-def start(testbed_instance, guid):
-    element = testbed_instance.elements.get(guid)
-    element.start()
-
-def stop(testbed_instance, guid):
-    element = testbed_instance.elements.get(guid)
-    element.stop()
-
-def status(testbed_instance, guid):
-    element = testbed_instance.elements.get(guid)
-    return element.status()
-
-def configure(testbed_instance, guid):
-    element = testbed_instance.elements.get(guid)
-    return element.configure()
-
-### Factory information ###
-
-connector_types = dict({
-    "apps": dict({
-                "help": "Connector from node to applications", 
-                "name": "apps",
-                "max": -1, 
-                "min": 0
-            }),
-    "devs": dict({
-                "help": "Connector to network interfaces", 
-                "name": "devs",
-                "max": -1, 
-                "min": 0
-            }),
-    "chan": dict({
-                "help": "Connector from a device to a channel", 
-                "name": "chan",
-                "max": 1, 
-                "min": 1
-            }),
-    "node": dict({
-                "help": "Connector to a Node", 
-                "name": "node",
-                "max": 1, 
-                "min": 1
-            }),
-   })
-
-connections = [
-    dict({
-        "from": (TESTBED_ID, NODE, "devs"),
-        "to":   (TESTBED_ID, WIFIIFACE, "node"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, WIFIIFACE, "chan"),
-        "to":   (TESTBED_ID, CHANNEL, "devs"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, NODE, "apps"),
-        "to":   (TESTBED_ID, OMFAPPLICATION, "node"),
-        "can_cross": False
-    }),
- ]
-
-attributes = dict({
-    "appId": dict({
-                "name": "appId",
-                "help": "Application id",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    "arguments": dict({
-                "name": "arguments",
-                "help": "Application arguments",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    "path": dict({
-                "name": "path",
-                "help": "Path to binary (e.g '/opt/vlc-1.1.13/vlc')",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    "env": dict({
-                "name": "env",
-                "help": "String with space separated values of environment variables to set before starting application (e.g 'FOO=foo BAR=bar')",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    "hostname": dict({
-                "name": "hostname",
-                "help": "Hostname for the target OMF node",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    "mode": dict({
-                "name": "mode",
-                "help": "Corresponds to the OMF attributes net/w0/mode",
-                "type": Attribute.STRING,
-                "flags": Attribute.NoDefaultValue, 
-                "validation_function": validation.is_string
-            }),
-    "type": dict({
-                "name": "type",
-                "help": "Corresponds to the OMF attributes net/w0/type",
-                "type": Attribute.STRING,
-                "flags": Attribute.NoDefaultValue, 
-                "validation_function": validation.is_string
-            }),
-    "channel": dict({
-                "name": "channel",
-                "help": "Corresponds to the OMF attributes net/w0/channel",
-                "type": Attribute.STRING,
-                "flags": Attribute.NoDefaultValue, 
-                "validation_function": validation.is_string
-            }),
-    "essid": dict({
-                "name": "essid",
-                "help": "Corresponds to the OMF attributes net/w0/essid",
-                "type": Attribute.STRING,
-                "flags": Attribute.NoDefaultValue, 
-                "validation_function": validation.is_string
-            }),
-    "ip": dict({
-                "name": "ip",
-                "help": "Corresponds to the OMF attributes net/w0/ip",
-                "type": Attribute.STRING,
-                "flags": Attribute.NoDefaultValue, 
-                "validation_function": validation.is_ip4_address
-            }),
-    "alias": dict({
-                "name": "alias",
-                "help": "Alias for device (e.g. w0, w1, etc)",
-                "type": Attribute.STRING,
-                "value": "w0",
-                "flags": Attribute.NoDefaultValue, 
-                "validation_function": validation.is_string
-            }),
-    })
-
-traces = dict()
-
-create_order = [ NODE, WIFIIFACE, CHANNEL, OMFAPPLICATION ]
-configure_order = [ WIFIIFACE,  NODE, CHANNEL, OMFAPPLICATION ]
-
-factories_info = dict({
-    NODE: dict({
-            "help": "OMF Node",
-            "category": FC.CATEGORY_NODES,
-            "create_function": functools.partial(create, NODE),
-            "configure_function": configure,
-            "box_attributes": ["hostname"],
-            "connector_types": ["devs", "apps"],
-            "tags": [tags.NODE, tags.ALLOW_ROUTES],
-       }),
-    WIFIIFACE: dict({
-            "help": "Wireless network interface",
-            "category": FC.CATEGORY_DEVICES,
-            "create_function": functools.partial(create, WIFIIFACE),
-            "configure_function": configure,
-            "box_attributes": ["mode", "type", "channel", "essid", "ip", "alias"],
-            "connector_types": ["node", "chan"],
-            "tags": [tags.INTERFACE, tags.HAS_ADDRESSES],
-       }),
-    CHANNEL: dict({
-            "help": "Wireless channel",
-            "category": FC.CATEGORY_DEVICES,
-            "create_function": create,
-            "create_function": functools.partial(create, CHANNEL),
-            "box_attributes": ["mode", "type", "channel", "essid"],
-            "connector_types": ["devs"],
-       }),
-    OMFAPPLICATION: dict({
-            "help": "Generic executable command line application",
-            "category": FC.CATEGORY_APPLICATIONS,
-            "create_function": functools.partial(create, OMFAPPLICATION),
-            "start_function": start,
-            "stop_function": stop,
-            "status_function": status,
-            "box_attributes": ["appId", "arguments", "path", "env"],
-            "connector_types": ["node"],
-            "tags": [tags.APPLICATION],
-        }),
-})
-
-testbed_attributes = dict({
-    "enable_debug": dict({
-            "name": "enableDebug",
-            "help": "Enable netns debug output",
-            "type": Attribute.BOOL,
-            "value": False,
-            "validation_function": validation.is_bool
-        }),
-    "xmppSlice": dict({
-                "name": "xmppSlice",
-                "help": "OMF slice",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    "xmppHost": dict({
-                "name": "xmppHost",
-                "help": "OMF XMPP server host",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    "xmppPort": dict({
-                "name": "xmppPort",
-                "help": "OMF XMPP service port",
-                "type": Attribute.INTEGER,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_integer
-            }),
-    "xmppPassword": dict({
-                "name": "xmppPassword",
-                "help": "OMF XMPP slice password",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    "xmppRoot": dict({
-                "name": "xmppRoot",
-                "help": "Root node of the xmpp server pubsub tree",
-                "type": Attribute.STRING,
-                "value": "OMF_5.4",
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    })
-
-supported_recovery_policies = [
-        DC.POLICY_FAIL,
-    ]
-
-class MetadataInfo(metadata.MetadataInfo):
-    @property
-    def connector_types(self):
-        return connector_types
-
-    @property
-    def connections(self):
-        return connections
-
-    @property
-    def attributes(self):
-        return attributes
-
-    @property
-    def traces(self):
-        return traces
-
-    @property
-    def create_order(self):
-        return create_order
-
-    @property
-    def configure_order(self):
-        return configure_order
-
-    @property
-    def factories_info(self):
-        return factories_info
-
-    @property
-    def testbed_attributes(self):
-        return testbed_attributes
-
-    @property
-    def testbed_id(self):
-        return TESTBED_ID
-
-    @property
-    def testbed_version(self):
-        return TESTBED_VERSION
-    
-    @property
-    def supported_recover_policies(self):
-        return supported_recovery_policies
-
diff --git a/src/nepi/testbeds/omf/omf_api.py b/src/nepi/testbeds/omf/omf_api.py
deleted file mode 100644 (file)
index 4eea26e..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-import datetime
-import logging
-import ssl
-import sys
-import time
-
-from nepi.testbeds.omf.omf_client import OMFClient
-from nepi.testbeds.omf.omf_messages import MessageHandler
-
-class OmfAPI(object):
-    def __init__(self, slice, host, port, password, xmpp_root = None):
-        date = datetime.datetime.now().strftime("%Y-%m-%dt%H.%M.%S")
-        tz = -time.altzone if time.daylight != 0 else -time.timezone
-        date += "%+06.2f" % (tz / 3600) # timezone difference is in seconds
-        self._user = "%s-%s" % (slice, date)
-        self._slice = slice
-        self._host = host
-        self._port = port
-        self._password = password
-        self._hostnames = []
-        self._xmpp_root = xmpp_root or "OMF_5.4"
-
-        self._logger = logging.getLogger("nepi.testbeds.omf")
-
-        # OMF xmpp client
-        self._client = None
-        # message handler
-        self._message = None
-
-        if sys.version_info < (3, 0):
-            reload(sys)
-            sys.setdefaultencoding('utf8')
-
-        # instantiate the xmpp client
-        self._init_client()
-
-        # register xmpp nodes for the experiment
-        self._enroll_experiment()
-
-        # register xmpp logger for the experiment
-        self._enroll_logger()
-
-    def _init_client(self):
-        jid = "%s@%s" % (self._user, self._host)
-        xmpp = OMFClient(jid, self._password)
-        # PROTOCOL_SSLv3 required for compatibility with OpenFire
-        xmpp.ssl_version = ssl.PROTOCOL_SSLv3
-
-        if xmpp.connect((self._host, self._port)):
-            xmpp.process(threaded=True)
-            while not xmpp.ready:
-                time.sleep(1)
-            self._client = xmpp
-            self._message = MessageHandler(self._slice, self._user)
-        else:
-            msg = "Unable to connect to the XMPP server."
-            self._logger.error(msg)
-            raise RuntimeError(msg)
-
-    def _enroll_experiment(self):
-        xmpp_node = self._exp_session_id
-        self._client.create(xmpp_node)
-        #print "Create experiment sesion id topics !!" 
-        self._client.subscribe(xmpp_node)
-        #print "Subscribe to experiment sesion id topics !!" 
-
-
-        address = "/%s/%s/%s/%s" % (self._host, self._xmpp_root, self._slice, self._user)
-        print address
-        payload = self._message.newexpfunction(self._user, address)
-        slice_sid = "/%s/%s" % (self._xmpp_root, self._slice)
-        self._client.publish(payload, slice_sid)
-
-    def _enroll_logger(self):
-        xmpp_node = self._logger_session_id
-        self._client.create(xmpp_node)
-        self._client.subscribe(xmpp_node)
-
-        payload = self._message.logfunction("2", 
-                "nodeHandler::NodeHandler", 
-                "INFO", 
-                "OMF Experiment Controller 5.4 (git 529a626)")
-        self._client.publish(payload, xmpp_node)
-
-    def _host_session_id(self, hostname):
-        return "/%s/%s/%s/%s" % (self._xmpp_root, self._slice, self._user, hostname)
-
-    def _host_resource_id(self, hostname):
-        return "/%s/%s/resources/%s" % (self._xmpp_root, self._slice, hostname)
-
-    @property
-    def _exp_session_id(self):
-        return "/%s/%s/%s" % (self._xmpp_root, self._slice, self._user)
-
-    @property
-    def _logger_session_id(self):
-        return "/%s/%s/%s/LOGGER" % (self._xmpp_root, self._slice, self._user)
-
-    def delete(self, hostname):
-        if not hostname in self._hostnames:
-            return
-
-        self._hostnames.remove(hostname)
-
-        xmpp_node = self._host_session_id(hostname)
-        self._client.delete(xmpp_node)
-
-    def enroll_host(self, hostname):
-        if hostname in self._hostnames:
-            return 
-
-        self._hostnames.append(hostname)
-
-        xmpp_node =  self._host_session_id(hostname)
-        self._client.create(xmpp_node)
-        self._client.subscribe(xmpp_node)
-
-        xmpp_node =  self._host_resource_id(hostname)
-        self._client.subscribe(xmpp_node)
-
-        payload = self._message.enrollfunction("1", "*", "1", hostname)
-        self._client.publish(payload, xmpp_node)
-
-    def configure(self, hostname, attribute, value): 
-        payload = self._message.configurefunction(hostname, value, attribute)
-        xmpp_node =  self._host_session_id(hostname)
-        self._client.publish(payload, xmpp_node)
-
-    def execute(self, hostname, app_id, arguments, path, env):
-        payload = self._message.executefunction(hostname, app_id, arguments, path, env)
-        xmpp_node =  self._host_session_id(hostname)
-        self._client.publish(payload, xmpp_node)
-
-    def exit(self, hostname, app_id):
-        payload = self._message.exitfunction(hostname, app_id)
-        xmpp_node =  self._host_session_id(hostname)
-        self._client.publish(payload, xmpp_node)
-
-    def disconnect(self):
-        self._client.delete(self._exp_session_id)
-        self._client.delete(self._logger_session_id)
-
-        for hostname in self._hostnames[:]:
-            self.delete(hostname)
-
-        time.sleep(5)
-        self._client.disconnect()
-
diff --git a/src/nepi/testbeds/omf/omf_client.py b/src/nepi/testbeds/omf/omf_client.py
deleted file mode 100644 (file)
index 9312753..0000000
+++ /dev/null
@@ -1,203 +0,0 @@
-import logging
-import sleekxmpp
-from sleekxmpp.exceptions import IqError, IqTimeout
-import traceback
-import xml.etree.ElementTree as ET
-
-class OMFClient(sleekxmpp.ClientXMPP):
-    def __init__(self, jid, password):
-        sleekxmpp.ClientXMPP.__init__(self, jid, password)
-        self._ready = False
-        self._registered = False
-        self._server = None
-
-        self.register_plugin('xep_0077') # In-band registration
-        self.register_plugin('xep_0030')
-        self.register_plugin('xep_0059')
-        self.register_plugin('xep_0060') # PubSub 
-
-        self.add_event_handler("session_start", self.start)
-        self.add_event_handler("register", self.register)
-        self.add_event_handler("pubsub_publish", self.handle_omf_message)
-        
-        self._logger = logging.getLogger("nepi.testbeds.omf")
-
-    @property
-    def ready(self):
-        return self._ready
-
-    def start(self, event):
-        self.send_presence()
-        self._ready = True
-        self._server = "pubsub.%s" % self.boundjid.domain
-
-    def register(self, iq):
-        if self._registered:
-            self._logger.info("%s already registered!" % self.boundjid)
-            return 
-
-        resp = self.Iq()
-        resp['type'] = 'set'
-        resp['register']['username'] = self.boundjid.user
-        resp['register']['password'] = self.password
-
-        try:
-            resp.send(now=True)
-            self._logger.info("Account created for %s!" % self.boundjid)
-            self._registered = True
-        except IqError as e:
-            self._logger.error("Could not register account: %s" %
-                    e.iq['error']['text'])
-        except IqTimeout:
-            self._logger.error("No response from server.")
-
-    def unregister(self):
-        try:
-            self.plugin['xep_0077'].cancel_registration(
-                ifrom=self.boundjid.full)
-            self._logger.info("Account unregistered for %s!" % self.boundjid)
-        except IqError as e:
-            self._logger.error("Could not unregister account: %s" %
-                    e.iq['error']['text'])
-        except IqTimeout:
-            self._logger.error("No response from server.")
-
-    def nodes(self):
-        try:
-            result = self['xep_0060'].get_nodes(self._server)
-            for item in result['disco_items']['items']:
-                self._logger.info(' - %s' % str(item))
-            return result
-        except:
-            error = traceback.format_exc()
-            self._logger.error('Could not retrieve node list.\ntraceback:\n%s', error)
-
-    def subscriptions(self):
-        try:
-            result = self['xep_0060'].get_subscriptions(self._server)
-                #self.boundjid.full)
-            for node in result['node']:
-                self._logger.info(' - %s' % str(node))
-            return result
-        except:
-            error = traceback.format_exc()
-            self._logger.error('Could not retrieve subscriptions.\ntraceback:\n%s', error)
-
-    def create(self, node):
-        config = self['xep_0004'].makeForm('submit')
-        config.add_field(var='pubsub#node_type', value='leaf')
-        config.add_field(var='pubsub#notify_retract', value='0')
-        config.add_field(var='pubsub#publish_model', value='open')
-        config.add_field(var='pubsub#persist_items', value='1')
-        config.add_field(var='pubsub#max_items', value='1')
-        config.add_field(var='pubsub#title', value=node)
-
-        try:
-            self['xep_0060'].create_node(self._server, node, config = config)
-        except:
-            error = traceback.format_exc()
-            self._logger.error('Could not create node: %s\ntraceback:\n%s' % (node, error))
-
-    def delete(self, node):
-        try:
-            self['xep_0060'].delete_node(self._server, node)
-            self._logger.info('Deleted node: %s' % node)
-        except:
-            error = traceback.format_exc()
-            self._logger.error('Could not delete node: %s\ntraceback:\n%s' % (node, error))
-    
-    def publish(self, data, node):
-        try:
-            result = self['xep_0060'].publish(self._server,node,payload=data)
-            # id = result['pubsub']['publish']['item']['id']
-            # print('Published at item id: %s' % id)
-        except:
-            error = traceback.format_exc()
-            self._logger.error('Could not publish to: %s\ntraceback:\n%s' \
-                    % (self.boundjid, error))
-
-    def get(self, data):
-        try:
-            result = self['xep_0060'].get_item(self._server, self.boundjid,
-                data)
-            for item in result['pubsub']['items']['substanzas']:
-                self._logger.info('Retrieved item %s: %s' % (item['id'], 
-                    tostring(item['payload'])))
-        except:
-            error = traceback.format_exc()
-            self._logger.error('Could not retrieve item %s from node %s\ntraceback:\n%s' \
-                    % (data, self.boundjid, error))
-
-    def retract(self, data):
-        try:
-            result = self['xep_0060'].retract(self._server, self.boundjid, data)
-            self._logger.info('Retracted item %s from node %s' % (data, self.boundjid))
-        except:
-            error = traceback.format_exc()
-            self._logger.error('Could not retract item %s from node %s\ntraceback:\n%s' \
-                    % (data, self.boundjid, error))
-
-    def purge(self):
-        try:
-            result = self['xep_0060'].purge(self._server, self.boundjid)
-            self._logger.info('Purged all items from node %s' % self.boundjid)
-        except:
-            error = traceback.format_exc()
-            self._logger.error('Could not purge items from node %s\ntraceback:\n%s' \
-                    % (self.boundjid, error))
-
-    def subscribe(self, node):
-        try:
-            result = self['xep_0060'].subscribe(self._server, node)
-            #self._logger.debug('Subscribed %s to node %s' \
-                    #% (self.boundjid.bare, node))
-            self._logger.info('Subscribed %s to node %s' \
-                    % (self.boundjid.user, node))
-        except:
-            error = traceback.format_exc()
-            self._logger.error('Could not subscribe %s to node %s\ntraceback:\n%s' \
-                    % (self.boundjid.bare, node, error))
-
-    def unsubscribe(self, node):
-        try:
-            result = self['xep_0060'].unsubscribe(self._server, node)
-            self._logger.info('Unsubscribed %s from node %s' % (self.boundjid.bare, node))
-        except:
-            error = traceback.format_exc()
-            self._logger.error('Could not unsubscribe %s from node %s\ntraceback:\n%s' \
-                    % (self.boundjid.bare, node, error))
-
-    def _check_for_tag(self, treeroot, namespaces, tag):
-        for element in treeroot.iter(namespaces+tag):
-            if element.text:
-                return element
-            else : 
-                return None    
-
-    def _check_output(self, treeroot, namespaces):
-        output_param = ["TARGET", "REASON", "PATH", "APPID", "VALUE"]
-        response = ""
-        for elt in output_param:
-            msg = self._check_for_tag(treeroot, namespaces, elt)
-            if msg is not None:
-                response = response + msg.text + ": "
-        #if (log = Debug !!!) :
-        #    deb = self._check_for_tag(treeroot, namespaces, "MESSAGE")
-        #    if deb is not None:
-        #        self._logger.debug(response + deb.text)
-        #else :
-        #    self._logger.info(response)
-        deb = self._check_for_tag(treeroot, namespaces, "MESSAGE")
-        if deb is not None:
-            self._logger.debug(response + deb.text)
-        else :
-            self._logger.debug(response)
-
-    def handle_omf_message(self, iq):
-        namespaces = "{http://jabber.org/protocol/pubsub}"
-        for i in iq['pubsub_event']['items']:
-            root = ET.fromstring(str(i))
-            self._check_output(root, namespaces)
-            #self._logger.debug(i)
-
-
diff --git a/src/nepi/testbeds/omf/omf_messages.py b/src/nepi/testbeds/omf/omf_messages.py
deleted file mode 100644 (file)
index 77c53dc..0000000
+++ /dev/null
@@ -1,125 +0,0 @@
-from xml.etree import cElementTree as ET
-
-EXECUTE = "EXECUTE"
-KILL = "KILL"
-STDIN = "STDIN"
-NOOP = "NOOP"
-PM_INSTALL = "PM_INSTALL"
-APT_INSTALL = "APT_INSTALL"
-RPM_INSTALL = "RPM_INSTALL"
-RESET = "RESET"
-REBOOT = "REBOOT"
-MODPROBE = "MODPROBE"
-CONFIGURE = "CONFIGURE"
-LOAD_IMAGE = "LOAD_IMAGE"
-SAVE_IMAGE = "SAVE_IMAGE"
-LOAD_DATA = "LOAD_DATA"
-SET_LINK = "SET_LINK"
-ALIAS = "ALIAS"
-SET_DISCONNECTION = "SET_DISCONNECTION"
-RESTART = "RESTART"
-ENROLL = "ENROLL"
-EXIT = "EXIT" 
-
-class MessageHandler():
-    SliceID = ""
-    ExpID = ""
-
-    def __init__(self, sliceid, expid ):
-        self.SliceID = sliceid
-        self.ExpID = expid
-        print "init" + self.ExpID +"  "+ self.SliceID
-        pass
-
-    def Mid(self, parent, keyword):
-        mid = ET.SubElement(parent, keyword)
-        mid.set("id", "\'omf-payload\'")
-        return mid
-
-    def Mtext(self, parent, keyword, text):
-        mtext = ET.SubElement(parent, keyword)
-        mtext.text = text
-        return mtext
-
-    def executefunction(self, target, appid, cmdlineargs, path, env):
-        payload = ET.Element("omf-message")
-        execute = self.Mid(payload,"EXECUTE")
-        env = self.Mtext(execute, "ENV", env)
-        sliceid = self.Mtext(execute,"SLICEID",self.SliceID)
-        expid = self.Mtext(execute,"EXPID",self.ExpID)
-        target = self.Mtext(execute,"TARGET",target)
-        appid = self.Mtext(execute,"APPID",appid)
-        cmdlineargs = self.Mtext(execute,"CMDLINEARGS",cmdlineargs)
-        path = self.Mtext(execute,"PATH",path)
-        return payload
-
-    def exitfunction(self, target, appid):
-        payload = ET.Element("omf-message")
-        execute = self.Mid(payload,"EXIT")
-        sliceid = self.Mtext(execute,"SLICEID",self.SliceID)
-        expid = self.Mtext(execute,"EXPID",self.ExpID)
-        target = self.Mtext(execute,"TARGET",target)
-        appid = self.Mtext(execute,"APPID",appid)
-        return payload
-
-    def configurefunction(self, target, value, path):
-        payload = ET.Element("omf-message")
-        config = self.Mid(payload, "CONFIGURE")
-        sliceid = self.Mtext(config,"SLICEID",self.SliceID)
-        expid = self.Mtext(config,"EXPID",self.ExpID)
-        target = self.Mtext(config,"TARGET",target)
-        value = self.Mtext(config,"VALUE",value)
-        path = self.Mtext(config,"PATH",path)
-        return payload
-
-    def logfunction(self,level, logger, level_name, data):
-        payload = ET.Element("omf-message")
-        log = self.Mid(payload, "LOGGING")
-        level = self.Mtext(log,"LEVEL",level)
-        sliceid = self.Mtext(log,"SLICEID",self.SliceID)
-        logger = self.Mtext(log,"LOGGER",logger)
-        expid = self.Mtext(log,"EXPID",self.ExpID)
-        level_name = self.Mtext(log,"LEVEL_NAME",level_name)
-        data = self.Mtext(log,"DATA",data)
-        return payload
-
-    def aliasfunction(self, name, target):
-        payload = ET.Element("omf-message")
-        alias = self.Mid(payload,"ALIAS")
-        sliceid = self.Mtext(alias,"SLICEID",self.SliceID)
-        expid = self.Mtext(alias,"EXPID",self.ExpID)
-        name = self.Mtext(alias,"NAME",name)
-        target = self.Mtext(alias,"TARGET",target)
-        return payload
-
-    def enrollfunction(self, enrollkey, image, index, target ):
-        payload = ET.Element("omf-message")
-        enroll = self.Mid(payload,"ENROLL")
-        enrollkey = self.Mtext(enroll,"ENROLLKEY",enrollkey)
-        sliceid = self.Mtext(enroll,"SLICEID",self.SliceID)
-        image = self.Mtext(enroll,"IMAGE",image)
-        expid = self.Mtext(enroll,"EXPID",self.ExpID)
-        index = self.Mtext(enroll,"INDEX",index)
-        target = self.Mtext(enroll,"TARGET",target)
-        return payload
-
-    def noopfunction(self,target):
-        payload = ET.Element("omf-message")
-        noop = self.Mid(payload,"NOOP")
-        sliceid = self.Mtext(noop,"SLICEID",self.SliceID)
-        expid = self.Mtext(noop,"EXPID",self.ExpID)
-        target = self.Mtext(noop,"TARGET",target)
-        return payload
-
-    def newexpfunction(self, experimentid, address):
-        payload = ET.Element("omf-message")
-        newexp = self.Mid(payload,"EXPERIMENT_NEW")
-        experimentid = self.Mtext(newexp,"EXPERIMENT_ID",experimentid)
-        sliceid = self.Mtext(newexp,"SLICEID",self.SliceID)
-        expid = self.Mtext(newexp,"EXPID",self.ExpID)
-        address = self.Mtext(newexp,"ADDRESS",address)
-        return payload
-
-    def handle_message(self, msg):
-        # Do something!!!
-        return msg
diff --git a/src/nepi/testbeds/planetlab/__init__.py b/src/nepi/testbeds/planetlab/__init__.py
deleted file mode 100644 (file)
index f3e1ac0..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID, TESTBED_VERSION
-from execute import TestbedController 
-
diff --git a/src/nepi/testbeds/planetlab/application.py b/src/nepi/testbeds/planetlab/application.py
deleted file mode 100644 (file)
index 6fc6fee..0000000
+++ /dev/null
@@ -1,1249 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID
-import plcapi
-import operator
-import os
-import os.path
-import sys
-import nepi.util.server as server
-import cStringIO
-import subprocess
-import rspawn
-import random
-import time
-import socket
-import threading
-import logging
-import re
-
-from nepi.util.constants import ApplicationStatus as AS
-
-_ccnre = re.compile("\s*(udp|tcp)\s+(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\s*$")
-
-class Dependency(object):
-    """
-    A Dependency is in every respect like an application.
-    
-    It depends on some packages, it may require building binaries, it must deploy
-    them...
-    
-    But it has no command. Dependencies aren't ever started, or stopped, and have
-    no status.
-    """
-
-    TRACES = ('buildlog')
-
-    def __init__(self, api=None):
-        if not api:
-            api = plcapi.PLCAPI()
-        self._api = api
-        
-        # Attributes
-        self.command = None
-        self.sudo = False
-        
-        self.build = None
-        self.install = None
-        self.depends = None
-        self.buildDepends = None
-        self.sources = None
-        self.rpmFusion = False
-        self.env = {}
-        
-        self.stdin = None
-        self.stdout = None
-        self.stderr = None
-        self.buildlog = None
-        
-        self.add_to_path = True
-        
-        # Those are filled when the app is configured
-        self.home_path = None
-        
-        # Those are filled when an actual node is connected
-        self.node = None
-        
-        # Those are filled when the app is started
-        #   Having both pid and ppid makes it harder
-        #   for pid rollover to induce tracking mistakes
-        self._started = False
-        self._setup = False
-        self._setuper = None
-        self._pid = None
-        self._ppid = None
-
-        # Spanning tree deployment
-        self._master = None
-        self._master_passphrase = None
-        self._master_prk = None
-        self._master_puk = None
-        self._master_token = os.urandom(8).encode("hex")
-        self._build_pid = None
-        self._build_ppid = None
-        
-        # Logging
-        self._logger = logging.getLogger('nepi.testbeds.planetlab')
-        
-    
-    def __str__(self):
-        return "%s<%s>" % (
-            self.__class__.__name__,
-            ' '.join(filter(bool,(self.depends, self.sources)))
-        )
-   
-    def deployed(self):
-        return self._setup
-
-    def validate(self):
-        if self.home_path is None:
-            raise AssertionError, "Misconfigured application: missing home path"
-        if self.node.ident_path is None or not os.access(self.node.ident_path, os.R_OK):
-            raise AssertionError, "Misconfigured application: missing slice SSH key"
-        if self.node is None:
-            raise AssertionError, "Misconfigured application: unconnected node"
-        if self.node.hostname is None:
-            raise AssertionError, "Misconfigured application: misconfigured node"
-        if self.node.slicename is None:
-            raise AssertionError, "Misconfigured application: unspecified slice"
-    
-    def check_bad_host(self, out, err):
-        """
-        Called whenever an operation fails, it's given the output to be checked for
-        telltale signs of unhealthy hosts.
-        """
-        return False
-    
-    def remote_trace_path(self, whichtrace):
-        if whichtrace in self.TRACES:
-            tracefile = os.path.join(self.home_path, whichtrace)
-        else:
-            tracefile = None
-        
-        return tracefile
-
-    def remote_trace_name(self, whichtrace):
-        if whichtrace in self.TRACES:
-            return whichtrace
-        return None
-
-    def sync_trace(self, local_dir, whichtrace):
-        tracefile = self.remote_trace_path(whichtrace)
-        if not tracefile:
-            return None
-        
-        local_path = os.path.join(local_dir, tracefile)
-        
-        # create parent local folders
-        proc = subprocess.Popen(
-            ["mkdir", "-p", os.path.dirname(local_path)],
-            stdout = open("/dev/null","w"),
-            stdin = open("/dev/null","r"))
-
-        if proc.wait():
-            raise RuntimeError, "Failed to synchronize trace"
-        
-        # sync files
-        try:
-            self._popen_scp(
-                '%s@%s:%s' % (self.node.slicename, self.node.hostname,
-                    tracefile),
-                local_path
-                )
-        except RuntimeError, e:
-            raise RuntimeError, "Failed to synchronize trace: %s %s" \
-                    % (e.args[0], e.args[1],)
-        
-        return local_path
-    
-    def recover(self):
-        # We assume a correct deployment, so recovery only
-        # means we mark this dependency as deployed
-        self._setup = True
-
-    def setup(self):
-        self._logger.info("Setting up %s", self)
-        self._make_home()
-        self._launch_build()
-        self._finish_build()
-        self._setup = True
-    
-    def async_setup(self):
-        if not self._setuper:
-            def setuper():
-                try:
-                    self.setup()
-                except:
-                    self._setuper._exc.append(sys.exc_info())
-            self._setuper = threading.Thread(
-                target = setuper)
-            self._setuper._exc = []
-            self._setuper.start()
-    
-    def async_setup_wait(self):
-        if not self._setup:
-            self._logger.info("Waiting for %s to be setup", self)
-            if self._setuper:
-                self._setuper.join()
-                if not self._setup:
-                    if self._setuper._exc:
-                        exctyp,exval,exctrace = self._setuper._exc[0]
-                        raise exctyp,exval,exctrace
-                    else:
-                        raise RuntimeError, "Failed to setup application"
-                else:
-                    self._logger.info("Setup ready: %s at %s", self, self.node.hostname)
-            else:
-                self.setup()
-        
-    def _make_home(self):
-        # Make sure all the paths are created where 
-        # they have to be created for deployment
-        # sync files
-        try:
-            self._popen_ssh_command(
-                "mkdir -p %(home)s && ( rm -f %(home)s/{pid,build-pid,nepi-build.sh} >/dev/null 2>&1 || /bin/true )" \
-                    % { 'home' : server.shell_escape(self.home_path) },
-                timeout = 120,
-                retry = 3
-                )
-        except RuntimeError, e:
-            raise RuntimeError, "Failed to set up application %s: %s %s" % (self.home_path, e.args[0], e.args[1],)
-        
-        if self.stdin:
-            stdin = self.stdin
-            if not os.path.isfile(stdin):
-                stdin = cStringIO.StringIO(self.stdin)
-
-            # Write program input
-            try:
-                self._popen_scp(stdin,
-                    '%s@%s:%s' % (self.node.slicename, self.node.hostname, 
-                        os.path.join(self.home_path, 'stdin') ),
-                    )
-            except RuntimeError, e:
-                raise RuntimeError, "Failed to set up application %s: %s %s" \
-                        % (self.home_path, e.args[0], e.args[1],)
-
-    def _replace_paths(self, command):
-        """
-        Replace all special path tags with shell-escaped actual paths.
-        """
-        # need to append ${HOME} if paths aren't absolute, to MAKE them absolute.
-        root = '' if self.home_path.startswith('/') else "${HOME}/"
-        return ( command
-            .replace("${SOURCES}", root+server.shell_escape(self.home_path))
-            .replace("${BUILD}", root+server.shell_escape(os.path.join(self.home_path,'build'))) )
-
-    def _launch_build(self, trial=0):
-        if self._master is not None:
-            if not trial or self._master_prk is not None:
-                self._do_install_keys()
-            buildscript = self._do_build_slave()
-        else:
-            buildscript = self._do_build_master()
-            
-        if buildscript is not None:
-            self._logger.info("Building %s at %s", self, self.node.hostname)
-            
-            # upload build script
-            try:
-                self._popen_scp(
-                    buildscript,
-                    '%s@%s:%s' % (self.node.slicename, self.node.hostname, 
-                        os.path.join(self.home_path, 'nepi-build.sh') )
-                    )
-            except RuntimeError, e:
-                raise RuntimeError, "Failed to set up application %s: %s %s" \
-                        % (self.home_path, e.args[0], e.args[1],)
-            
-            # launch build
-            self._do_launch_build()
-    
-    def _finish_build(self):
-        self._do_wait_build()
-        self._do_install()
-
-    def _do_build_slave(self):
-        if not self.sources and not self.build:
-            return None
-            
-        # Create build script
-        files = set()
-        
-        if self.sources:
-            sources = self.sources.split(' ')
-            files.update(
-                "%s@%s:%s" % (self._master.node.slicename, self._master.node.hostip, 
-                    os.path.join(self._master.home_path, os.path.basename(source)),)
-                for source in sources
-            )
-        
-        if self.build:
-            files.add(
-                "%s@%s:%s" % (self._master.node.slicename, self._master.node.hostip, 
-                    os.path.join(self._master.home_path, 'build.tar.gz'),)
-            )
-        
-        sshopts = "-o ConnectTimeout=30 -o ConnectionAttempts=3 -o ServerAliveInterval=30 -o TCPKeepAlive=yes"
-        
-        launch_agent = "{ ( echo -e '#!/bin/sh\\ncat' > .ssh-askpass ) && chmod u+x .ssh-askpass"\
-                        " && export SSH_ASKPASS=$(pwd)/.ssh-askpass "\
-                        " && ssh-agent > .ssh-agent.sh ; } && . ./.ssh-agent.sh && ( echo $NEPI_MASTER_PASSPHRASE | ssh-add %(prk)s ) && rm -rf %(prk)s %(puk)s" %  \
-        {
-            'prk' : server.shell_escape(self._master_prk_name),
-            'puk' : server.shell_escape(self._master_puk_name),
-        }
-        
-        kill_agent = "kill $SSH_AGENT_PID"
-        
-        waitmaster = (
-            "{ "
-            "echo 'Checking master reachability' ; "
-            "if ping -c 3 %(master_host)s && (. ./.ssh-agent.sh > /dev/null ; ssh -o UserKnownHostsFile=%(hostkey)s %(sshopts)s %(master)s echo MASTER SAYS HI ) ; then "
-            "echo 'Master node reachable' ; "
-            "else "
-            "echo 'MASTER NODE UNREACHABLE' && "
-            "exit 1 ; "
-            "fi ; "
-            ". ./.ssh-agent.sh ; "
-            "while [[ $(. ./.ssh-agent.sh > /dev/null ; ssh -q -o UserKnownHostsFile=%(hostkey)s %(sshopts)s %(master)s cat %(token_path)s.retcode || /bin/true) != %(token)s ]] ; do sleep 5 ; done ; "
-            "if [[ $(. ./.ssh-agent.sh > /dev/null ; ssh -q -o UserKnownHostsFile=%(hostkey)s %(sshopts)s %(master)s cat %(token_path)s || /bin/true) != %(token)s ]] ; then echo BAD TOKEN ; exit 1 ; fi ; "
-            "}" 
-        ) % {
-            'hostkey' : 'master_known_hosts',
-            'master' : "%s@%s" % (self._master.node.slicename, self._master.node.hostip),
-            'master_host' : self._master.node.hostip,
-            'token_path' : os.path.join(self._master.home_path, 'build.token'),
-            'token' : server.shell_escape(self._master._master_token),
-            'sshopts' : sshopts,
-        }
-        
-        syncfiles = ". ./.ssh-agent.sh && scp -p -o UserKnownHostsFile=%(hostkey)s %(sshopts)s %(files)s ." % {
-            'hostkey' : 'master_known_hosts',
-            'files' : ' '.join(files),
-            'sshopts' : sshopts,
-        }
-        if self.build:
-            syncfiles += " && tar xzf build.tar.gz"
-        syncfiles += " && ( echo %s > build.token )" % (server.shell_escape(self._master_token),)
-        syncfiles += " && ( echo %s > build.token.retcode )" % (server.shell_escape(self._master_token),)
-        syncfiles = "{ . ./.ssh-agent.sh ; %s ; }" % (syncfiles,)
-        
-        cleanup = "{ . ./.ssh-agent.sh ; kill $SSH_AGENT_PID ; rm -rf %(prk)s %(puk)s master_known_hosts .ssh-askpass ; }" % {
-            'prk' : server.shell_escape(self._master_prk_name),
-            'puk' : server.shell_escape(self._master_puk_name),
-        }
-        
-        slavescript = "( ( %(launch_agent)s && %(waitmaster)s && %(syncfiles)s && %(kill_agent)s && %(cleanup)s ) || %(cleanup)s ) ; echo %(token)s > build.token.retcode" % {
-            'waitmaster' : waitmaster,
-            'syncfiles' : syncfiles,
-            'cleanup' : cleanup,
-            'kill_agent' : kill_agent,
-            'launch_agent' : launch_agent,
-            'home' : server.shell_escape(self.home_path),
-            'token' : server.shell_escape(self._master_token),
-        }
-       
-        return cStringIO.StringIO(slavescript)
-         
-    def _do_launch_build(self):
-        script = "bash ./nepi-build.sh"
-        if self._master_passphrase:
-            script = "NEPI_MASTER_PASSPHRASE=%s %s" % (
-                server.shell_escape(self._master_passphrase),
-                script
-            )
-        (out,err),proc = rspawn.remote_spawn(
-            script,
-            pidfile = 'build-pid',
-            home = self.home_path,
-            stdin = '/dev/null',
-            stdout = 'buildlog',
-            stderr = rspawn.STDOUT,
-            
-            host = self.node.hostname,
-            port = None,
-            user = self.node.slicename,
-            agent = None,
-            ident_key = self.node.ident_path,
-            server_key = self.node.server_key,
-            hostip = self.node.hostip,
-            )
-        
-        if proc.wait():
-            if self.check_bad_host(out, err):
-                self.node.blacklist()
-            raise RuntimeError, "Failed to set up build slave %s: %s %s" % (self.home_path, out,err,)
-        
-        
-        pid = ppid = None
-        delay = 1.0
-        for i in xrange(5):
-            pidtuple = rspawn.remote_check_pid(
-                os.path.join(self.home_path,'build-pid'),
-                host = self.node.hostip,
-                port = None,
-                user = self.node.slicename,
-                agent = None,
-                ident_key = self.node.ident_path,
-                server_key = self.node.server_key,
-                hostip = self.node.hostip
-                )
-            
-            if pidtuple:
-                pid, ppid = pidtuple
-                self._build_pid, self._build_ppid = pidtuple
-                break
-            else:
-                time.sleep(delay)
-                delay = min(30,delay*1.2)
-        else:
-            raise RuntimeError, "Failed to set up build slave %s: cannot get pid" % (self.home_path,)
-
-        self._logger.info("Deploying %s at %s", self, self.node.hostname)
-        
-    def _do_wait_build(self, trial=0):
-        pid = self._build_pid
-        ppid = self._build_ppid
-        
-        if pid and ppid:
-            delay = 1.0
-            first = True
-            bustspin = 0
-            while True:
-                status = rspawn.remote_status(
-                    pid, ppid,
-                    host = self.node.hostname,
-                    port = None,
-                    user = self.node.slicename,
-                    agent = None,
-                    ident_key = self.node.ident_path,
-                    server_key = self.node.server_key,
-                    hostip = self.node.hostip
-                    )
-                
-                if status is rspawn.FINISHED:
-                    self._build_pid = self._build_ppid = None
-                    break
-                elif status is not rspawn.RUNNING:
-                    self._logger.warn("Busted waiting for %s to finish building at %s %s", self, self.node.hostname,
-                            "(build slave)" if self._master is not None else "(build master)")
-                    bustspin += 1
-                    time.sleep(delay*(5.5+random.random()))
-                    if bustspin > 12:
-                        self._build_pid = self._build_ppid = None
-                        break
-                else:
-                    if first:
-                        self._logger.info("Waiting for %s to finish building at %s %s", self, self.node.hostname,
-                            "(build slave)" if self._master is not None else "(build master)")
-                        
-                        first = False
-                    time.sleep(delay*(0.5+random.random()))
-                    delay = min(30,delay*1.2)
-                    bustspin = 0
-        
-            # check build token
-            slave_token = ""
-            for i in xrange(3):
-                (out, err), proc = self._popen_ssh_command(
-                    "cat %(token_path)s" % {
-                        'token_path' : os.path.join(self.home_path, 'build.token'),
-                    },
-                    timeout = 120,
-                    noerrors = True)
-                if not proc.wait() and out:
-                    slave_token = out.strip()
-                
-                if slave_token:
-                    break
-                else:
-                    time.sleep(2)
-            
-            if slave_token != self._master_token:
-                # Get buildlog for the error message
-
-                (buildlog, err), proc = self._popen_ssh_command(
-                    "cat %(buildlog)s" % {
-                        'buildlog' : os.path.join(self.home_path, 'buildlog'),
-                        'buildscript' : os.path.join(self.home_path, 'nepi-build.sh'),
-                    },
-                    timeout = 120,
-                    noerrors = True)
-                
-                proc.wait()
-                
-                if self.check_bad_host(buildlog, err):
-                    self.node.blacklist()
-                elif self._master and trial < 3 and 'BAD TOKEN' in buildlog or 'BAD TOKEN' in err:
-                    # bad sync with master, may try again
-                    # but first wait for master
-                    self._master.async_setup_wait()
-                    self._launch_build(trial+1)
-                    return self._do_wait_build(trial+1)
-                elif trial < 3:
-                    return self._do_wait_build(trial+1)
-                else:
-                    # No longer need'em
-                    self._master_prk = None
-                    self._master_puk = None
-        
-                    raise RuntimeError, "Failed to set up application %s: "\
-                            "build failed, got wrong token from pid %s/%s "\
-                            "(expected %r, got %r), see buildlog at %s:\n%s" % (
-                        self.home_path, pid, ppid, self._master_token, slave_token, self.node.hostname, buildlog)
-
-            # No longer need'em
-            self._master_prk = None
-            self._master_puk = None
-        
-            self._logger.info("Built %s at %s", self, self.node.hostname)
-
-    def _do_kill_build(self):
-        pid = self._build_pid
-        ppid = self._build_ppid
-        
-        if pid and ppid:
-            self._logger.info("Killing build of %s", self)
-            rspawn.remote_kill(
-                pid, ppid,
-                host = self.node.hostname,
-                port = None,
-                user = self.node.slicename,
-                agent = None,
-                ident_key = self.node.ident_path,
-                hostip = self.node.hostip
-                )
-        
-        
-    def _do_build_master(self):
-        if not self.sources and not self.build and not self.buildDepends:
-            return None
-            
-        if self.sources:
-            sources = self.sources.split(' ')
-
-            http_sources = list()
-            for source in list(sources):
-                if source.startswith("http") or source.startswith("https"):
-                    http_sources.append(source)
-                    sources.remove(source)
-
-            # Download http sources
-            try:
-                for source in http_sources:
-                    path = os.path.join(self.home_path, source.split("/")[-1])
-                    command = "wget -o %s %s" % (path, source)
-                    self._popen_ssh(command)
-            except RuntimeError, e:
-                raise RuntimeError, "Failed wget source file %r: %s %s" \
-                        % (sources, e.args[0], e.args[1],)
-
-            # Copy all other sources
-            try:
-                self._popen_scp(
-                    sources,
-                    "%s@%s:%s" % (self.node.slicename, self.node.hostname, 
-                        os.path.join(self.home_path,'.'),)
-                    )
-            except RuntimeError, e:
-                raise RuntimeError, "Failed upload source file %r: %s %s" \
-                        % (sources, e.args[0], e.args[1],)
-            
-        buildscript = cStringIO.StringIO()
-        
-        buildscript.write("(\n")
-        
-        if self.buildDepends:
-            # Install build dependencies
-            buildscript.write(
-                "sudo -S yum -y install %(packages)s\n" % {
-                    'packages' : self.buildDepends
-                }
-            )
-        
-            
-        if self.build:
-            # Build sources
-            buildscript.write(
-                "mkdir -p build && ( cd build && ( %(command)s ) )\n" % {
-                    'command' : self._replace_paths(self.build),
-                    'home' : server.shell_escape(self.home_path),
-                }
-            )
-        
-            # Make archive
-            buildscript.write("tar czf build.tar.gz build\n")
-        
-        # Write token
-        buildscript.write("echo %(master_token)s > build.token ) ; echo %(master_token)s > build.token.retcode" % {
-            'master_token' : server.shell_escape(self._master_token)
-        })
-        
-        buildscript.seek(0)
-
-        return buildscript
-
-    def _do_install(self):
-        if self.install:
-            self._logger.info("Installing %s at %s", self, self.node.hostname)
-            # Install application
-            try:
-                command = "cd %(home)s && cd build && ( %(command)s ) > ${HOME}/%(home)s/installlog 2>&1 || ( tail ${HOME}/%(home)s/{install,build}log >&2 && false )" % \
-                    {
-                    'command' : self._replace_paths(self.install),
-                    'home' : server.shell_escape(self.home_path),
-                    }
-                self._popen_ssh_command(command)
-            except RuntimeError, e:
-                if self.check_bad_host(e.args[0], e.args[1]):
-                    self.node.blacklist()
-                raise RuntimeError, "Failed install build sources on node %s: %s %s. COMMAND %s" % (
-                        self.node.hostname, e.args[0], e.args[1], command)
-
-    def set_master(self, master):
-        self._master = master
-        
-    def install_keys(self, prk, puk, passphrase):
-        # Install keys
-        self._master_passphrase = passphrase
-        self._master_prk = prk
-        self._master_puk = puk
-        self._master_prk_name = os.path.basename(prk.name)
-        self._master_puk_name = os.path.basename(puk.name)
-        
-    def _do_install_keys(self):
-        prk = self._master_prk
-        puk = self._master_puk
-       
-        try:
-            self._popen_scp(
-                [ prk.name, puk.name ],
-                '%s@%s:%s' % (self.node.slicename, self.node.hostname, self.home_path )
-                )
-        except RuntimeError, e:
-            raise RuntimeError, "Failed to set up application deployment keys: %s %s" \
-                    % (e.args[0], e.args[1],)
-
-        try:
-            self._popen_scp(
-                cStringIO.StringIO('%s,%s %s\n' % (
-                    self._master.node.hostname, self._master.node.hostip, 
-                    self._master.node.server_key)),
-                '%s@%s:%s' % (self.node.slicename, self.node.hostname, 
-                    os.path.join(self.home_path,"master_known_hosts") )
-                )
-        except RuntimeError, e:
-            raise RuntimeError, "Failed to set up application deployment keys: %s %s" \
-                    % (e.args[0], e.args[1],)
-        
-    
-    def cleanup(self):
-        # make sure there's no leftover build processes
-        self._do_kill_build()
-        
-        # No longer need'em
-        self._master_prk = None
-        self._master_puk = None
-
-    @server.eintr_retry
-    def _popen_scp(self, src, dst, retry = 3):
-        while 1:
-            try:
-                (out,err),proc = server.popen_scp(
-                    src,
-                    dst, 
-                    port = None,
-                    agent = None,
-                    ident_key = self.node.ident_path,
-                    server_key = self.node.server_key
-                    )
-
-                if server.eintr_retry(proc.wait)():
-                    raise RuntimeError, (out, err)
-                return (out, err), proc
-            except:
-                if retry <= 0:
-                    raise
-                else:
-                    retry -= 1
-  
-
-    @server.eintr_retry
-    def _popen_ssh_command(self, command, retry = 0, noerrors=False, timeout=None):
-        (out,err),proc = server.popen_ssh_command(
-            command,
-            host = self.node.hostname,
-            port = None,
-            user = self.node.slicename,
-            agent = None,
-            ident_key = self.node.ident_path,
-            server_key = self.node.server_key,
-            timeout = timeout,
-            retry = retry
-            )
-
-        if server.eintr_retry(proc.wait)():
-            if not noerrors:
-                raise RuntimeError, (out, err)
-        return (out, err), proc
-
-class Application(Dependency):
-    """
-    An application also has dependencies, but also a command to be ran and monitored.
-    
-    It adds the output of that command as traces.
-    """
-    
-    TRACES = ('stdout','stderr','buildlog', 'output')
-    
-    def __init__(self, api=None):
-        super(Application,self).__init__(api)
-        
-        # Attributes
-        self.command = None
-        self.sudo = False
-        
-        self.stdin = None
-        self.stdout = None
-        self.stderr = None
-        self.output = None
-        
-        # Those are filled when the app is started
-        #   Having both pid and ppid makes it harder
-        #   for pid rollover to induce tracking mistakes
-        self._started = False
-        self._pid = None
-        self._ppid = None
-
-        # Do not add to the python path of nodes
-        self.add_to_path = False
-    
-    def __str__(self):
-        return "%s<command:%s%s>" % (
-            self.__class__.__name__,
-            "sudo " if self.sudo else "",
-            self.command,
-        )
-    
-    def start(self):
-        self._logger.info("Starting %s", self)
-        
-        # Create shell script with the command
-        # This way, complex commands and scripts can be ran seamlessly
-        # sync files
-        command = cStringIO.StringIO()
-        command.write('export PYTHONPATH=$PYTHONPATH:%s\n' % (
-            ':'.join(["${HOME}/"+server.shell_escape(s) for s in self.node.pythonpath])
-        ))
-        command.write('export PATH=$PATH:%s\n' % (
-            ':'.join(["${HOME}/"+server.shell_escape(s) for s in self.node.pythonpath])
-        ))
-        if self.node.env:
-            for envkey, envvals in self.node.env.iteritems():
-                for envval in envvals:
-                    command.write('export %s=%s\n' % (envkey, envval))
-        command.write(self.command)
-        command.seek(0)
-
-        try:
-            self._popen_scp(
-                command,
-                '%s@%s:%s' % (self.node.slicename, self.node.hostname, 
-                    os.path.join(self.home_path, "app.sh"))
-                )
-        except RuntimeError, e:
-            raise RuntimeError, "Failed to set up application: %s %s" \
-                    % (e.args[0], e.args[1],)
-        
-        # Start process in a "daemonized" way, using nohup and heavy
-        # stdin/out redirection to avoid connection issues
-        (out,err),proc = rspawn.remote_spawn(
-            self._replace_paths("bash ./app.sh"),
-            
-            pidfile = './pid',
-            home = self.home_path,
-            stdin = 'stdin' if self.stdin is not None else '/dev/null',
-            stdout = 'stdout' if self.stdout else '/dev/null',
-            stderr = 'stderr' if self.stderr else '/dev/null',
-            sudo = self.sudo,
-            host = self.node.hostname,
-            port = None,
-            user = self.node.slicename,
-            agent = None,
-            ident_key = self.node.ident_path,
-            server_key = self.node.server_key
-            )
-        
-        if proc.wait():
-            if self.check_bad_host(out, err):
-                self.node.blacklist()
-            raise RuntimeError, "Failed to set up application: %s %s" % (out,err,)
-
-        self._started = True
-    
-    def recover(self):
-        # Assuming the application is running on PlanetLab,
-        # proper pidfiles should be present at the app's home path.
-        # So we mark this application as started, and check the pidfiles
-        self._started = True
-        self.checkpid()
-
-    def checkpid(self):            
-        # Get PID/PPID
-        # NOTE: wait a bit for the pidfile to be created
-        if self._started and not self._pid or not self._ppid:
-            pidtuple = rspawn.remote_check_pid(
-                os.path.join(self.home_path,'pid'),
-                host = self.node.hostname,
-                port = None,
-                user = self.node.slicename,
-                agent = None,
-                ident_key = self.node.ident_path,
-                server_key = self.node.server_key
-                )
-            
-            if pidtuple:
-                self._pid, self._ppid = pidtuple
-    
-    def status(self):
-        self.checkpid()
-        if not self._started:
-            return AS.STATUS_NOT_STARTED
-        elif not self._pid or not self._ppid:
-            return AS.STATUS_NOT_STARTED
-        else:
-            status = rspawn.remote_status(
-                self._pid, self._ppid,
-                host = self.node.hostname,
-                port = None,
-                user = self.node.slicename,
-                agent = None,
-                ident_key = self.node.ident_path,
-                server_key = self.node.server_key
-                )
-            
-            if status is rspawn.NOT_STARTED:
-                return AS.STATUS_NOT_STARTED
-            elif status is rspawn.RUNNING:
-                return AS.STATUS_RUNNING
-            elif status is rspawn.FINISHED:
-                return AS.STATUS_FINISHED
-            else:
-                # WTF?
-                return AS.STATUS_NOT_STARTED
-    
-    def kill(self):
-        status = self.status()
-        if status == AS.STATUS_RUNNING:
-            # kill by ppid+pid - SIGTERM first, then try SIGKILL
-            rspawn.remote_kill(
-                self._pid, self._ppid,
-                host = self.node.hostname,
-                port = None,
-                user = self.node.slicename,
-                agent = None,
-                ident_key = self.node.ident_path,
-                server_key = self.node.server_key,
-                sudo = self.sudo
-                )
-            self._logger.info("Killed %s", self)
-
-
-class NepiDependency(Dependency):
-    """
-    This dependency adds nepi itself to the python path,
-    so that you may run testbeds within PL nodes.
-    """
-    
-    # Class attribute holding a *weak* reference to the shared NEPI tar file
-    # so that they may share it. Don't operate on the file itself, it would
-    # be a mess, just use its path.
-    _shared_nepi_tar = None
-    
-    def __init__(self, api = None):
-        super(NepiDependency, self).__init__(api)
-        
-        self._tarball = None
-        
-        self.depends = 'python python-ipaddr python-setuptools'
-        
-        # our sources are in our ad-hoc tarball
-        self.sources = self.tarball.name
-        
-        tarname = os.path.basename(self.tarball.name)
-        
-        # it's already built - just move the tarball into place
-        self.build = "mv -f ${SOURCES}/%s ." % (tarname,)
-        
-        # unpack it into sources, and we're done
-        self.install = "tar xzf ${BUILD}/%s -C .." % (tarname,)
-    
-    @property
-    def tarball(self):
-        if self._tarball is None:
-            shared_tar = self._shared_nepi_tar and self._shared_nepi_tar()
-            if shared_tar is not None:
-                self._tarball = shared_tar
-            else:
-                # Build an ad-hoc tarball
-                # Prebuilt
-                import nepi
-                import tempfile
-                
-                shared_tar = tempfile.NamedTemporaryFile(prefix='nepi-src-', suffix='.tar.gz')
-                
-                proc = subprocess.Popen(
-                    ["tar", "czf", shared_tar.name, 
-                        '-C', os.path.join(os.path.dirname(os.path.dirname(nepi.__file__)),'.'), 
-                        'nepi'],
-                    stdout = open("/dev/null","w"),
-                    stdin = open("/dev/null","r"))
-
-                if proc.wait():
-                    raise RuntimeError, "Failed to create nepi tarball"
-                
-                self._tarball = self._shared_nepi_tar = shared_tar
-                
-        return self._tarball
-
-class NS3Dependency(Dependency):
-    """
-    This dependency adds NS3 libraries to the library paths,
-    so that you may run the NS3 testbed within PL nodes.
-    
-    You'll also need the NepiDependency.
-    """
-    
-    def __init__(self, api = None):
-        super(NS3Dependency, self).__init__(api)
-        
-        self.buildDepends = 'make waf gcc gcc-c++ gccxml unzip bzr'
-        
-        # We have to download the sources, untar, build...
-        #pygccxml_source_url = "http://leaseweb.dl.sourceforge.net/project/pygccxml/pygccxml/pygccxml-1.0/pygccxml-1.0.0.zip"
-        pygccxml_source_url = "http://yans.pl.sophia.inria.fr/libs/pygccxml-1.0.0.zip"
-        ns3_source_url = "http://nepi.inria.fr/code/nepi-ns3.13/archive/tip.tar.gz"
-        passfd_source_url = "http://nepi.inria.fr/code/python-passfd/archive/tip.tar.gz"
-        
-        pybindgen_version = "797"
-
-        self.build =(
-            " ( "
-            "  cd .. && "
-            "  python -c 'import pygccxml, pybindgen, passfd' && "
-            "  test -f lib/ns/_core.so && "
-            "  test -f lib/ns/__init__.py && "
-            "  test -f lib/ns/core.py && "
-            "  test -f lib/libns3-core.so && "
-            "  LD_LIBRARY_PATH=lib PYTHONPATH=lib python -c 'import ns.core' "
-            " ) || ( "
-                # Not working, rebuild
-                     # Archive SHA1 sums to check
-                     "echo '7158877faff2254e6c094bf18e6b4283cac19137  pygccxml-1.0.0.zip' > archive_sums.txt && "
-                     " ( " # check existing files
-                     " sha1sum -c archive_sums.txt && "
-                     " test -f passfd-src.tar.gz && "
-                     " test -f ns3-src.tar.gz "
-                     " ) || ( " # nope? re-download
-                     " rm -rf pybindgen pygccxml-1.0.0.zip passfd-src.tar.gz ns3-src.tar.gz && "
-                     " bzr checkout lp:pybindgen -r %(pybindgen_version)s && " # continue, to exploit the case when it has already been dl'ed
-                     " wget -q -c -O pygccxml-1.0.0.zip %(pygccxml_source_url)s && " 
-                     " wget -q -c -O passfd-src.tar.gz %(passfd_source_url)s && "
-                     " wget -q -c -O ns3-src.tar.gz %(ns3_source_url)s && "  
-                     " sha1sum -c archive_sums.txt " # Check SHA1 sums when applicable
-                     " ) && "
-                     "unzip -n pygccxml-1.0.0.zip && "
-                     "mkdir -p ns3-src && "
-                     "mkdir -p passfd-src && "
-                     "tar xzf ns3-src.tar.gz --strip-components=1 -C ns3-src && "
-                     "tar xzf passfd-src.tar.gz --strip-components=1 -C passfd-src && "
-                     "rm -rf target && "    # mv doesn't like unclean targets
-                     "mkdir -p target && "
-                     "cd pygccxml-1.0.0 && "
-                     "rm -rf unittests docs && " # pygccxml has ~100M of unit tests - excessive - docs aren't needed either
-                     "python setup.py build && "
-                     "python setup.py install --install-lib ${BUILD}/target && "
-                     "python setup.py clean && "
-                     "cd ../pybindgen && "
-                     "export PYTHONPATH=$PYTHONPATH:${BUILD}/target && "
-                     "./waf configure --prefix=${BUILD}/target -d release && "
-                     "./waf && "
-                     "./waf install && "
-                     "./waf clean && "
-                     "mv -f ${BUILD}/target/lib/python*/site-packages/pybindgen ${BUILD}/target/. && "
-                     "rm -rf ${BUILD}/target/lib && "
-                     "cd ../passfd-src && "
-                     "python setup.py build && "
-                     "python setup.py install --install-lib ${BUILD}/target && "
-                     "python setup.py clean && "
-                     "cd ../ns3-src && "
-                     "./waf configure --prefix=${BUILD}/target --with-pybindgen=../pybindgen-src -d release --disable-examples --disable-tests && "
-                     "./waf &&"
-                     "./waf install && "
-                     "rm -f ${BUILD}/target/lib/*.so && "
-                     "cp -a ${BUILD}/ns3-src/build/libns3*.so ${BUILD}/target/lib && "
-                     "cp -a ${BUILD}/ns3-src/build/bindings/python/ns ${BUILD}/target/lib &&"
-                     "./waf clean "
-             " )"
-                     % dict(
-                        pybindgen_version = server.shell_escape(pybindgen_version),
-                        pygccxml_source_url = server.shell_escape(pygccxml_source_url),
-                        ns3_source_url = server.shell_escape(ns3_source_url),
-                        passfd_source_url = server.shell_escape(passfd_source_url),
-                     ))
-        
-        # Just move ${BUILD}/target
-        self.install = (
-            " ( "
-            "  cd .. && "
-            "  python -c 'import pygccxml, pybindgen, passfd' && "
-            "  test -f lib/ns/_core.so && "
-            "  test -f lib/ns/__init__.py && "
-            "  test -f lib/ns/core.py && "
-            "  test -f lib/libns3-core.so && "
-            "  LD_LIBRARY_PATH=lib PYTHONPATH=lib python -c 'import ns.core' "
-            " ) || ( "
-                # Not working, reinstall
-                    "test -d ${BUILD}/target && "
-                    "[[ \"x\" != \"x$(find ${BUILD}/target -mindepth 1 -print -quit)\" ]] &&"
-                    "( for i in ${BUILD}/target/* ; do rm -rf ${SOURCES}/${i##*/} ; done ) && " # mv doesn't like unclean targets
-                    "mv -f ${BUILD}/target/* ${SOURCES}"
-            " )"
-        )
-        
-        # Set extra environment paths
-        self.env['NEPI_NS3BINDINGS'] = "${SOURCES}/lib"
-        self.env['NEPI_NS3LIBRARY'] = "${SOURCES}/lib"
-    
-    @property
-    def tarball(self):
-        if self._tarball is None:
-            shared_tar = self._shared_nepi_tar and self._shared_nepi_tar()
-            if shared_tar is not None:
-                self._tarball = shared_tar
-            else:
-                # Build an ad-hoc tarball
-                # Prebuilt
-                import nepi
-                import tempfile
-                
-                shared_tar = tempfile.NamedTemporaryFile(prefix='nepi-src-', suffix='.tar.gz')
-                
-                proc = subprocess.Popen(
-                    ["tar", "czf", shared_tar.name, 
-                        '-C', os.path.join(os.path.dirname(os.path.dirname(nepi.__file__)),'.'), 
-                        'nepi'],
-                    stdout = open("/dev/null","w"),
-                    stdin = open("/dev/null","r"))
-
-                if proc.wait():
-                    raise RuntimeError, "Failed to create nepi tarball"
-                
-                self._tarball = self._shared_nepi_tar = shared_tar
-                
-        return self._tarball
-
-class YumDependency(Dependency):
-    """
-    This dependency is an internal helper class used to
-    efficiently distribute yum-downloaded rpms.
-    
-    It temporarily sets the yum cache as persistent in the
-    build master, and installs all the required packages.
-    
-    The rpm packages left in the yum cache are gathered and
-    distributed by the underlying Dependency in an efficient
-    manner. Build slaves will then install those rpms back in
-    the cache before issuing the install command.
-    
-    When packages have been installed already, nothing but an
-    empty tar is distributed.
-    """
-    
-    # Class attribute holding a *weak* reference to the shared NEPI tar file
-    # so that they may share it. Don't operate on the file itself, it would
-    # be a mess, just use its path.
-    _shared_nepi_tar = None
-    
-    def _build_get(self):
-        # canonical representation of dependencies
-        depends = ' '.join( sorted( (self.depends or "").split(' ') ) )
-        
-        # download rpms and pack into a tar archive
-        return (
-            "sudo -S nice yum -y makecache && "
-            "sudo -S sed -i -r 's/keepcache *= *0/keepcache=1/' /etc/yum.conf && "
-            " ( ( "
-                "sudo -S nice yum -y install %s ; "
-                "rm -f ${BUILD}/packages.tar ; "
-                "tar -C /var/cache/yum -rf ${BUILD}/packages.tar $(cd /var/cache/yum ; find -iname '*.rpm')"
-            " ) || /bin/true ) && "
-            "sudo -S sed -i -r 's/keepcache *= *1/keepcache=0/' /etc/yum.conf && "
-            "( sudo -S nice yum -y clean packages || /bin/true ) "
-        ) % ( depends, )
-    def _build_set(self, value):
-        # ignore
-        return
-    build = property(_build_get, _build_set)
-    
-    def _install_get(self):
-        # canonical representation of dependencies
-        depends = ' '.join( sorted( (self.depends or "").split(' ') ) )
-        
-        # unpack cached rpms into yum cache, install, and cleanup
-        return (
-            "sudo -S tar -k --keep-newer-files -C /var/cache/yum -xf packages.tar && "
-            "sudo -S nice yum -y install %s && "
-            "( sudo -S nice yum -y clean packages || /bin/true ) "
-        ) % ( depends, )
-    def _install_set(self, value):
-        # ignore
-        return
-    install = property(_install_get, _install_set)
-        
-    def check_bad_host(self, out, err):
-        badre = re.compile(r'(?:'
-                           r'The GPG keys listed for the ".*" repository are already installed but they are not correct for this package'
-                           r'|Error: Cannot retrieve repository metadata (repomd.xml) for repository: .*[.] Please verify its path and try again'
-                           r'|Error: disk I/O error'
-                           r'|MASTER NODE UNREACHABLE'
-                           r')', 
-                           re.I)
-        return badre.search(out) or badre.search(err) or self.node.check_bad_host(out,err)
-
-
-class CCNxDaemon(Application):
-    """
-    An application also has dependencies, but also a command to be ran and monitored.
-    
-    It adds the output of that command as traces.
-    """
-    
-    def __init__(self, api=None):
-        super(CCNxDaemon,self).__init__(api)
-        
-        # Attributes
-        self.ccnLocalPort = None
-        self.ccnRoutes = None
-        self.ccnxVersion = "0.7.1"
-        self.repository = False
-        
-        #self.ccnx_0_6_0_sources = "http://yans.pl.sophia.inria.fr/libs/ccnx-0.6.0.tar.gz"
-        self.ccnx_sources = "http://www.ccnx.org/releases/ccnx-%s.tar.gz"
-        self.buildDepends = 'make gcc openssl-devel expat-devel libpcap-devel libxml2-devel'
-
-        self.ccnx_build = (
-            " ( "
-            "  cd .. && "
-            "  test -d ccnx-src/build/bin "
-            " ) || ( "
-                # Not working, rebuild
-                "("
-                     " mkdir -p ccnx-src && "
-                     " wget -q -c -O ccnx-src.tar.gz %(ccnx_source_url)s &&"
-                     " tar xf ccnx-src.tar.gz --strip-components=1 -C ccnx-src "
-                ") && "
-                     "cd ccnx-src && "
-                     "./configure && make"
-             " )") % dict(
-                     ccnx_source_url = server.shell_escape(self.ccnx_sources % self.ccnxVersion),
-                )
-
-        self.ccnx_install = (
-            " ( "
-            "  test -d ${BUILD}/ccnx-src/bin && "
-            "  cp -r ${BUILD}/ccnx-src/bin ${SOURCES}"
-            " )"
-        )
-
-        self.env['PATH'] = "$PATH:${SOURCES}/bin"
-
-    def setup(self):
-        # setting ccn sources
-        if not self.build:
-            self.build = self.ccnx_build
-
-        if not self.install:
-                self.install = self.ccnx_install
-
-        super(CCNxDaemon, self).setup()
-
-    def start(self):
-        self.command = ""
-        if self.ccnLocalPort:
-            self.command = "export CCN_LOCAL_PORT=%s ; " % self.ccnLocalPort
-        self.command += " ccndstart "
-
-        # configure ccn routes
-        if self.ccnRoutes:
-            routes = self.ccnRoutes.split("|")
-            
-            if self.ccnLocalPort:
-                routes = map(lambda route: "%s %s" %(route, 
-                    self.ccnLocalPort) if _ccnre.match(route) else route, 
-                        routes)
-
-            routes = map(lambda route: "ccndc add %s" % route, 
-                routes)
-
-            routescmd = " ; ".join(routes)
-            self.command += " ; "
-            self.command += routescmd
-
-            if self.repository:
-                self.command += " ; ccnr "
-
-
-        # Start will be invoked in prestart step
-        super(CCNxDaemon, self).start()
-            
-    def kill(self):
-        self._logger.info("Killing %s", self)
-
-        command = "${SOURCES}/bin/ccndstop"
-
-        if self.ccnLocalPort:
-            self.command = "export CCN_LOCAL_PORT=%s; %s" % (self.ccnLocalPort, command)
-
-        cmd = self._replace_paths(command)
-        command = cStringIO.StringIO()
-        command.write(cmd)
-        command.seek(0)
-
-        try:
-            self._popen_scp(
-                command,
-                '%s@%s:%s' % (self.node.slicename, self.node.hostname, 
-                    os.path.join(self.home_path, "kill.sh"))
-                )
-        except RuntimeError, e:
-            raise RuntimeError, "Failed to kill ccndxdaemon: %s %s" \
-                    % (e.args[0], e.args[1],)
-        
-
-        script = "bash ./kill.sh"
-        (out,err),proc = rspawn.remote_spawn(
-            script,
-            pidfile = 'kill-pid',
-            home = self.home_path,
-            stdin = '/dev/null',
-            stdout = 'killlog',
-            stderr = rspawn.STDOUT,
-            
-            host = self.node.hostname,
-            port = None,
-            user = self.node.slicename,
-            agent = None,
-            ident_key = self.node.ident_path,
-            server_key = self.node.server_key,
-            hostip = self.node.hostip,
-            )
-        
-        if proc.wait():
-            raise RuntimeError, "Failed to kill cnnxdaemon: %s %s" % (out,err,)
-        
-        super(CCNxDaemon, self).kill()
diff --git a/src/nepi/testbeds/planetlab/constants.py b/src/nepi/testbeds/planetlab/constants.py
deleted file mode 100644 (file)
index e749e93..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-# -*- coding: utf-8 -*-
-
-TESTBED_ID = "planetlab"
-TESTBED_VERSION = "0.1"
-
diff --git a/src/nepi/testbeds/planetlab/execute.py b/src/nepi/testbeds/planetlab/execute.py
deleted file mode 100644 (file)
index e39fee0..0000000
+++ /dev/null
@@ -1,800 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID, TESTBED_VERSION
-from nepi.core import testbed_impl
-from nepi.core.metadata import Parallel
-from nepi.util.constants import TIME_NOW
-from nepi.util.graphtools import mst
-from nepi.util import ipaddr2
-from nepi.util import environ
-from nepi.util import server
-from nepi.util.parallel import ParallelRun
-import threading
-import sys
-import os
-import os.path
-import time
-import resourcealloc
-import collections
-import operator
-import functools
-import socket
-import struct
-import tempfile
-import subprocess
-import random
-import shutil
-import logging
-import metadata
-import weakref
-import util as plutil
-
-class TempKeyError(Exception):
-    pass
-
-class TestbedController(testbed_impl.TestbedController):
-    def __init__(self):
-        super(TestbedController, self).__init__(TESTBED_ID, TESTBED_VERSION)
-        self._home_directory = None
-        self.slicename = None
-        self._traces = dict()
-
-        import node, interfaces, application, multicast
-        self._node = node
-        self._interfaces = interfaces
-        self._app = application
-        self._multicast = multicast
-        
-        self._blacklist = set()
-        self._just_provisioned = set()
-        
-        self._load_blacklist()
-
-        self._slice_id = None
-        self._plcapi = None
-        self._sliceapi = None
-        self._vsys_vnet = None
-
-        self._logger = logging.getLogger('nepi.testbeds.planetlab')
-        
-        self.recovering = False
-
-    @property
-    def home_directory(self):
-        return self._home_directory
-
-    @property
-    def plcapi(self):
-        if not self._plcapi:
-            import plcapi
-            self._plcapi = plcapi.plcapi(
-                    self.authUser,
-                    self.authString,
-                    self.plcHost,
-                    self.plcUrl,
-                    self.proxy
-                    )
-        return self._plcapi
-
-    @property
-    def sliceapi(self):
-        if not self._sliceapi:
-            if not self.sfa:
-                self._sliceapi = self.plcapi
-            else:
-                from nepi.util import sfiapi
-                self._sliceapi = sfiapi.sfiapi(self.slice_id)
-        return self._sliceapi
-
-    @property
-    def slice_id(self):
-        if not self._slice_id:
-            self._slice_id = self.sliceapi.GetSliceId(self.slicename)
-        return self._slice_id
-    
-    @property
-    def vsys_vnet(self):
-        if not self._vsys_vnet:
-            self._vsys_vnet = self.sliceapi.GetSliceVnetSysTag(self.slicename)
-        return self._vsys_vnet
-
-    def _load_blacklist(self):
-        blpath = environ.homepath('plblacklist')
-        
-        try:
-            bl = open(blpath, "r")
-        except:
-            self._blacklist = set()
-            return
-            
-        try:
-            self._blacklist = set(
-                map(str.strip, bl.readlines())
-            )
-        finally:
-            bl.close()
-    
-    def _save_blacklist(self):
-        blpath = environ.homepath('plblacklist')
-        bl = open(blpath, "w")
-        try:
-            bl.writelines(
-                map('%s\n'.__mod__, self._blacklist))
-        finally:
-            bl.close()
-    
-    def do_setup(self):
-        self._home_directory = self._attributes.\
-            get_attribute_value("homeDirectory")
-        self.slicename = self._attributes.\
-            get_attribute_value("slice")
-        self.authUser = self._attributes.\
-            get_attribute_value("authUser")
-        self.authString = self._attributes.\
-            get_attribute_value("authPass")
-        self.sliceSSHKey = self._attributes.\
-            get_attribute_value("sliceSSHKey")
-        self.sliceSSHKeyPass = None
-        self.plcHost = self._attributes.\
-            get_attribute_value("plcHost")
-        self.plcUrl = self._attributes.\
-            get_attribute_value("plcUrl")
-        self.logLevel = self._attributes.\
-            get_attribute_value("plLogLevel")
-        self.proxy = self._attributes.\
-            get_attribute_value("proxy")
-        self.tapPortBase = self._attributes.\
-            get_attribute_value("tapPortBase")
-        self.p2pDeployment = self._attributes.\
-            get_attribute_value("p2pDeployment")
-        self.cleanProc = self._attributes.\
-            get_attribute_value("cleanProc")
-        self.cleanHome = self._attributes.\
-            get_attribute_value("cleanHome")
-        self.sfa = self._attributes.\
-            get_attribute_value("sfa")
-        if self.sfa:
-            self._slice_id = self._attributes.\
-            get_attribute_value("sliceHrn")
-
-        if not self.slicename:
-            raise RuntimeError, "Slice not set"
-        if not self.authUser:
-            raise RuntimeError, "PlanetLab account username not set"
-        if not self.authString:
-            raise RuntimeError, "PlanetLab account passphrase not set"
-        if not self.sliceSSHKey:
-            raise RuntimeError, "PlanetLab account key not specified"
-        if not os.path.exists(self.sliceSSHKey):
-            raise RuntimeError, "PlanetLab account key cannot be opened: %s" % (self.sliceSSHKey,)
-        
-        self._logger.setLevel(getattr(logging,self.logLevel))
-        
-        super(TestbedController, self).do_setup()
-
-    def do_post_asynclaunch(self, guid):
-        # Dependencies were launched asynchronously,
-        # so wait for them
-        dep = self._elements[guid]
-        if isinstance(dep, self._app.Dependency):
-            dep.async_setup_wait()
-    
-    # Two-phase configuration for asynchronous launch
-    do_poststep_preconfigure = staticmethod(do_post_asynclaunch)
-    do_poststep_configure = staticmethod(do_post_asynclaunch)
-
-    def do_preconfigure(self):
-        while True:
-            # Perform resource discovery if we don't have
-            # specific resources assigned yet
-            self.do_resource_discovery()
-
-            # Create PlanetLab slivers
-            self.do_provisioning()
-            
-            try:
-                # Wait for provisioning
-                self.do_wait_nodes()
-                
-                # Okkey...
-                break
-            except self._node.UnresponsiveNodeError:
-                # Oh... retry...
-                pass
-        
-        if self.p2pDeployment:
-            # Plan application deployment
-            self.do_spanning_deployment_plan()
-
-        # Configure elements per XML data
-        super(TestbedController, self).do_preconfigure()
-
-    def do_resource_discovery(self, recover = False):
-        to_provision = self._to_provision = set()
-        
-        reserved = set(self._blacklist)
-        for guid, node in self._elements.iteritems():
-            if isinstance(node, self._node.Node) and node._node_id is not None:
-                reserved.add(node.hostname)
-        
-        # Initial algo:
-        #   look for perfectly defined nodes
-        #   (ie: those with only one candidate)
-        reserve_lock = threading.RLock()
-        def assignifunique(guid, node):
-            # Try existing nodes first
-            # If we have only one candidate, simply use it
-            candidates = node.find_candidates(
-                filter_slice_id = self.slice_id)
-            
-            node_id = None
-            candidate_hosts = set(candidates.keys() if candidates else [])
-            reserve_lock.acquire()
-            try:
-                candidate_hosts -= reserved
-                if len(candidate_hosts) == 1:
-                    hostname = iter(candidate_hosts).next()
-                    node_id = candidates[hostname]
-                    reserved.add(hostname)
-                elif not candidate_hosts:
-                    # Try again including unassigned nodes
-                    reserve_lock.release()
-                    try:
-                        candidates = node.find_candidates()
-                    finally:
-                        reserve_lock.acquire()
-                    candidate_hosts = set(candidates.keys() if candidates else [])
-                    candidate_hosts -= reserved
-                    if len(candidate_hosts) > 1:
-                        return
-                    if len(candidate_hosts) == 1:
-                        hostname = iter(candidate_hosts).next()
-                        node_id = candidates[hostname]
-                        to_provision.add(node_id)
-                        reserved.add(hostname)
-                    elif not candidates:
-                        raise RuntimeError, "Cannot assign resources for node %s, no candidates with %s" % (guid,
-                            node.make_filter_description())
-            finally:
-                reserve_lock.release()
-           
-            if node_id is not None:
-                node.assign_node_id(node_id)
-        
-        runner = ParallelRun(maxthreads=4) # don't overload the PLC API, just 4 threads to hide latencies and that's it
-        runner.start()
-        for guid, node in self._elements.iteritems():
-            if isinstance(node, self._node.Node) and node._node_id is None:
-                runner.put(assignifunique, guid, node)
-        runner.sync()
-        
-        # Now do the backtracking search for a suitable solution
-        # First with existing slice nodes
-        reqs = []
-        nodes = []
-        def genreqs(node, filter_slice_id=None):
-            # Try existing nodes first
-            # If we have only one candidate, simply use it
-            candidates = node.find_candidates(
-                filter_slice_id = filter_slice_id)
-            for r in reserved:
-                if candidates.has_key(r):
-                    del candidates[r]
-            reqs.append(candidates.values())
-            nodes.append(node)
-        for guid, node in self._elements.iteritems():
-            if isinstance(node, self._node.Node) and node._node_id is None:
-                runner.put(genreqs, node, self.slice_id)
-        runner.sync()
-       
-        if nodes and reqs:
-            if recover:
-                raise RuntimeError, "Impossible to recover: unassigned host for Nodes %r" % (nodes,)
-
-            def pickbest(fullset, nreq, node=nodes[0]):
-                if len(fullset) > nreq:
-                    fullset = zip(node.rate_nodes(fullset),fullset)
-                    fullset.sort(reverse=True)
-                    del fullset[nreq:]
-                    return set(map(operator.itemgetter(1),fullset))
-                else:
-                    return fullset
-            
-            try:
-                solution = resourcealloc.alloc(reqs, sample=pickbest)
-            except resourcealloc.ResourceAllocationError:
-                # Failed, try again with all nodes
-                reqs = []
-                for node in nodes:
-                    runner.put(genreqs, node)
-                runner.sync()
-                solution = resourcealloc.alloc(reqs, sample=pickbest)
-                to_provision.update(solution)
-            
-            # Do assign nodes
-            for node, node_id in zip(nodes, solution):
-                runner.put(node.assign_node_id, node_id)
-            runner.join()
-
-    def do_provisioning(self):
-        if self._to_provision:
-            # Add new nodes to the slice
-            cur_nodes = self.sliceapi.GetSliceNodes(self.slice_id)
-            new_nodes = list(set(cur_nodes) | self._to_provision)
-            self.sliceapi.AddSliceNodes(self.slice_id, nodes=new_nodes)
-
-        # cleanup
-        self._just_provisioned = self._to_provision
-        del self._to_provision
-    
-    def do_wait_nodes(self):
-        for guid, node in self._elements.iteritems():
-            if isinstance(node, self._node.Node):
-                # Just inject configuration stuff
-                node.home_path = "nepi-node-%s" % (guid,)
-                node.ident_path = self.sliceSSHKey
-                node.slicename = self.slicename
-            
-                # Show the magic
-                self._logger.info("PlanetLab Node %s configured at %s", guid, node.hostname)
-        
-        try:
-            runner = ParallelRun(maxthreads=64, maxqueue=1)
-            abort = []
-            def waitforit(guid, node):
-                try:
-                    node.wait_provisioning(
-                        (20*60 if node._node_id in self._just_provisioned else 60)
-                    )
-                    
-                    self._logger.info("READY Node %s at %s", guid, node.hostname)
-                    
-                    # Prepare dependency installer now
-                    node.prepare_dependencies()
-                except:
-                    abort.append(None)
-                    raise
-                
-            for guid, node in self._elements.iteritems():
-                if abort:
-                    break
-                if isinstance(node, self._node.Node):
-                    self._logger.info("Waiting for Node %s configured at %s", guid, node.hostname)
-                    runner.put(waitforit, guid, node)
-            runner.join()
-                    
-        except self._node.UnresponsiveNodeError:
-            # Uh... 
-            self._logger.warn("UNRESPONSIVE Nodes")
-            
-            # Mark all dead nodes (which are unresponsive) on the blacklist
-            # and re-raise
-            for guid, node in self._elements.iteritems():
-                if isinstance(node, self._node.Node):
-                    if not node.is_alive():
-                        self._logger.warn("Blacklisting %s for unresponsiveness", node.hostname)
-                        self._blacklist.add(node.hostname)
-                        node.unassign_node()
-            
-            try:
-                self._save_blacklist()
-            except:
-                # not important...
-                import traceback
-                traceback.print_exc()
-            
-            raise
-    
-    def do_spanning_deployment_plan(self):
-        # Create application groups by collecting all applications
-        # based on their hash - the hash should contain everything that
-        # defines them and the platform they're built
-        
-        def dephash(app):
-            return (
-                frozenset((app.depends or "").split(' ')),
-                frozenset((app.sources or "").split(' ')),
-                app.build,
-                app.install,
-                app.node.architecture,
-                app.node.operatingSystem,
-                app.node.pl_distro,
-                app.__class__,
-            )
-        
-        depgroups = collections.defaultdict(list)
-        
-        for element in self._elements.itervalues():
-            if isinstance(element, self._app.Dependency):
-                depgroups[dephash(element)].append(element)
-            elif isinstance(element, self._node.Node):
-                deps = element._yum_dependencies
-                if deps:
-                    depgroups[dephash(deps)].append(deps)
-        
-        # Set up spanning deployment for those applications that
-        # have been deployed in several nodes.
-        for dh, group in depgroups.iteritems():
-            if len(group) > 1:
-                # Pick root (deterministically)
-                root = min(group, key=lambda app:app.node.hostname)
-                
-                # Obtain all IPs in numeric format
-                # (which means faster distance computations)
-                for dep in group:
-                    dep._ip = server.gethostbyname(dep.node.hostname)
-                    dep._ip_n = struct.unpack('!L', socket.inet_aton(dep._ip))[0]
-                
-                # Compute plan
-                # NOTE: the plan is an iterator
-                plan = mst.mst(
-                    group,
-                    lambda a,b : ipaddr2.ipdistn(a._ip_n, b._ip_n),
-                    root = root,
-                    maxbranching = 2)
-                
-                # Re-sign private key
-                try:
-                    tempprk, temppuk, tmppass = self._make_temp_private_key()
-                except TempKeyError:
-                    continue
-                
-                # Set up slaves
-                plan = list(plan)
-                for slave, master in plan:
-                    slave.set_master(master)
-                    slave.install_keys(tempprk, temppuk, tmppass)
-                    
-        # We don't need the user's passphrase anymore
-        self.sliceSSHKeyPass = None
-    
-    def _make_temp_private_key(self):
-        # Get the user's key's passphrase
-        if not self.sliceSSHKeyPass:
-            if 'SSH_ASKPASS' in os.environ:
-                proc = subprocess.Popen(
-                    [ os.environ['SSH_ASKPASS'],
-                      "Please type the passphrase for the %s SSH identity file. "
-                      "The passphrase will be used to re-cipher the identity file with "
-                      "a random 256-bit key for automated chain deployment on the "
-                      "%s PlanetLab slice" % ( 
-                        os.path.basename(self.sliceSSHKey), 
-                        self.slicename
-                    ) ],
-                    stdin = open("/dev/null"),
-                    stdout = subprocess.PIPE,
-                    stderr = subprocess.PIPE)
-                out,err = proc.communicate()
-                self.sliceSSHKeyPass = out.strip()
-        
-        if not self.sliceSSHKeyPass:
-            raise TempKeyError
-        
-        # Create temporary key files
-        prk = tempfile.NamedTemporaryFile(
-            dir = self.root_directory,
-            prefix = "pl_deploy_tmpk_",
-            suffix = "")
-
-        puk = tempfile.NamedTemporaryFile(
-            dir = self.root_directory,
-            prefix = "pl_deploy_tmpk_",
-            suffix = ".pub")
-            
-        # Create secure 256-bits temporary passphrase
-        passphrase = os.urandom(32).encode("hex")
-                
-        # Copy keys
-        oprk = open(self.sliceSSHKey, "rb")
-        opuk = open(self.sliceSSHKey+".pub", "rb")
-        shutil.copymode(oprk.name, prk.name)
-        shutil.copymode(opuk.name, puk.name)
-        shutil.copyfileobj(oprk, prk)
-        shutil.copyfileobj(opuk, puk)
-        prk.flush()
-        puk.flush()
-        oprk.close()
-        opuk.close()
-        
-        # A descriptive comment
-        comment = "%s#NEPI_INTERNAL@%s" % (self.authUser, self.slicename)
-        
-        # Recipher keys
-        proc = subprocess.Popen(
-            ["ssh-keygen", "-p",
-             "-f", prk.name,
-             "-P", self.sliceSSHKeyPass,
-             "-N", passphrase,
-             "-C", comment ],
-            stdout = subprocess.PIPE,
-            stderr = subprocess.PIPE,
-            stdin = subprocess.PIPE
-        )
-        out, err = proc.communicate()
-        
-        if err:
-            raise RuntimeError, "Problem generating keys: \n%s\n%r" % (
-                out, err)
-        
-        prk.seek(0)
-        puk.seek(0)
-        
-        # Change comment on public key
-        puklines = puk.readlines()
-        puklines[0] = puklines[0].split(' ')
-        puklines[0][-1] = comment+'\n'
-        puklines[0] = ' '.join(puklines[0])
-        puk.seek(0)
-        puk.truncate()
-        puk.writelines(puklines)
-        del puklines
-        puk.flush()
-        
-        return prk, puk, passphrase
-    
-    def set(self, guid, name, value, time = TIME_NOW):
-        super(TestbedController, self).set(guid, name, value, time)
-        # TODO: take on account schedule time for the task
-        element = self._elements[guid]
-        if element:
-            if name == "up":
-                if value == True:
-                    element.if_up()
-                else:
-                    element.if_down()
-
-            try:
-                setattr(element, name, value)
-            except:
-                # We ignore these errors while recovering.
-                # Some attributes are immutable, and setting
-                # them is necessary (to recover the state), but
-                # some are not (they throw an exception).
-                if not self.recovering:
-                    raise
-
-            if hasattr(element, 'refresh'):
-                # invoke attribute refresh hook
-                element.refresh()
-
-    def get(self, guid, name, time = TIME_NOW):
-        value = super(TestbedController, self).get(guid, name, time)
-        # TODO: take on account schedule time for the task
-        factory_id = self._create[guid]
-        factory = self._factories[factory_id]
-        element = self._elements.get(guid)
-        try:
-            return getattr(element, name)
-        except (KeyError, AttributeError):
-            return value
-
-    def get_address(self, guid, index, attribute='Address'):
-        index = int(index)
-
-        # try the real stuff
-        iface = self._elements.get(guid)
-        if iface and index == 0:
-            if attribute == 'Address':
-                return iface.address
-            elif attribute == 'NetPrefix':
-                return iface.netprefix
-            elif attribute == 'Broadcast':
-                return iface.broadcast
-
-        # if all else fails, query box
-        return super(TestbedController, self).get_address(guid, index, attribute)
-
-    def action(self, time, guid, action):
-        raise NotImplementedError
-
-    def shutdown(self):
-        for trace in self._traces.itervalues():
-            trace.close()
-        
-        def invokeif(action, testbed, guid):
-            element = self._elements[guid]
-            if hasattr(element, action):
-                getattr(element, action)()
-        
-        self._do_in_factory_order(
-            functools.partial(invokeif, 'cleanup'),
-            metadata.shutdown_order)
-
-        self._do_in_factory_order(
-            functools.partial(invokeif, 'destroy'),
-            metadata.shutdown_order)
-            
-        self._elements.clear()
-        self._traces.clear()
-
-    def trace(self, guid, trace_id, attribute='value'):
-        elem = self._elements[guid]
-
-        if attribute == 'value':
-            path = elem.sync_trace(self.home_directory, trace_id)
-            if path:
-                fd = open(path, "r")
-                content = fd.read()
-                fd.close()
-            else:
-                content = None
-        elif attribute == 'path':
-            content = elem.remote_trace_path(trace_id)
-        elif attribute == 'name':
-            content = elem.remote_trace_name(trace_id)
-        else:
-            content = None
-        return content
-
-    def follow_trace(self, trace_id, trace):
-        self._traces[trace_id] = trace
-
-    def recover(self):
-        try:
-            # An internal flag, so we know to behave differently in
-            # a few corner cases.
-            self.recovering = True
-            
-            # Create and connect do not perform any real tasks against
-            # the nodes, it only sets up the object hierarchy,
-            # so we can run them normally
-            self.do_create()
-            self.do_connect_init()
-            self.do_connect_compl()
-            
-            # Manually recover nodes, to mark dependencies installed
-            # and clean up mutable attributes
-            self._do_in_factory_order(
-                lambda self, guid : self._elements[guid].recover(), 
-                [
-                    metadata.NODE,
-                ])
-            
-            # Assign nodes - since we're working off exeucte XML, nodes
-            # have specific hostnames assigned and we don't need to do
-            # real assignment, only find out node ids and check liveliness
-            self.do_resource_discovery(recover = True)
-            self.do_wait_nodes()
-            
-            # Pre/post configure, however, tends to set up tunnels
-            # Execute configuration steps only for those object
-            # kinds that do not have side effects
-            
-            # Do the ones without side effects,
-            # including nodes that need to set up home 
-            # folders and all that
-            self._do_in_factory_order(
-                "preconfigure_function", 
-                [
-                    metadata.INTERNET,
-                    Parallel(metadata.NODE),
-                    metadata.NODEIFACE,
-                ])
-            
-            # Tunnels require a home path that is configured
-            # at this step. Since we cannot run the step itself,
-            # we need to inject this homepath ourselves
-            for guid, element in self._elements.iteritems():
-                if isinstance(element, self._interfaces.TunIface):
-                    element._home_path = "tun-%s" % (guid,)
-            
-            # Manually recover tunnels, applications and
-            # netpipes, negating the side effects
-            self._do_in_factory_order(
-                lambda self, guid : self._elements[guid].recover(), 
-                [
-                    Parallel(metadata.TAPIFACE),
-                    Parallel(metadata.TUNIFACE),
-                    metadata.NETPIPE,
-                    Parallel(metadata.NEPIDEPENDENCY),
-                    Parallel(metadata.NS3DEPENDENCY),
-                    Parallel(metadata.DEPENDENCY),
-                    Parallel(metadata.APPLICATION),
-                    Parallel(metadata.CCNXDAEMON),
-                ])
-
-            # Tunnels are not harmed by configuration after
-            # recovery, and some attributes get set this way
-            # like external_iface
-            self._do_in_factory_order(
-                "preconfigure_function", 
-                [
-                    Parallel(metadata.TAPIFACE),
-                    Parallel(metadata.TUNIFACE),
-                ])
-
-            # Post-do the ones without side effects
-            self._do_in_factory_order(
-                "configure_function", 
-                [
-                    metadata.INTERNET,
-                    Parallel(metadata.NODE),
-                    metadata.NODEIFACE,
-                    Parallel(metadata.TAPIFACE),
-                    Parallel(metadata.TUNIFACE),
-                ])
-            
-            # There are no required prestart steps
-            # to call upon recovery, so we're done
-        finally:
-            self.recovering = True
-    
-    def _make_generic(self, parameters, kind, **kwargs):
-        args = dict({'api': self.plcapi})
-        args.update(kwargs)
-        app = kind(**args)
-        app.testbed = weakref.ref(self)
-
-        # Note: there is 1-to-1 correspondence between attribute names
-        #   If that changes, this has to change as well
-        for attr,val in parameters.iteritems():
-            try:
-                setattr(app, attr, val)
-            except:
-                # We ignore these errors while recovering.
-                # Some attributes are immutable, and setting
-                # them is necessary (to recover the state), but
-                # some are not (they throw an exception).
-                if not self.recovering:
-                    raise
-
-        return app
-
-    def _make_node(self, parameters):
-        args = dict({'sliceapi': self.sliceapi})
-        node = self._make_generic(parameters, self._node.Node, **args)
-        node.enable_proc_cleanup = self.cleanProc
-        node.enable_home_cleanup = self.cleanHome
-        return node
-
-    def _make_node_iface(self, parameters):
-        return self._make_generic(parameters, self._interfaces.NodeIface)
-
-    def _make_tun_iface(self, parameters):
-        return self._make_generic(parameters, self._interfaces.TunIface)
-
-    def _make_tap_iface(self, parameters):
-        return self._make_generic(parameters, self._interfaces.TapIface)
-
-    def _make_netpipe(self, parameters):
-        return self._make_generic(parameters, self._interfaces.NetPipe)
-
-    def _make_internet(self, parameters):
-        return self._make_generic(parameters, self._interfaces.Internet)
-
-    def _make_application(self, parameters, clazz = None):
-        if not clazz:
-            clazz = self._app.Application
-        return self._make_generic(parameters, clazz)
-
-    def _make_dependency(self, parameters):
-        return self._make_generic(parameters, self._app.Dependency)
-
-    def _make_nepi_dependency(self, parameters):
-        return self._make_generic(parameters, self._app.NepiDependency)
-
-    def _make_ns3_dependency(self, parameters):
-        return self._make_generic(parameters, self._app.NS3Dependency)
-
-    def _make_tun_filter(self, parameters):
-        return self._make_generic(parameters, self._interfaces.TunFilter)
-
-    def _make_class_queue_filter(self, parameters):
-        return self._make_generic(parameters, self._interfaces.ClassQueueFilter)
-
-    def _make_logging_class_queue_filter(self, parameters):
-        return self._make_generic(parameters, self._interfaces.LoggingClassQueueFilter)
-
-    def _make_tos_queue_filter(self, parameters):
-        return self._make_generic(parameters, self._interfaces.ToSQueueFilter)
-
-    def _make_multicast_forwarder(self, parameters):
-        return self._make_generic(parameters, self._multicast.MulticastForwarder)
-
-    def _make_multicast_announcer(self, parameters):
-        return self._make_generic(parameters, self._multicast.MulticastAnnouncer)
-
-    def _make_multicast_router(self, parameters):
-        return self._make_generic(parameters, self._multicast.MulticastRouter)
-
-
diff --git a/src/nepi/testbeds/planetlab/interfaces.py b/src/nepi/testbeds/planetlab/interfaces.py
deleted file mode 100644 (file)
index f4df82f..0000000
+++ /dev/null
@@ -1,608 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID
-import nepi.util.ipaddr2 as ipaddr2
-import nepi.util.server as server
-import plcapi
-import subprocess
-import os
-import os.path
-import random
-import ipaddr
-import functools
-
-import tunproto
-
-class NodeIface(object):
-    def __init__(self, api=None):
-        if not api:
-            api = plcapi.PLCAPI()
-        self._api = api
-        
-        # Attributes
-        self.primary = True
-
-        # These get initialized at configuration time
-        self.address = None
-        self.lladdr = None
-        self.netprefix = None
-        self.netmask = None
-        self.broadcast = True
-        self._interface_id = None
-
-        # These get initialized when the iface is connected to its node
-        self.node = None
-
-        # These get initialized when the iface is connected to the internet
-        self.has_internet = False
-
-    def __str__(self):
-        return "%s<ip:%s/%s up mac:%s>" % (
-            self.__class__.__name__,
-            self.address, self.netmask,
-            self.lladdr,
-        )
-    
-    __repr__ = __str__
-
-    def add_address(self, address, netprefix, broadcast):
-        raise RuntimeError, "Cannot add explicit addresses to public interface"
-    
-    def pick_iface(self, siblings):
-        """
-        Picks an interface using the PLCAPI to query information about the node.
-        
-        Needs an assigned node.
-        
-        Params:
-            siblings: other NodeIface elements attached to the same node
-        """
-        
-        if self.node is None or self.node._node_id is None:
-            raise RuntimeError, "Cannot pick interface without an assigned node"
-      
-        # HACK: SFA doesnt give the node_id!!
-        if not isinstance(self.node._node_id, int):
-            node_data = self._api.GetNodes(filters={'hostname':self.node.hostname}, fields=('node_id',))[0]
-            node_id = node_data['node_id']
-        else:
-            node_id = self.node._node_id
-
-        avail = self._api.GetInterfaces(
-            node_id=node_id, 
-            is_primary=self.primary,
-            fields=('interface_id','mac','netmask','ip') )
-        
-        used = set([sibling._interface_id for sibling in siblings
-                    if sibling._interface_id is not None])
-        
-        for candidate in avail:
-            candidate_id = candidate['interface_id']
-            if candidate_id not in used:
-                # pick it!
-                self._interface_id = candidate_id
-                self.address = candidate['ip']
-                self.lladdr = candidate['mac']
-                self.netprefix = candidate['netmask']
-                self.netmask = ipaddr2.ipv4_dot2mask(self.netprefix) if self.netprefix else None
-                return
-        else:
-            raise RuntimeError, "Cannot configure interface: cannot find suitable interface in PlanetLab node"
-
-    def validate(self):
-        if not self.has_internet:
-            raise RuntimeError, "All external interface devices must be connected to the Internet"
-    
-
-class _CrossIface(object):
-    def __init__(self, proto, addr, port, cipher):
-        self.tun_proto = proto
-        self.tun_addr = addr
-        self.tun_port = port
-        self.tun_cipher = cipher
-
-        # Attributes
-        self.address = None
-        self.netprefix = None
-        self.netmask = None
-        # Cannot access cross peers
-        self.peer_proto_impl = None
-    
-    def __str__(self):
-        return "%s%r" % (
-            self.__class__.__name__,
-            ( self.tun_proto,
-              self.tun_addr,
-              self.tun_port,
-              self.tun_cipher ) 
-        )
-    
-    __repr__ = __str__
-
-class TunIface(object):
-    _PROTO_MAP = tunproto.TUN_PROTO_MAP
-    _KIND = 'TUN'
-
-    def __init__(self, api=None):
-        if not api:
-            api = plcapi.PLCAPI()
-        self._api = api
-        
-        # Attributes
-        self.address = None
-        self.netprefix = None
-        self.netmask = None
-        
-        self.up = None
-        self.mtu = None
-        self.snat = False
-        self.txqueuelen = 1000
-        self.pointopoint = None
-        self.multicast = False
-        self.bwlimit = None
-        
-        # Enabled traces
-        self.capture = False
-
-        # These get initialized when the iface is connected to its node
-        self.node = None
-        
-        # These get initialized when the iface is connected to any filter
-        self.filter_module = None
-        self.multicast_forwarder = None
-        
-        # These get initialized when the iface is configured
-        self.external_iface = None
-        
-        # These get initialized when the iface is configured
-        # They're part of the TUN standard attribute set
-        self.tun_port = None
-        self.tun_addr = None
-        self.tun_cipher = "AES"
-        
-        # These get initialized when the iface is connected to its peer
-        self.peer_iface = None
-        self.peer_proto = None
-        self.peer_addr = None
-        self.peer_port = None
-        self.peer_proto_impl = None
-        self._delay_recover = False
-
-        # same as peer proto, but for execute-time standard attribute lookups
-        self.tun_proto = None 
-        
-        
-        # Generate an initial random cryptographic key to use for tunnelling
-        # Upon connection, both endpoints will agree on a common one based on
-        # this one.
-        self.tun_key = ( ''.join(map(chr, [ 
-                    r.getrandbits(8) 
-                    for i in xrange(32) 
-                    for r in (random.SystemRandom(),) ])
-                ).encode("base64").strip() )        
-        
-
-    def __str__(self):
-        return "%s<ip:%s/%s %s%s%s>" % (
-            self.__class__.__name__,
-            self.address, self.netprefix,
-            " up" if self.up else " down",
-            " snat" if self.snat else "",
-            (" p2p %s" % (self.pointopoint,)) if self.pointopoint else "",
-        )
-    
-    __repr__ = __str__
-    
-    @property
-    def if_name(self):
-        if self.peer_proto_impl:
-            return self.peer_proto_impl.if_name
-
-    def if_up(self):
-        if self.peer_proto_impl:
-            return self.peer_proto_impl.if_up()
-
-    def if_down(self):
-        if self.peer_proto_impl:
-            return self.peer_proto_impl.if_down()
-
-    def routes_here(self, route):
-        """
-        Returns True if the route should be attached to this interface
-        (ie, it references a gateway in this interface's network segment)
-        """
-        if self.address and self.netprefix:
-            addr, prefix = self.address, self.netprefix
-            pointopoint = self.pointopoint
-            if not pointopoint and self.peer_iface:
-                pointopoint = self.peer_iface.address
-            
-            if pointopoint:
-                prefix = 32
-                
-            dest, destprefix, nexthop, metric, device = route
-            
-            myNet = ipaddr.IPv4Network("%s/%d" % (addr, prefix))
-            gwIp = ipaddr.IPv4Network(nexthop)
-            
-            if pointopoint:
-                peerIp = ipaddr.IPv4Network(pointopoint)
-                
-                if gwIp == peerIp:
-                    return True
-            else:
-                if gwIp in myNet:
-                    return True
-        return False
-    
-    def add_address(self, address, netprefix, broadcast):
-        if (self.address or self.netprefix or self.netmask) is not None:
-            raise RuntimeError, "Cannot add more than one address to %s interfaces" % (self._KIND,)
-        if broadcast:
-            raise ValueError, "%s interfaces cannot broadcast in PlanetLab (%s)" % (self._KIND,broadcast)
-        
-        self.address = address
-        self.netprefix = netprefix
-        self.netmask = ipaddr2.ipv4_mask2dot(netprefix)
-    
-    def validate(self):
-        if not self.node:
-            raise RuntimeError, "Unconnected %s iface - missing node" % (self._KIND,)
-        if self.peer_iface and self.peer_proto not in self._PROTO_MAP:
-            raise RuntimeError, "Unsupported tunnelling protocol: %s" % (self.peer_proto,)
-        if not self.address or not self.netprefix or not self.netmask:
-            raise RuntimeError, "Misconfigured %s iface - missing address" % (self._KIND,)
-        if self.filter_module and self.peer_proto not in ('udp','tcp',None):
-            raise RuntimeError, "Miscofnigured TUN: %s - filtered tunnels only work with udp or tcp links" % (self,)
-        if self.tun_cipher != 'PLAIN' and self.peer_proto not in ('udp','tcp',None):
-            raise RuntimeError, "Miscofnigured TUN: %s - ciphered tunnels only work with udp or tcp links" % (self,)
-    
-    def _impl_instance(self, home_path):
-        impl = self._PROTO_MAP[self.peer_proto](
-            self, self.peer_iface, home_path, self.tun_key)
-        impl.port = self.tun_port
-        impl.cross_slice = not self.peer_iface or isinstance(self.peer_iface, _CrossIface)
-        return impl
-    
-    def recover(self):
-        if self.peer_proto:
-            self.peer_proto_impl = self._impl_instance(
-                self._home_path)
-            self.peer_proto_impl.recover()
-        else:
-            self._delay_recover = True
-    
-    def prepare(self, home_path):
-        if not self.peer_iface and (self.peer_proto and self.peer_addr):
-            # Ad-hoc peer_iface
-            self.peer_iface = _CrossIface(
-                self.peer_proto,
-                self.peer_addr,
-                self.peer_port,
-                self.peer_cipher)
-        if self.peer_iface:
-            if not self.peer_proto_impl:
-                self.peer_proto_impl = self._impl_instance(home_path)
-            if self._delay_recover:
-                self.peer_proto_impl.recover()
-    
-    def launch(self):
-        if self.peer_proto_impl:
-            self.peer_proto_impl.launch()
-    
-    def cleanup(self):
-        if self.peer_proto_impl:
-            self.peer_proto_impl.shutdown()
-
-    def destroy(self):
-        if self.peer_proto_impl:
-            self.peer_proto_impl.destroy()
-            self.peer_proto_impl = None
-
-    def wait(self):
-        if self.peer_proto_impl:
-            self.peer_proto_impl.wait()
-
-    def sync_trace(self, local_dir, whichtrace, tracemap = None):
-        if self.peer_proto_impl:
-            return self.peer_proto_impl.sync_trace(local_dir, whichtrace,
-                    tracemap)
-        else:
-            return None
-
-    def remote_trace_path(self, whichtrace, tracemap = None):
-        if self.peer_proto_impl:
-            return self.peer_proto_impl.remote_trace_path(whichtrace, tracemap)
-        else:
-            return None
-
-    def remote_trace_name(self, whichtrace):
-        return whichtrace
-
-class TapIface(TunIface):
-    _PROTO_MAP = tunproto.TAP_PROTO_MAP
-    _KIND = 'TAP'
-
-# Yep, it does nothing - yet
-class Internet(object):
-    def __init__(self, api=None):
-        if not api:
-            api = plcapi.PLCAPI()
-        self._api = api
-
-class NetPipe(object):
-    def __init__(self, api=None):
-        if not api:
-            api = plcapi.PLCAPI()
-        self._api = api
-
-        # Attributes
-        self.mode = None
-        self.addrList = None
-        self.portList = None
-        
-        self.plrIn = None
-        self.bwIn = None
-        self.delayIn = None
-
-        self.plrOut = None
-        self.bwOut = None
-        self.delayOut = None
-        
-        # These get initialized when the pipe is connected to its node
-        self.node = None
-        self.configured = False
-    
-    def validate(self):
-        if not self.mode:
-            raise RuntimeError, "Undefined NetPipe mode"
-        if not self.portList:
-            raise RuntimeError, "Undefined NetPipe port list - must always define the scope"
-        if not (self.plrIn or self.bwIn or self.delayIn):
-            raise RuntimeError, "Undefined NetPipe inbound characteristics"
-        if not (self.plrOut or self.bwOut or self.delayOut):
-            raise RuntimeError, "Undefined NetPipe outbound characteristics"
-        if not self.node:
-            raise RuntimeError, "Unconnected NetPipe"
-    
-    def _add_pipedef(self, bw, plr, delay, options):
-        if delay:
-            options.extend(("delay","%dms" % (delay,)))
-        if bw:
-            options.extend(("bw","%.8fMbit/s" % (bw,)))
-        if plr:
-            options.extend(("plr","%.8f" % (plr,)))
-    
-    def _get_ruledef(self):
-        scope = "%s%s%s" % (
-            self.portList,
-            "@" if self.addrList else "",
-            self.addrList or "",
-        )
-        
-        options = []
-        if self.bwIn or self.plrIn or self.delayIn:
-            options.append("IN")
-            self._add_pipedef(self.bwIn, self.plrIn, self.delayIn, options)
-        if self.bwOut or self.plrOut or self.delayOut:
-            options.append("OUT")
-            self._add_pipedef(self.bwOut, self.plrOut, self.delayOut, options)
-        options = ' '.join(options)
-        
-        return (scope,options)
-    
-    def recover(self):
-        # Rules are safe on their nodes
-        self.configured = True
-
-    def configure(self):
-        # set up rule
-        scope, options = self._get_ruledef()
-        command = "sudo -S netconfig config %s %s %s" % (self.mode, scope, options)
-        
-        (out,err),proc = server.popen_ssh_command(
-            command,
-            host = self.node.hostname,
-            port = None,
-            user = self.node.slicename,
-            agent = None,
-            ident_key = self.node.ident_path,
-            server_key = self.node.server_key
-            )
-    
-        if proc.wait():
-            raise RuntimeError, "Failed instal build sources: %s %s" % (out,err,)
-        
-        # we have to clean up afterwards
-        self.configured = True
-    
-    def refresh(self):
-        if self.configured:
-            # refresh rule
-            scope, options = self._get_ruledef()
-            command = "sudo -S netconfig refresh %s %s %s" % (self.mode, scope, options)
-            
-            (out,err),proc = server.popen_ssh_command(
-                command,
-                host = self.node.hostname,
-                port = None,
-                user = self.node.slicename,
-                agent = None,
-                ident_key = self.node.ident_path,
-                server_key = self.node.server_key
-                )
-        
-            if proc.wait():
-                raise RuntimeError, "Failed instal build sources: %s %s" % (out,err,)
-    
-    def cleanup(self):
-        if self.configured:
-            # remove rule
-            scope, options = self._get_ruledef()
-            command = "sudo -S netconfig delete %s %s" % (self.mode, scope)
-            
-            (out,err),proc = server.popen_ssh_command(
-                command,
-                host = self.node.hostname,
-                port = None,
-                user = self.node.slicename,
-                agent = None,
-                ident_key = self.node.ident_path,
-                server_key = self.node.server_key
-                )
-        
-            if proc.wait():
-                raise RuntimeError, "Failed instal build sources: %s %s" % (out,err,)
-            
-            self.configured = False
-    
-    def sync_trace(self, local_dir, whichtrace):
-        if whichtrace != 'netpipeStats':
-            raise ValueError, "Unsupported trace %s" % (whichtrace,)
-        
-        local_path = os.path.join(local_dir, "netpipe_stats_%s" % (self.mode,))
-        
-        # create parent local folders
-        proc = subprocess.Popen(
-            ["mkdir", "-p", os.path.dirname(local_path)],
-            stdout = open("/dev/null","w"),
-            stdin = open("/dev/null","r"))
-
-        if proc.wait():
-            raise RuntimeError, "Failed to synchronize trace: %s %s" % (out,err,)
-        
-        (out,err),proc = server.popen_ssh_command(
-            "echo 'Rules:' ; sudo -S netconfig show rules ; echo 'Pipes:' ; sudo -S netconfig show pipes",
-            host = self.node.hostname,
-            port = None,
-            user = self.node.slicename,
-            agent = None,
-            ident_key = self.node.ident_path,
-            server_key = self.node.server_key
-            )
-        
-        if proc.wait():
-            raise RuntimeError, "Failed to synchronize trace: %s %s" % (out,err,)
-        
-        # dump results to file
-        f = open(local_path, "wb")
-        f.write(err or "")
-        f.write(out or "")
-        f.close()
-        
-        return local_path
-    
-class TunFilter(object):
-    _TRACEMAP = {
-        # tracename : (remotename, localname)
-    }
-    
-    def __init__(self, api=None):
-        if not api:
-            api = plcapi.PLCAPI()
-        self._api = api
-        
-        # Attributes
-        self.module = None
-        self.args = None
-
-        # These get initialised when the filter is connected
-        self.peer_guid = None
-        self.peer_proto = None
-        self.iface_guid = None
-        self.peer = None
-        self.iface = None
-    
-    def _get(what, self):
-        wref = self.iface
-        if wref:
-            wref = wref()
-        if wref:
-            return getattr(wref, what)
-        else:
-            return None
-
-    def _set(what, self, val):
-        wref = self.iface
-        if wref:
-            wref = wref()
-        if wref:
-            setattr(wref, what, val)
-    
-    tun_proto = property(
-        functools.partial(_get, 'tun_proto'),
-        functools.partial(_set, 'tun_proto') )
-    tun_addr = property(
-        functools.partial(_get, 'tun_addr'),
-        functools.partial(_set, 'tun_addr') )
-    tun_port = property(
-        functools.partial(_get, 'tun_port'),
-        functools.partial(_set, 'tun_port') )
-    tun_key = property(
-        functools.partial(_get, 'tun_key'),
-        functools.partial(_set, 'tun_key') )
-    tun_cipher = property(
-        functools.partial(_get, 'tun_cipher'),
-        functools.partial(_set, 'tun_cipher') )
-    
-    del _get
-    del _set
-
-    def remote_trace_path(self, whichtrace):
-        iface = self.iface()
-        if iface is not None:
-            return iface.remote_trace_path(whichtrace, self._TRACEMAP)
-        return None
-
-    def remote_trace_name(self, whichtrace):
-        iface = self.iface()
-        if iface is not None:
-            return iface.remote_trace_name(whichtrace, self._TRACEMAP)
-        return None
-
-    def sync_trace(self, local_dir, whichtrace):
-        iface = self.iface()
-        if iface is not None:
-            return iface.sync_trace(local_dir, whichtrace, self._TRACEMAP)
-        return None
-
-class ClassQueueFilter(TunFilter):
-    _TRACEMAP = {
-        # tracename : (remotename, localname)
-        'dropped_stats' : ('dropped_stats', 'dropped_stats')
-    }
-    
-    def __init__(self, api=None):
-        super(ClassQueueFilter, self).__init__(api)
-        # Attributes
-        self.module = "classqueue.py"
-
-class LoggingClassQueueFilter(ClassQueueFilter):
-    _TRACEMAP = ClassQueueFilter._TRACEMAP.copy()
-    _TRACEMAP.update({
-        # tracename : (remotename, localname)
-        'queue_stats_f'   : ('queue_stats_f', 'queue_stats_f'),
-        'queue_stats_b'   : ('queue_stats_b', 'queue_stats_b'),
-    })
-    
-    def __init__(self, api=None):
-        super(LoggingClassQueueFilter, self).__init__(api)
-        # Attributes
-        self.module = "loggingclassqueue.py classqueue.py"
-        
-    def _args_get(self):
-        # Inject outpath
-        args = dict(filter(lambda x:len(x)>1, map(lambda x:x.split('=',1),(self._args or "").split(','))))
-        args["outpath"] = "queue_stats"
-        return ",".join(map("=".join, args.iteritems()))
-    def _args_set(self, value):
-        self._args = value
-    args = property(_args_get, _args_set)
-
-class ToSQueueFilter(TunFilter):
-    def __init__(self, api=None):
-        super(ToSQueueFilter, self).__init__(api)
-        # Attributes
-        self.module = "tosqueue.py"
-
diff --git a/src/nepi/testbeds/planetlab/metadata.py b/src/nepi/testbeds/planetlab/metadata.py
deleted file mode 100644 (file)
index f10893a..0000000
+++ /dev/null
@@ -1,1934 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import time
-
-from constants import TESTBED_ID, TESTBED_VERSION
-from nepi.core import metadata
-from nepi.core.metadata import Parallel
-from nepi.core.attributes import Attribute
-from nepi.util import tags, validation
-from nepi.util.constants import ApplicationStatus as AS, \
-        FactoryCategories as FC, \
-        ATTR_NEPI_TESTBED_ENVIRONMENT_SETUP, \
-        DeploymentConfiguration as DC
-
-import functools
-import os
-import os.path
-import weakref
-
-NODE = "Node"
-NODEIFACE = "NodeInterface"
-TUNIFACE = "TunInterface"
-TAPIFACE = "TapInterface"
-APPLICATION = "Application"
-CCNXDAEMON = "CCNxDaemon"
-DEPENDENCY = "Dependency"
-NEPIDEPENDENCY = "NepiDependency"
-NS3DEPENDENCY = "NS3Dependency"
-INTERNET = "Internet"
-NETPIPE = "NetPipe"
-TUNFILTER = "TunFilter"
-CLASSQUEUEFILTER = "ClassQueueFilter"
-LOGGINGCLASSQUEUEFILTER = "LoggingClassQueueFilter"
-TOSQUEUEFILTER = "TosQueueFilter"
-MULTICASTFORWARDER = "MulticastForwarder"
-MULTICASTANNOUNCER = "MulticastAnnouncer"
-MULTICASTROUTER = "MulticastRouter"
-
-TUNFILTERS = (TUNFILTER, CLASSQUEUEFILTER, LOGGINGCLASSQUEUEFILTER, TOSQUEUEFILTER)
-TAPFILTERS = (TUNFILTER, )
-ALLFILTERS = (TUNFILTER, CLASSQUEUEFILTER, LOGGINGCLASSQUEUEFILTER, TOSQUEUEFILTER)
-
-PL_TESTBED_ID = "planetlab"
-
-
-### Custom validation functions ###
-def is_addrlist(attribute, value):
-    if not validation.is_string(attribute, value):
-        return False
-    
-    if not value:
-        # No empty strings
-        return False
-    
-    components = value.split(',')
-    
-    for component in components:
-        if '/' in component:
-            addr, mask = component.split('/',1)
-        else:
-            addr, mask = component, '32'
-        
-        if mask is not None and not (mask and mask.isdigit()):
-            # No empty or nonnumeric masks
-            return False
-        
-        if not validation.is_ip4_address(attribute, addr):
-            # Address part must be ipv4
-            return False
-        
-    return True
-
-def is_portlist(attribute, value):
-    if not validation.is_string(attribute, value):
-        return False
-    
-    if not value:
-        # No empty strings
-        return False
-    
-    components = value.split(',')
-    
-    for component in components:
-        if '-' in component:
-            pfrom, pto = component.split('-',1)
-        else:
-            pfrom = pto = component
-        
-        if not pfrom or not pto or not pfrom.isdigit() or not pto.isdigit():
-            # No empty or nonnumeric ports
-            return False
-        
-    return True
-
-
-### Connection functions ####
-
-def connect_node_iface_node(testbed_instance, node_guid, iface_guid):
-    node = testbed_instance._elements[node_guid]
-    iface = testbed_instance._elements[iface_guid]
-    iface.node = node
-
-def connect_node_iface_inet(testbed_instance, iface_guid, inet_guid):
-    iface = testbed_instance._elements[iface_guid]
-    iface.has_internet = True
-
-def connect_tun_iface_node(testbed_instance, node_guid, iface_guid):
-    node = testbed_instance._elements[node_guid]
-    iface = testbed_instance._elements[iface_guid]
-    iface.node = node
-    node.required_vsys.update(('fd_tuntap', 'vif_up', 'vif_down'))
-    node.required_packages.update(('python', 'python-crypto', 'python-setuptools', 'gcc'))
-
-def connect_tun_iface_peer(proto, testbed_instance, iface_guid, peer_iface_guid):
-    iface = testbed_instance._elements[iface_guid]
-    peer_iface = testbed_instance._elements[peer_iface_guid]
-    iface.peer_iface = peer_iface
-    peer_iface.peer_iface = iface
-    iface.peer_proto = \
-    iface.tun_proto = \
-    peer_iface.peer_proto = \
-    peer_iface.tun_proto = proto
-    iface.tun_key = peer_iface.tun_key
-
-def connect_tun_iface_filter(testbed_instance, iface_guid, filter_guid):
-    iface = testbed_instance._elements[iface_guid]
-    filt = testbed_instance._elements[filter_guid]
-    traces = testbed_instance._get_traces(filter_guid)
-    if 'dropped_stats' in traces: 
-        args = filt.args if filt.args else ""
-        filt.args = ','.join(filt.args.split(',') + ["logdropped=true",])
-    iface.filter_module = filt
-    filt.iface_guid = iface_guid
-    filt.iface = weakref.ref(iface)
-
-    if filt.peer_guid:
-        connect_tun_iface_peer(filt.peer_proto, testbed_instance, filt.iface_guid, filt.peer_guid)
-
-def connect_filter_peer(proto, testbed_instance, filter_guid, peer_guid):
-    peer = testbed_instance._elements[peer_guid]
-    filt = testbed_instance._elements[filter_guid]
-    filt.peer_proto = proto
-    filt.peer_guid = peer_guid
-    if filt.iface_guid:
-        connect_tun_iface_peer(filt.peer_proto, testbed_instance, filt.iface_guid, filt.peer_guid)
-
-def connect_filter_filter(proto, testbed_instance, filter_guid, peer_guid):
-    peer = testbed_instance._elements[peer_guid]
-    filt = testbed_instance._elements[filter_guid]
-    filt.peer_proto = proto
-    peer.peer_proto = proto
-    if filt.iface_guid:
-        peer.peer_guid = filt.iface_guid
-    if peer.iface_guid:
-        filt.peer_guid = peer.iface_guid
-    if filt.iface_guid and filt.peer_guid:
-        connect_tun_iface_peer(filt.peer_proto, testbed_instance, filt.iface_guid, filt.peer_guid)
-
-def crossconnect_tun_iface_peer_init(proto, testbed_instance, iface_guid, peer_iface_data):
-    iface = testbed_instance._elements[iface_guid]
-    iface.peer_iface = None
-    iface.peer_addr = peer_iface_data.get("tun_addr")
-    iface.peer_proto = peer_iface_data.get("tun_proto") or proto
-    iface.peer_port = peer_iface_data.get("tun_port")
-    iface.peer_cipher = peer_iface_data.get("tun_cipher")
-    iface.tun_key = min(iface.tun_key, peer_iface_data.get("tun_key"))
-    iface.tun_proto = proto
-    
-    preconfigure_tuniface(testbed_instance, iface_guid)
-
-def crossconnect_tun_iface_peer_compl(proto, testbed_instance, iface_guid, peer_iface_data):
-    # refresh (refreshable) attributes for second-phase
-    iface = testbed_instance._elements[iface_guid]
-    iface.peer_addr = peer_iface_data.get("tun_addr")
-    iface.peer_proto = peer_iface_data.get("tun_proto") or proto
-    iface.peer_port = peer_iface_data.get("tun_port")
-    iface.peer_cipher = peer_iface_data.get("tun_cipher")
-    
-    postconfigure_tuniface(testbed_instance, iface_guid)
-
-def crossconnect_tun_iface_peer_both(proto, testbed_instance, iface_guid, peer_iface_data):
-    crossconnect_tun_iface_peer_init(proto, testbed_instance, iface_guid, peer_iface_data)
-    crossconnect_tun_iface_peer_compl(proto, testbed_instance, iface_guid, peer_iface_data)
-
-def crossconnect_filter_peer_init(proto, testbed_instance, filter_guid, peer_data):
-    filt = testbed_instance._elements[filter_guid]
-    filt.peer_proto = proto
-    crossconnect_tun_iface_peer_init(filt.peer_proto, testbed_instance, filt.iface_guid, peer_data)
-
-def crossconnect_filter_peer_compl(proto, testbed_instance, filter_guid, peer_data):
-    filt = testbed_instance._elements[filter_guid]
-    filt.peer_proto = proto
-    crossconnect_tun_iface_peer_compl(filt.peer_proto, testbed_instance, filt.iface_guid, peer_data)
-
-def crossconnect_filter_peer_both(proto, testbed_instance, filter_guid, peer_data):
-    crossconnect_filter_peer_init(proto, testbed_instance, iface_guid, peer_iface_data)
-    crossconnect_filter_peer_compl(proto, testbed_instance, iface_guid, peer_iface_data)
-
-def connect_dep(testbed_instance, node_guid, app_guid, node=None, app=None):
-    node = node or testbed_instance._elements[node_guid]
-    app = app or testbed_instance._elements[app_guid]
-    app.node = node
-    
-    if app.depends:
-        node.required_packages.update(set(
-            app.depends.split() ))
-   
-    if app.add_to_path:
-        if app.home_path and app.home_path not in node.pythonpath:
-            node.pythonpath.append(app.home_path)
-    
-    if app.env:
-        for envkey, envval in app.env.iteritems():
-            envval = app._replace_paths(envval)
-            node.env[envkey].append(envval)
-    
-    if app.rpmFusion:
-        node.rpmFusion = True
-
-def connect_forwarder(testbed_instance, node_guid, fwd_guid):
-    node = testbed_instance._elements[node_guid]
-    fwd = testbed_instance._elements[fwd_guid]
-    node.multicast_forwarder = fwd
-    
-    if fwd.router:
-        connect_dep(testbed_instance, node_guid, None, app=fwd.router)
-
-    connect_dep(testbed_instance, node_guid, fwd_guid)
-
-def connect_router(testbed_instance, fwd_guid, router_guid):
-    fwd = testbed_instance._elements[fwd_guid]
-    router = testbed_instance._elements[router_guid]
-    fwd.router = router
-    
-    if fwd.node:
-        connect_dep(testbed_instance, None, router_guid, node=fwd.node)
-
-def connect_node_netpipe(testbed_instance, node_guid, netpipe_guid):
-    node = testbed_instance._elements[node_guid]
-    netpipe = testbed_instance._elements[netpipe_guid]
-    netpipe.node = node
-    node.required_vsys.add('ipfw-be')
-    node.required_packages.add('ipfwslice')
-    
-
-### Creation functions ###
-
-def create_node(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    
-    # create element with basic attributes
-    element = testbed_instance._make_node(parameters)
-    
-    # add constraint on number of (real) interfaces
-    # by counting connected devices
-    dev_guids = testbed_instance.get_connected(guid, "devs", "node")
-    num_open_ifaces = sum( # count True values
-        NODEIFACE == testbed_instance._get_factory_id(guid)
-        for guid in dev_guids )
-    element.min_num_external_ifaces = num_open_ifaces
-    
-    # require vroute vsys if we have routes to set up
-    routes = testbed_instance._add_route.get(guid)
-    if routes:
-        vsys = element.routing_method(routes,
-            testbed_instance.vsys_vnet)
-        element.required_vsys.add(vsys)
-    
-    testbed_instance.elements[guid] = element
-
-def create_nodeiface(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    element = testbed_instance._make_node_iface(parameters)
-    testbed_instance.elements[guid] = element
-
-def create_tuniface(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    element = testbed_instance._make_tun_iface(parameters)
-    
-    # Set custom addresses, if there are any already
-    # Setting this early helps set up P2P links
-    if guid in testbed_instance._add_address and not (element.address or element.netmask or element.netprefix):
-        addresses = testbed_instance._add_address[guid]
-        for address in addresses:
-            (address, netprefix, broadcast) = address
-            element.add_address(address, netprefix, broadcast)
-    
-    testbed_instance.elements[guid] = element
-
-def create_tapiface(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    element = testbed_instance._make_tap_iface(parameters)
-    
-    # Set custom addresses, if there are any already
-    # Setting this early helps set up P2P links
-    if guid in testbed_instance._add_address and not (element.address or element.netmask or element.netprefix):
-        addresses = testbed_instance._add_address[guid]
-        for address in addresses:
-            (address, netprefix, broadcast) = address
-            element.add_address(address, netprefix, broadcast)
-    
-    testbed_instance.elements[guid] = element
-
-def create_tunfilter(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    element = testbed_instance._make_tun_filter(parameters)
-    testbed_instance.elements[guid] = element
-
-def create_classqueuefilter(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    element = testbed_instance._make_class_queue_filter(parameters)
-    testbed_instance.elements[guid] = element
-
-def create_loggingclassqueuefilter(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    element = testbed_instance._make_logging_class_queue_filter(parameters)
-    testbed_instance.elements[guid] = element
-
-def create_tosqueuefilter(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    element = testbed_instance._make_tos_queue_filter(parameters)
-    testbed_instance.elements[guid] = element
-
-def create_application(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    element = testbed_instance._make_application(parameters)
-    
-    # Just inject configuration stuff
-    element.home_path = "nepi-app-%s" % (guid,)
-    
-    testbed_instance.elements[guid] = element
-
-def create_ccnxdaemon(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    element = testbed_instance._make_application(parameters,
-            clazz  = testbed_instance._app.CCNxDaemon )
-    
-    # Just inject configuration stuff
-    element.home_path = "nepi-ccnd-%s" % (guid,)
-    
-    testbed_instance.elements[guid] = element
-
-def create_dependency(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    element = testbed_instance._make_dependency(parameters)
-    
-    # Just inject configuration stuff
-    element.home_path = "nepi-dep-%s" % (guid,)
-    
-    testbed_instance.elements[guid] = element
-
-def create_nepi_dependency(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    element = testbed_instance._make_nepi_dependency(parameters)
-    
-    # Just inject configuration stuff
-    element.home_path = "nepi-nepi-%s" % (guid,)
-    
-    testbed_instance.elements[guid] = element
-
-def create_ns3_dependency(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    element = testbed_instance._make_ns3_dependency(parameters)
-    
-    # Just inject configuration stuff
-    element.home_path = "nepi-ns3-%s" % (guid,)
-    
-    testbed_instance.elements[guid] = element
-
-def create_multicast_forwarder(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    element = testbed_instance._make_multicast_forwarder(parameters)
-    
-    # Just inject configuration stuff
-    element.home_path = "nepi-mcfwd-%s" % (guid,)
-    
-    testbed_instance.elements[guid] = element
-
-def create_multicast_announcer(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    element = testbed_instance._make_multicast_announcer(parameters)
-    
-    # Just inject configuration stuff
-    element.home_path = "nepi-mcann-%s" % (guid,)
-    
-    testbed_instance.elements[guid] = element
-
-def create_multicast_router(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    element = testbed_instance._make_multicast_router(parameters)
-    
-    # Just inject configuration stuff
-    element.home_path = "nepi-mcrt-%s" % (guid,)
-    
-    testbed_instance.elements[guid] = element
-
-def create_internet(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    element = testbed_instance._make_internet(parameters)
-    testbed_instance.elements[guid] = element
-
-def create_netpipe(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    element = testbed_instance._make_netpipe(parameters)
-    testbed_instance.elements[guid] = element
-
-### Start/Stop functions ###
-
-def prestart_ccnxdaemon(testbed_instance, guid):
-    # ccnx daemon needs to start before the rest of the
-    # ccn applications
-    start_application(testbed_instance, guid)
-
-def stop_ccndaemon(testbed_instance, guid):
-    app = testbed_instance.elements[guid]
-    app.kill()
-
-def start_application(testbed_instance, guid):
-    parameters = testbed_instance._get_parameters(guid)
-    traces = testbed_instance._get_traces(guid)
-    app = testbed_instance.elements[guid]
-    
-    app.stdout = "stdout" in traces
-    app.stderr = "stderr" in traces
-    app.buildlog = "buildlog" in traces
-    app.outout = "output" in traces
-    
-    app.start()
-
-def stop_application(testbed_instance, guid):
-    app = testbed_instance.elements[guid]
-    app.kill()
-
-### Status functions ###
-
-def status_application(testbed_instance, guid):
-    if guid not in testbed_instance.elements.keys():
-        return AS.STATUS_NOT_STARTED
-    
-    app = testbed_instance.elements[guid]
-    return app.status()
-
-def status_dependency(testbed_instance, guid):
-    if guid not in testbed_instance.elements.keys():
-        return AS.STATUS_NOT_STARTED
-    
-    dep = testbed_instance.elements[guid]
-    if dep.deployed():
-        return AS.STATUS_FINISHED
-    return AS.STATUS_RUNNING
-
-### Configure functions ###
-
-def configure_nodeiface(testbed_instance, guid):
-    element = testbed_instance._elements[guid]
-    
-    # Cannot explicitly configure addresses
-    if guid in testbed_instance._add_address:
-        raise ValueError, "Cannot explicitly set address of public PlanetLab interface"
-    
-    # Get siblings
-    node_guid = testbed_instance.get_connected(guid, "node", "devs")[0]
-    dev_guids = testbed_instance.get_connected(node_guid, "node", "devs")
-    siblings = [ self._element[dev_guid] 
-                 for dev_guid in dev_guids
-                 if dev_guid != guid ]
-    
-    # Fetch address from PLC api
-    element.pick_iface(siblings)
-    
-    # Do some validations
-    element.validate()
-
-def preconfigure_tuniface(testbed_instance, guid):
-    element = testbed_instance._elements[guid]
-    
-    # Set custom addresses if any, and if not set already
-    if guid in testbed_instance._add_address and not (element.address or element.netmask or element.netprefix):
-        addresses = testbed_instance._add_address[guid]
-        for address in addresses:
-            (address, netprefix, broadcast) = address
-            element.add_address(address, netprefix, broadcast)
-    
-    # Link to external interface, if any
-    for iface in testbed_instance._elements.itervalues():
-        if isinstance(iface, testbed_instance._interfaces.NodeIface) and iface.node is element.node and iface.has_internet:
-            element.external_iface = iface
-            break
-
-    # Set standard TUN attributes
-    if (not element.tun_addr or not element.tun_port) and element.external_iface:
-        element.tun_addr = element.external_iface.address
-        element.tun_port = testbed_instance.tapPortBase + int(guid)
-
-    # Set enabled traces
-    traces = testbed_instance._get_traces(guid)
-    for capmode in ('pcap', 'packets'):
-        if capmode in traces:
-            element.capture = capmode
-            break
-    else:
-        element.capture = False
-    
-    # Do some validations
-    element.validate()
-    
-    # First-phase setup
-    element.prepare('tun-%s' % (guid,))
-
-def postconfigure_tuniface(testbed_instance, guid):
-    element = testbed_instance._elements[guid]
-    
-    # Second-phase setup
-    element.launch()
-    
-def prestart_tuniface(testbed_instance, guid):
-    element = testbed_instance._elements[guid]
-    
-    # Second-phase setup
-    element.wait()
-
-def configure_node(testbed_instance, guid):
-    node = testbed_instance._elements[guid]
-    
-    # Just inject configuration stuff
-    node.home_path = "nepi-node-%s" % (guid,)
-    node.ident_path = testbed_instance.sliceSSHKey
-    node.slicename = testbed_instance.slicename
-    
-    # Do some validations
-    node.validate()
-    
-    # this will be done in parallel in all nodes
-    # this call only spawns the process
-    node.install_dependencies()
-
-def configure_node_routes(testbed_instance, guid):
-    node = testbed_instance._elements[guid]
-    routes = testbed_instance._add_route.get(guid)
-    
-    if routes:
-        devs = [ dev
-            for dev_guid in testbed_instance.get_connected(guid, "devs", "node")
-            for dev in ( testbed_instance._elements.get(dev_guid) ,)
-            if dev and isinstance(dev, testbed_instance._interfaces.TunIface) ]
-    
-        vsys = testbed_instance.vsys_vnet
-        
-        node.configure_routes(routes, devs, vsys)
-
-def configure_application(testbed_instance, guid):
-    app = testbed_instance._elements[guid]
-    
-    # Do some validations
-    app.validate()
-    
-    # Wait for dependencies
-    app.node.wait_dependencies()
-    
-    # Install stuff
-    app.async_setup()
-
-def configure_dependency(testbed_instance, guid):
-    dep = testbed_instance._elements[guid]
-    
-    # Do some validations
-    dep.validate()
-    
-    # Wait for dependencies
-    dep.node.wait_dependencies()
-    
-    # Install stuff
-    dep.async_setup()
-
-def configure_announcer(testbed_instance, guid):
-    # Link ifaces
-    fwd = testbed_instance._elements[guid]
-    fwd.ifaces = [ dev
-        for node_guid in testbed_instance.get_connected(guid, "node", "apps")
-        for dev_guid in testbed_instance.get_connected(node_guid, "devs", "node")
-        for dev in ( testbed_instance._elements.get(dev_guid) ,)
-        if dev and isinstance(dev, testbed_instance._interfaces.TunIface)
-            and dev.multicast ]
-    
-    # Install stuff
-    configure_dependency(testbed_instance, guid)
-
-def configure_forwarder(testbed_instance, guid):
-    configure_announcer(testbed_instance, guid)
-    
-    # Link ifaces to forwarder
-    fwd = testbed_instance._elements[guid]
-    for iface in fwd.ifaces:
-        iface.multicast_forwarder = '/var/run/mcastfwd'
-
-def configure_router(testbed_instance, guid):
-    # Link ifaces
-    rt = testbed_instance._elements[guid]
-    rt.nonifaces = [ dev
-        for fwd_guid in testbed_instance.get_connected(guid, "fwd", "router")
-        for node_guid in testbed_instance.get_connected(fwd_guid, "node", "apps")
-        for dev_guid in testbed_instance.get_connected(node_guid, "devs", "node")
-        for dev in ( testbed_instance._elements.get(dev_guid) ,)
-        if dev and isinstance(dev, testbed_instance._interfaces.TunIface)
-            and not dev.multicast ]
-    
-    # Install stuff
-    configure_dependency(testbed_instance, guid)
-
-def configure_netpipe(testbed_instance, guid):
-    netpipe = testbed_instance._elements[guid]
-    
-    # Do some validations
-    netpipe.validate()
-    
-    # Wait for dependencies
-    netpipe.node.wait_dependencies()
-    
-    # Install rules
-    netpipe.configure()
-
-### Factory information ###
-
-connector_types = dict({
-    "apps": dict({
-                "help": "Connector from node to applications", 
-                "name": "apps",
-                "max": -1, 
-                "min": 0
-            }),
-    "devs": dict({
-                "help": "Connector from node to network interfaces", 
-                "name": "devs",
-                "max": -1, 
-                "min": 0
-            }),
-    "deps": dict({
-                "help": "Connector from node to application dependencies "
-                        "(packages and applications that need to be installed)", 
-                "name": "deps",
-                "max": -1, 
-                "min": 0
-            }),
-    "inet": dict({
-                "help": "Connector from network interfaces to the internet", 
-                "name": "inet",
-                "max": 1, 
-                "min": 1
-            }),
-    "node": dict({
-                "help": "Connector to a Node", 
-                "name": "node",
-                "max": 1, 
-                "min": 1
-            }),
-    "router": dict({
-                "help": "Connector to a routing daemon", 
-                "name": "router",
-                "max": 1, 
-                "min": 0
-            }),
-    "fwd": dict({
-                "help": "Forwarder this routing daemon communicates with", 
-                "name": "fwd",
-                "max": 1, 
-                "min": 1
-            }),
-    "pipes": dict({
-                "help": "Connector to a NetPipe", 
-                "name": "pipes",
-                "max": 2, 
-                "min": 0
-            }),
-    
-    "tcp": dict({
-                "help": "ip-ip tunneling over TCP link", 
-                "name": "tcp",
-                "max": 1, 
-                "min": 0
-            }),
-    "udp": dict({
-                "help": "ip-ip tunneling over UDP datagrams", 
-                "name": "udp",
-                "max": 1, 
-                "min": 0
-            }),
-    "gre": dict({
-                "help": "IP or Ethernet tunneling using the GRE protocol", 
-                "name": "gre",
-                "max": 1, 
-                "min": 0
-            }),
-    "fd->": dict({
-                "help": "TUN device file descriptor provider", 
-                "name": "fd->",
-                "max": 1, 
-                "min": 0
-            }),
-    "->fd": dict({
-                "help": "TUN device file descriptor slot", 
-                "name": "->fd",
-                "max": 1, 
-                "min": 0
-            }),
-   })
-
-connections = [
-    dict({
-        "from": (TESTBED_ID, NODE, "devs"),
-        "to":   (TESTBED_ID, NODEIFACE, "node"),
-        "init_code": connect_node_iface_node,
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, NODE, "devs"),
-        "to":   (TESTBED_ID, TUNIFACE, "node"),
-        "init_code": connect_tun_iface_node,
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, NODE, "devs"),
-        "to":   (TESTBED_ID, TAPIFACE, "node"),
-        "init_code": connect_tun_iface_node,
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, NODEIFACE, "inet"),
-        "to":   (TESTBED_ID, INTERNET, "devs"),
-        "init_code": connect_node_iface_inet,
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, NODE, "apps"),
-        "to":   (TESTBED_ID, (APPLICATION, CCNXDAEMON, MULTICASTANNOUNCER), "node"),
-        "init_code": connect_dep,
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, NODE, "deps"),
-        "to":   (TESTBED_ID, (DEPENDENCY, NEPIDEPENDENCY, NS3DEPENDENCY), "node"),
-        "init_code": connect_dep,
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, NODE, "pipes"),
-        "to":   (TESTBED_ID, NETPIPE, "node"),
-        "init_code": connect_node_netpipe,
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, NODE, "apps"),
-        "to":   (TESTBED_ID, MULTICASTFORWARDER, "node"),
-        "init_code": connect_forwarder,
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, MULTICASTFORWARDER, "router"),
-        "to":   (TESTBED_ID, MULTICASTROUTER, "fwd"),
-        "init_code": connect_router,
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TUNIFACE, "tcp"),
-        "to":   (TESTBED_ID, TUNIFACE, "tcp"),
-        "init_code": functools.partial(connect_tun_iface_peer,"tcp"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TUNIFACE, "udp"),
-        "to":   (TESTBED_ID, TUNIFACE, "udp"),
-        "init_code": functools.partial(connect_tun_iface_peer,"udp"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TUNIFACE, "gre"),
-        "to":   (TESTBED_ID, TUNIFACE, "gre"),
-        "init_code": functools.partial(connect_tun_iface_peer,"gre"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TUNIFACE, "fd->"),
-        "to":   (TESTBED_ID, TUNFILTERS, "->fd"),
-        "init_code": connect_tun_iface_filter,
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TUNFILTERS, "tcp"),
-        "to":   (TESTBED_ID, TUNIFACE, "tcp"),
-        "init_code": functools.partial(connect_filter_peer,"tcp"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TUNFILTERS, "udp"),
-        "to":   (TESTBED_ID, TUNIFACE, "udp"),
-        "init_code": functools.partial(connect_filter_peer,"udp"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TAPIFACE, "tcp"),
-        "to":   (TESTBED_ID, TAPIFACE, "tcp"),
-        "init_code": functools.partial(connect_tun_iface_peer,"tcp"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TAPIFACE, "udp"),
-        "to":   (TESTBED_ID, TAPIFACE, "udp"),
-        "init_code": functools.partial(connect_tun_iface_peer,"udp"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TAPIFACE, "gre"),
-        "to":   (TESTBED_ID, TAPIFACE, "gre"),
-        "init_code": functools.partial(connect_tun_iface_peer,"gre"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TAPIFACE, "fd->"),
-        "to":   (TESTBED_ID, TAPFILTERS, "->fd"),
-        "init_code": connect_tun_iface_filter,
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TAPFILTERS, "tcp"),
-        "to":   (TESTBED_ID, TAPIFACE, "tcp"),
-        "init_code": functools.partial(connect_filter_peer,"tcp"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TAPFILTERS, "udp"),
-        "to":   (TESTBED_ID, TAPIFACE, "udp"),
-        "init_code": functools.partial(connect_filter_peer,"udp"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TUNFILTERS, "tcp"),
-        "to":   (TESTBED_ID, TUNFILTERS, "tcp"),
-        "init_code": functools.partial(connect_filter_filter,"tcp"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TUNFILTERS, "udp"),
-        "to":   (TESTBED_ID, TUNFILTERS, "udp"),
-        "init_code": functools.partial(connect_filter_filter,"udp"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TAPFILTERS, "tcp"),
-        "to":   (TESTBED_ID, TAPFILTERS, "tcp"),
-        "init_code": functools.partial(connect_filter_filter,"tcp"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TAPFILTERS, "udp"),
-        "to":   (TESTBED_ID, TAPFILTERS, "udp"),
-        "init_code": functools.partial(connect_filter_filter,"udp"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, TUNIFACE, "tcp"),
-        "to":   (None, None, "tcp"),
-        "init_code": functools.partial(crossconnect_tun_iface_peer_init,"tcp"),
-        "compl_code": functools.partial(crossconnect_tun_iface_peer_compl,"tcp"),
-        "can_cross": True
-    }),
-    dict({
-        "from": (TESTBED_ID, TUNIFACE, "udp"),
-        "to":   (None, None, "udp"),
-        "init_code": functools.partial(crossconnect_tun_iface_peer_init,"udp"),
-        "compl_code": functools.partial(crossconnect_tun_iface_peer_compl,"udp"),
-        "can_cross": True
-    }),
-    dict({
-        "from": (TESTBED_ID, TUNIFACE, "fd->"),
-        "to":   (None, None, "->fd"),
-        "compl_code": functools.partial(crossconnect_tun_iface_peer_both,"fd"),
-        "can_cross": True
-    }),
-    dict({
-        "from": (TESTBED_ID, TUNIFACE, "gre"),
-        "to":   (None, None, "gre"),
-        "compl_code": functools.partial(crossconnect_tun_iface_peer_both,"gre"),
-        "can_cross": True
-    }),
-    dict({
-        "from": (TESTBED_ID, TAPIFACE, "tcp"),
-        "to":   (None, None, "tcp"),
-        "init_code": functools.partial(crossconnect_tun_iface_peer_init,"tcp"),
-        "compl_code": functools.partial(crossconnect_tun_iface_peer_compl,"tcp"),
-        "can_cross": True
-    }),
-    dict({
-        "from": (TESTBED_ID, TAPIFACE, "udp"),
-        "to":   (None, None, "udp"),
-        "init_code": functools.partial(crossconnect_tun_iface_peer_init,"udp"),
-        "compl_code": functools.partial(crossconnect_tun_iface_peer_compl,"udp"),
-        "can_cross": True
-    }),
-    dict({
-        "from": (TESTBED_ID, TAPIFACE, "fd->"),
-        "to":   (None, None, "->fd"),
-        "compl_code": functools.partial(crossconnect_tun_iface_peer_both,"fd"),
-        "can_cross": True
-    }),
-    # EGRE is an extension of PlanetLab, so we can't connect externally
-    # if the other testbed isn't another PlanetLab
-    dict({
-        "from": (TESTBED_ID, TAPIFACE, "gre"),
-        "to":   (TESTBED_ID, None, "gre"),
-        "compl_code": functools.partial(crossconnect_tun_iface_peer_both,"gre"),
-        "can_cross": True
-    }),
-    dict({
-        "from": (TESTBED_ID, ALLFILTERS, "tcp"),
-        "to":   (None, None, "tcp"),
-        "init_code": functools.partial(crossconnect_filter_peer_init,"tcp"),
-        "compl_code": functools.partial(crossconnect_filter_peer_compl,"tcp"),
-        "can_cross": True
-    }),
-    dict({
-        "from": (TESTBED_ID, ALLFILTERS, "udp"),
-        "to":   (None, None, "udp"),
-        "init_code": functools.partial(crossconnect_filter_peer_init,"udp"),
-        "compl_code": functools.partial(crossconnect_filter_peer_compl,"udp"),
-        "can_cross": True
-    }),
-]
-
-attributes = dict({
-    "forward_X11": dict({      
-                "name": "forward_X11",
-                "help": "Forward x11 from main namespace to the node",
-                "type": Attribute.BOOL, 
-                "value": False,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_bool,
-            }),
-    "hostname": dict({      
-                "name": "hostname",
-                "help": "Constrain hostname during resource discovery. May use wildcards.",
-                "type": Attribute.STRING, 
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string,
-            }),
-    "city": dict({      
-                "name": "city",
-                "help": "Constrain location (city) during resource discovery. May use wildcards.",
-                "type": Attribute.STRING, 
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string,
-            }),
-    "country": dict({      
-                "name": "hostname",
-                "help": "Constrain location (country) during resource discovery. May use wildcards.",
-                "type": Attribute.STRING, 
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string,
-            }),
-    "region": dict({      
-                "name": "hostname",
-                "help": "Constrain location (region) during resource discovery. May use wildcards.",
-                "type": Attribute.STRING, 
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string,
-            }),
-    "architecture": dict({      
-                "name": "architecture",
-                "help": "Constrain architexture during resource discovery.",
-                "type": Attribute.ENUM, 
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "allowed": ["x86_64",
-                            "i386"],
-                "validation_function": validation.is_enum,
-            }),
-    "operating_system": dict({      
-                "name": "operatingSystem",
-                "help": "Constrain operating system during resource discovery.",
-                "type": Attribute.ENUM, 
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "allowed": ["f8",
-                            "f12",
-                            "f14",
-                            "centos",
-                            "other"],
-                "validation_function": validation.is_enum,
-            }),
-    "site": dict({      
-                "name": "site",
-                "help": "Constrain the PlanetLab site this node should reside on.",
-                "type": Attribute.ENUM, 
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "allowed": ["PLE",
-                            "PLC",
-                            "PLJ"],
-                "validation_function": validation.is_enum,
-            }),
-    "min_reliability": dict({
-                "name": "minReliability",
-                "help": "Constrain reliability while picking PlanetLab nodes. Specifies a lower acceptable bound.",
-                "type": Attribute.DOUBLE,
-                "range": (0,100),
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_number,
-            }),
-    "max_reliability": dict({
-                "name": "maxReliability",
-                "help": "Constrain reliability while picking PlanetLab nodes. Specifies an upper acceptable bound.",
-                "type": Attribute.DOUBLE,
-                "range": (0,100),
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_number,
-            }),
-    "min_bandwidth": dict({
-                "name": "minBandwidth",
-                "help": "Constrain available bandwidth while picking PlanetLab nodes. Specifies a lower acceptable bound.",
-                "type": Attribute.DOUBLE,
-                "range": (0,2**31),
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_number,
-            }),
-    "max_bandwidth": dict({
-                "name": "maxBandwidth",
-                "help": "Constrain available bandwidth while picking PlanetLab nodes. Specifies an upper acceptable bound.",
-                "type": Attribute.DOUBLE,
-                "range": (0,2**31),
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_number,
-            }),
-    "min_load": dict({
-                "name": "minLoad",
-                "help": "Constrain node load average while picking PlanetLab nodes. Specifies a lower acceptable bound.",
-                "type": Attribute.DOUBLE,
-                "range": (0,2**31),
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_number,
-            }),
-    "max_load": dict({
-                "name": "maxLoad",
-                "help": "Constrain node load average while picking PlanetLab nodes. Specifies an upper acceptable bound.",
-                "type": Attribute.DOUBLE,
-                "range": (0,2**31),
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_number,
-            }),
-    "min_cpu": dict({
-                "name": "minCpu",
-                "help": "Constrain available cpu time while picking PlanetLab nodes. Specifies a lower acceptable bound.",
-                "type": Attribute.DOUBLE,
-                "range": (0,100),
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_number,
-            }),
-    "max_cpu": dict({
-                "name": "maxCpu",
-                "help": "Constrain available cpu time while picking PlanetLab nodes. Specifies an upper acceptable bound.",
-                "type": Attribute.DOUBLE,
-                "range": (0,100),
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_number,
-            }),
-     "timeframe": dict({
-                "name": "timeframe",
-                "help": "Past time period in which to check information about the node. Values are year,month, week, latest", 
-                "type": Attribute.ENUM, 
-                "value": "week",
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "allowed": ["latest",
-                            "week",
-                            "month",
-                            "year"],
-                "validation_function": validation.is_enum,
-            }),
-           
-    "up": dict({
-                "name": "up",
-                "help": "Link up",
-                "type": Attribute.BOOL,
-                "value": True,
-                "flags": Attribute.NoDefaultValue, 
-                "validation_function": validation.is_bool
-            }),
-    "primary": dict({
-                "name": "primary",
-                "help": "This is the primary interface for the attached node",
-                "type": Attribute.BOOL,
-                "value": True,
-                "validation_function": validation.is_bool
-            }),
-    "if_name": dict({
-                "name": "if_name",
-                "help": "Device name",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    "mtu":  dict({
-                "name": "mtu", 
-                "help": "Maximum transmition unit for device",
-                "type": Attribute.INTEGER,
-                "range": (0,1500),
-                "validation_function": validation.is_integer_range(0,1500)
-            }),
-    "mask":  dict({
-                "name": "mask", 
-                "help": "Network mask for the device (eg: 24 for /24 network)",
-                "type": Attribute.INTEGER,
-                "validation_function": validation.is_integer_range(8,24)
-            }),
-    "snat":  dict({
-                "name": "snat", 
-                "help": "Enable SNAT (source NAT to the internet) no this device",
-                "type": Attribute.BOOL,
-                "value": False,
-                "validation_function": validation.is_bool
-            }),
-    "multicast":  dict({
-                "name": "multicast", 
-                "help": "Enable multicast forwarding on this device. "
-                        "Note that you still need a multicast routing daemon "
-                        "in the node.",
-                "type": Attribute.BOOL,
-                "value": False,
-                "validation_function": validation.is_bool
-            }),
-    "pointopoint":  dict({
-                "name": "pointopoint", 
-                "help": "If the interface is a P2P link, the remote endpoint's IP "
-                        "should be set on this attribute.",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    "bwlimit":  dict({
-                "name": "bwlimit", 
-                "help": "Emulated transmission speed (in kbytes per second)",
-                "type": Attribute.INTEGER,
-                "range" : (1,10*2**20),
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_integer
-            }),
-    "txqueuelen":  dict({
-                "name": "txqueuelen", 
-                "help": "Transmission queue length (in packets)",
-                "type": Attribute.INTEGER,
-                "value": 1000,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "range" : (1,10000),
-                "validation_function": validation.is_integer
-            }),
-            
-    "command": dict({
-                "name": "command",
-                "help": "Command line string",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    "ccnroutes": dict({
-                "name": "ccnRoutes",
-                "help": "Route can be static (e.g. udp ip) or multicast (e.g. udp 224.0.0.204 2869). To separate different route use '|' ",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-     "sudo": dict({
-                "name": "sudo",
-                "help": "Run with root privileges",
-                "type": Attribute.BOOL,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "value": False,
-                "validation_function": validation.is_bool
-            }),
-    "stdin": dict({
-                "name": "stdin",
-                "help": "Standard input",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-            
-    "depends": dict({
-                "name": "depends",
-                "help": "Space-separated list of packages required to run the application",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    "build-depends": dict({
-                "name": "buildDepends",
-                "help": "Space-separated list of packages required to build the application",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    "rpm-fusion": dict({
-                "name": "rpmFusion",
-                "help": "True if required packages can be found in the RpmFusion repository",
-                "type": Attribute.BOOL,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "value": False,
-                "validation_function": validation.is_bool
-            }),
-    "sources": dict({
-                "name": "sources",
-                "help": "Space-separated list of regular files to be deployed in the working path prior to building. "
-                        "Archives won't be expanded automatically.",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    "ccnxversion": dict({      
-                "name": "ccnxVersion",
-                "help": "Version of ccnx source code to install in the node.",
-                "type": Attribute.ENUM, 
-                "value": "0.6.0",
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "allowed": ["0.6.0",
-                            "0.7.1"],
-                "validation_function": validation.is_enum,
-            }),
-    "repository": dict({      
-                "name": "repository",
-                "help": "If True the ccnr command will be issued on start, and a repository will be created.",
-                "type": Attribute.BOOL, 
-                "value": False,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_bool,
-            }),
-     "ccnlocalport" : dict({
-            "name" : "ccnLocalPort", 
-            "help" : "Local port to bind the ccn daemon. (i.e. CCN_LOCAL_PORT=)",
-            "type" : Attribute.INTEGER,
-            "flags" : Attribute.DesignInvisible | \
-                    Attribute.ExecInvisible | \
-                    Attribute.ExecImmutable | \
-                    Attribute.Metadata,
-            "validation_function" : validation.is_integer,
-            }),
-    "build": dict({
-                "name": "build",
-                "help": "Build commands to execute after deploying the sources. "
-                        "Sources will be in the ${SOURCES} folder. "
-                        "Example: tar xzf ${SOURCES}/my-app.tgz && cd my-app && ./configure && make && make clean.\n"
-                        "Try to make the commands return with a nonzero exit code on error.\n"
-                        "Also, do not install any programs here, use the 'install' attribute. This will "
-                        "help keep the built files constrained to the build folder (which may "
-                        "not be the home folder), and will result in faster deployment. Also, "
-                        "make sure to clean up temporary files, to reduce bandwidth usage between "
-                        "nodes when transferring built packages.",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    "install": dict({
-                "name": "install",
-                "help": "Commands to transfer built files to their final destinations. "
-                        "Sources will be in the initial working folder, and a special "
-                        "tag ${SOURCES} can be used to reference the experiment's "
-                        "home folder (where the application commands will run).\n"
-                        "ALL sources and targets needed for execution must be copied there, "
-                        "if building has been enabled.\n"
-                        "That is, 'slave' nodes will not automatically get any source files. "
-                        "'slave' nodes don't get build dependencies either, so if you need "
-                        "make and other tools to install, be sure to provide them as "
-                        "actual dependencies instead.",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    
-    "netpipe_mode": dict({      
-                "name": "mode",
-                "help": "Link mode:\n"
-                        " * SERVER: applies to incoming connections\n"
-                        " * CLIENT: applies to outgoing connections\n"
-                        " * SERVICE: applies to both",
-                "type": Attribute.ENUM, 
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "allowed": ["SERVER",
-                            "CLIENT",
-                            "SERVICE"],
-                "validation_function": validation.is_enum,
-            }),
-    "port_list":  dict({
-                "name": "portList", 
-                "help": "Port list or range. Eg: '22', '22,23,27', '20-2000'",
-                "type": Attribute.STRING,
-                "validation_function": is_portlist,
-            }),
-    "addr_list":  dict({
-                "name": "addrList", 
-                "help": "Address list or range. Eg: '127.0.0.1', '127.0.0.1,127.0.1.1', '127.0.0.1/8'",
-                "type": Attribute.STRING,
-                "validation_function": is_addrlist,
-            }),
-    "bw_in":  dict({
-                "name": "bwIn", 
-                "help": "Inbound bandwidth limit (in Mbit/s)",
-                "type": Attribute.DOUBLE,
-                "validation_function": validation.is_number,
-            }),
-    "bw_out":  dict({
-                "name": "bwOut", 
-                "help": "Outbound bandwidth limit (in Mbit/s)",
-                "type": Attribute.DOUBLE,
-                "validation_function": validation.is_number,
-            }),
-    "plr_in":  dict({
-                "name": "plrIn", 
-                "help": "Inbound packet loss rate (0 = no loss, 1 = 100% loss)",
-                "type": Attribute.DOUBLE,
-                "validation_function": validation.is_number,
-            }),
-    "plr_out":  dict({
-                "name": "plrOut", 
-                "help": "Outbound packet loss rate (0 = no loss, 1 = 100% loss)",
-                "type": Attribute.DOUBLE,
-                "validation_function": validation.is_number,
-            }),
-    "delay_in":  dict({
-                "name": "delayIn", 
-                "help": "Inbound packet delay (in milliseconds)",
-                "type": Attribute.INTEGER,
-                "range": (0,60000),
-                "validation_function": validation.is_integer,
-            }),
-    "delay_out":  dict({
-                "name": "delayOut", 
-                "help": "Outbound packet delay (in milliseconds)",
-                "type": Attribute.INTEGER,
-                "range": (0,60000),
-                "validation_function": validation.is_integer,
-            }),
-    "module": dict({
-                "name": "module",
-                "help": "Path to a .c or .py source for a filter module, or a binary .so",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    "args": dict({
-                "name": "args",
-                "help": "Module arguments - comma-separated list of name=value pairs",
-                "type": Attribute.STRING,
-                "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-                "validation_function": validation.is_string
-            }),
-    "routing_algorithm": dict({      
-            "name": "algorithm",
-            "help": "Routing algorithm.",
-            "value": "dvmrp",
-            "type": Attribute.ENUM, 
-            "allowed": ["dvmrp"],
-            "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-            "validation_function": validation.is_enum,
-        }),
-    })
-
-traces = dict({
-    "stdout": dict({
-                "name": "stdout",
-                "help": "Standard output stream"
-              }),
-    "stderr": dict({
-                "name": "stderr",
-                "help": "Application standard error",
-              }),
-    "buildlog": dict({
-                "name": "buildlog",
-                "help": "Output of the build process",
-              }), 
-    
-    "netpipe_stats": dict({
-                "name": "netpipeStats",
-                "help": "Information about rule match counters, packets dropped, etc.",
-              }),
-
-    "packets": dict({
-                "name": "packets",
-                "help": "Detailled log of all packets going through the interface",
-              }),
-    "pcap": dict({
-                "name": "pcap",
-                "help": "PCAP trace of all packets going through the interface",
-              }),
-    "output": dict({
-                "name": "output",
-                "help": "Extra output trace for applications. When activated this trace can be referenced with wildcard a reference from an Application command line. Ex: command: 'tcpdump -w {#[elemet-label].trace[trace-id].[name|path]#}' ",
-              }),
-    "dropped_stats": dict({
-                "name": "dropped_stats",
-                "help": "Information on dropped packets on a filer or queue associated to a network interface",
-            }),
-    "queue_stats_f": dict({
-                "name": "queue_stats_f",
-                "help": "Detailled, fine-grained information on egress queue state, csv format.",
-            }),
-    "queue_stats_b": dict({
-                "name": "queue_stats_b",
-                "help": "Detailled, fine-grained information on ingress queue state, csv format.",
-            }),
-    })
-
-create_order = [ 
-    INTERNET, NODE, NODEIFACE, CLASSQUEUEFILTER, LOGGINGCLASSQUEUEFILTER, TOSQUEUEFILTER, 
-    MULTICASTANNOUNCER, MULTICASTFORWARDER, MULTICASTROUTER, 
-    TUNFILTER, TAPIFACE, TUNIFACE, NETPIPE, 
-    NEPIDEPENDENCY, NS3DEPENDENCY, DEPENDENCY, CCNXDAEMON, APPLICATION ]
-
-configure_order = [ 
-    INTERNET, Parallel(NODE), 
-    NODEIFACE, 
-    Parallel(MULTICASTANNOUNCER), Parallel(MULTICASTFORWARDER), Parallel(MULTICASTROUTER), 
-    Parallel(TAPIFACE), Parallel(TUNIFACE), NETPIPE, 
-    Parallel(NEPIDEPENDENCY), Parallel(NS3DEPENDENCY), Parallel(DEPENDENCY), Parallel(CCNXDAEMON),
-    Parallel(APPLICATION)]
-
-# Start (and prestart) node after ifaces, because the node needs the ifaces in order to set up routes
-start_order = [ INTERNET, 
-    NODEIFACE, 
-    Parallel(TAPIFACE), Parallel(TUNIFACE), 
-    Parallel(NODE), NETPIPE, 
-    Parallel(MULTICASTANNOUNCER), Parallel(MULTICASTFORWARDER), Parallel(MULTICASTROUTER), 
-    Parallel(NEPIDEPENDENCY), Parallel(NS3DEPENDENCY), Parallel(DEPENDENCY), Parallel(CCNXDAEMON),
-    Parallel(APPLICATION)]
-
-# cleanup order
-shutdown_order = [ 
-    Parallel(APPLICATION), 
-    Parallel (CCNXDAEMON),
-    Parallel(MULTICASTROUTER), Parallel(MULTICASTFORWARDER), Parallel(MULTICASTANNOUNCER), 
-    Parallel(TAPIFACE), Parallel(TUNIFACE), Parallel(NETPIPE), 
-    Parallel(NEPIDEPENDENCY), Parallel(NS3DEPENDENCY), Parallel(DEPENDENCY), 
-    NODEIFACE, Parallel(NODE) ]
-
-factories_info = dict({
-    NODE: dict({
-            "help": "Virtualized Node (V-Server style)",
-            "category": FC.CATEGORY_NODES,
-            "create_function": create_node,
-            "preconfigure_function": configure_node,
-            "prestart_function": configure_node_routes,
-            "box_attributes": [
-                "forward_X11",
-                "hostname",
-                "architecture",
-                "operating_system",
-                "site",
-                "min_reliability",
-                "max_reliability",
-                "min_bandwidth",
-                "max_bandwidth",
-                "min_load",
-                "max_load",
-                "min_cpu",
-                "max_cpu",
-                "timeframe",
-                
-                # NEPI-in-NEPI attributes
-                ATTR_NEPI_TESTBED_ENVIRONMENT_SETUP,
-            ],
-            "connector_types": ["devs", "apps", "pipes", "deps"],
-            "tags": [tags.NODE, tags.ALLOW_ROUTES],
-       }),
-    NODEIFACE: dict({
-            "help": "External network interface - they cannot be brought up or down, and they MUST be connected to the internet.",
-            "category": FC.CATEGORY_DEVICES,
-            "create_function": create_nodeiface,
-            "preconfigure_function": configure_nodeiface,
-            "box_attributes": [ ],
-            "connector_types": ["node", "inet"],
-            "tags": [tags.INTERFACE, tags.HAS_ADDRESSES],
-        }),
-    TUNIFACE: dict({
-            "help": "Virtual TUN network interface (layer 3)",
-            "category": FC.CATEGORY_DEVICES,
-            "create_function": create_tuniface,
-            "preconfigure_function": preconfigure_tuniface,
-            "configure_function": postconfigure_tuniface,
-            "prestart_function": prestart_tuniface,
-            "box_attributes": [
-                "up", "if_name", "mtu", "snat", "pointopoint", "multicast", "bwlimit",
-                "txqueuelen",
-                "tun_proto", "tun_addr", "tun_port", "tun_key", "tun_cipher",
-            ],
-            "traces": ["packets", "pcap"],
-            "connector_types": ["node","udp","tcp","fd->","gre"],
-            "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-        }),
-    TAPIFACE: dict({
-            "help": "Virtual TAP network interface (layer 2)",
-            "category": FC.CATEGORY_DEVICES,
-            "create_function": create_tapiface,
-            "preconfigure_function": preconfigure_tuniface,
-            "configure_function": postconfigure_tuniface,
-            "prestart_function": prestart_tuniface,
-            "box_attributes": [
-                "up", "if_name", "mtu", "snat", "pointopoint", "multicast", "bwlimit",
-                "txqueuelen",
-                "tun_proto", "tun_addr", "tun_port", "tun_key", "tun_cipher",
-            ],
-            "traces": ["packets", "pcap"],
-            "connector_types": ["node","udp","tcp","fd->","gre"],
-            "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-        }),
-    TUNFILTER: dict({
-            "help": "TUN/TAP stream filter\n\n"
-                    "If specified, it should be either a .py or .so module. "
-                    "It will be loaded, and all incoming and outgoing packets "
-                    "will be routed through it. The filter will not be responsible "
-                    "for buffering, packet queueing is performed in tun_connect "
-                    "already, so it should not concern itself with it. It should "
-                    "not, however, block in one direction if the other is congested.\n"
-                    "\n"
-                    "Modules are expected to have the following methods:\n"
-                    "\tinit(**args)\n"
-                    "\t\tIf arguments are given, this method will be called with the\n"
-                    "\t\tgiven arguments (as keyword args in python modules, or a single\n"
-                    "\taccept_packet(packet, direction):\n"
-                    "\t\tDecide whether to drop the packet. Direction is 0 for packets "
-                        "coming from the local side to the remote, and 1 is for packets "
-                        "coming from the remote side to the local. Return a boolean, "
-                        "true if the packet is not to be dropped.\n"
-                    "\tfilter_init():\n"
-                    "\t\tInitializes a filtering pipe (filter_run). It should "
-                        "return two file descriptors to use as a bidirectional "
-                        "pipe: local and remote. 'local' is where packets from the "
-                        "local side will be written to. After filtering, those packets "
-                        "should be written to 'remote', where tun_connect will read "
-                        "from, and it will forward them to the remote peer. "
-                        "Packets from the remote peer will be written to 'remote', "
-                        "where the filter is expected to read from, and eventually "
-                        "forward them to the local side. If the file descriptors are "
-                        "not nonblocking, they will be set to nonblocking. So it's "
-                        "better to set them from the start like that.\n"
-                    "\tfilter_run(local, remote):\n"
-                    "\t\tIf filter_init is provided, it will be called repeatedly, "
-                        "in a separate thread until the process is killed. It should "
-                        "sleep at most for a second.\n"
-                    "\tfilter_close(local, remote):\n"
-                    "\t\tCalled then the process is killed, if filter_init was provided. "
-                        "It should, among other things, close the file descriptors.\n"
-                    "\n"
-                    "Python modules are expected to return a tuple in filter_init, "
-                    "either of file descriptors or file objects, while native ones "
-                    "will receive two int*.\n"
-                    "\n"
-                    "Python modules can additionally contain a custom queue class "
-                    "that will replace the FIFO used by default. The class should "
-                    "be named 'queueclass' and contain an interface compatible with "
-                    "collections.deque. That is, indexing (especiall for q[0]), "
-                    "bool(q), popleft, appendleft, pop (right), append (right), "
-                    "len(q) and clear.",
-            "category": FC.CATEGORY_CHANNELS,
-            "create_function": create_tunfilter,
-            "box_attributes": [
-                "module", "args",
-                "tun_proto", "tun_addr", "tun_port", "tun_key", "tun_cipher",
-            ],
-            "connector_types": ["->fd","udp","tcp"],
-        }),
-    CLASSQUEUEFILTER : dict({
-            "help": "TUN classful queue, uses a separate queue for each user-definable class.\n\n"
-                    "It takes two arguments, both of which have sensible defaults:\n"
-                    "\tsize: the base size of each class' queue\n"
-                    "\tclasses: the class definitions, which follow the following syntax:\n"
-                    '\t   <CLASSLIST> ::= <CLASS> ":" CLASSLIST\n'
-                    '\t                |  <CLASS>\n'
-                    '\t   <CLASS>     ::= <PROTOLIST> "*" <PRIORITYSPEC>\n'
-                    '\t                |  <DFLTCLASS>\n'
-                    '\t   <DFLTCLASS> ::= "*" <PRIORITYSPEC>\n'
-                    '\t   <PROTOLIST> ::= <PROTO> "." <PROTOLIST>\n'
-                    '\t                |  <PROTO>\n'
-                    '\t   <PROTO>     ::= <NAME> | <NUMBER>\n'
-                    '\t   <NAME>      ::= --see http://en.wikipedia.org/wiki/List_of_IP_protocol_numbers --\n'
-                    '\t                   --only in lowercase, with special characters removed--\n'
-                    '\t                   --or see below--\n'
-                    '\t   <NUMBER>    ::= [0-9]+\n'
-                    '\t   <PRIORITYSPEC> ::= <THOUGHPUT> [ "#" <SIZE> ] [ "p" <PRIORITY> ]\n'
-                    '\t   <THOUGHPUT> ::= NUMBER -- default 1\n'
-                    '\t   <PRIORITY>  ::= NUMBER -- default 0\n'
-                    '\t   <SIZE>      ::= NUMBER -- default 1\n'
-                    "\n"
-                    "Size, thoughput and priority are all relative terms. "
-                    "Sizes are multipliers for the size argument, thoughput "
-                    "is applied relative to other classes and the same with "
-                    "priority.",
-            "category": FC.CATEGORY_CHANNELS,
-            "create_function": create_classqueuefilter,
-            "box_attributes": [
-                "args",
-                "tun_proto", "tun_addr", "tun_port", "tun_key", "tun_cipher",
-            ],
-            "connector_types": ["->fd","udp","tcp"],
-            "traces": ["dropped_stats"],
-        }),
-    LOGGINGCLASSQUEUEFILTER : dict({
-            "help": "TUN classful queue, uses a separate queue for each user-definable class.\n"
-                    "See ClassQueueFilter. This version adds detailled queue state tracing.",
-            "category": FC.CATEGORY_CHANNELS,
-            "create_function": create_loggingclassqueuefilter,
-            "box_attributes": [
-                "args",
-                "tun_proto", "tun_addr", "tun_port", "tun_key", "tun_cipher",
-            ],
-            "connector_types": ["->fd","udp","tcp"],
-            "traces": ["dropped_stats","queue_stats_f","queue_stats_b"],
-        }),
-    TOSQUEUEFILTER : dict({
-            "help": "TUN classfull queue that classifies according to the TOS (RFC 791) IP field.\n\n"
-                    "It takes a size argument that specifies the size of each class. As TOS is a "
-                    "subset of DiffServ, this queue half-implements DiffServ.",
-            "category": FC.CATEGORY_CHANNELS,
-            "create_function": create_tosqueuefilter,
-            "box_attributes": [
-                "args",
-                "tun_proto", "tun_addr", "tun_port", "tun_key", "tun_cipher",
-            ],
-            "connector_types": ["->fd","udp","tcp"],
-        }),
-
-    APPLICATION: dict({
-            "help": "Generic executable command line application",
-            "category": FC.CATEGORY_APPLICATIONS,
-            "create_function": create_application,
-            "start_function": start_application,
-            "status_function": status_application,
-            "stop_function": stop_application,
-            "configure_function": configure_application,
-            "box_attributes": ["command", "sudo", "stdin",
-                               "depends", "build-depends", "build", "install",
-                               "sources", "rpm-fusion" ],
-            "connector_types": ["node"],
-            "traces": ["stdout", "stderr", "buildlog", "output"],
-            "tags": [tags.APPLICATION],
-        }),
-
-    CCNXDAEMON: dict({
-            "help": "CCNx daemon",
-            "category": FC.CATEGORY_APPLICATIONS,
-            "create_function": create_ccnxdaemon,
-            "prestart_function": prestart_ccnxdaemon,
-            "status_function": status_application,
-            "stop_function": stop_application,
-            "configure_function": configure_application,
-            "box_attributes": ["ccnroutes", "build", "ccnlocalport",
-                "install", "ccnxversion", "sources", "repository"],
-            "connector_types": ["node"],
-            "traces": ["stdout", "stderr", "buildlog", "output"],
-            "tags": [tags.APPLICATION],
-        }),
-    DEPENDENCY: dict({
-            "help": "Requirement for package or application to be installed on some node",
-            "category": FC.CATEGORY_APPLICATIONS,
-            "create_function": create_dependency,
-            "preconfigure_function": configure_dependency,
-            "status_function": status_dependency,
-            "box_attributes": ["depends", "build-depends", "build", "install",
-                               "sources", "rpm-fusion" ],
-            "connector_types": ["node"],
-            "traces": ["buildlog"],
-        }),
-    NEPIDEPENDENCY: dict({
-            "help": "Requirement for NEPI inside NEPI - required to run testbed instances inside a node",
-            "category": FC.CATEGORY_APPLICATIONS,
-            "create_function": create_nepi_dependency,
-            "preconfigure_function": configure_dependency,
-            "box_attributes": [],
-            "connector_types": ["node"],
-            "traces": ["buildlog"],
-        }),
-    NS3DEPENDENCY: dict({
-            "help": "Requirement for NS3 inside NEPI - required to run NS3 testbed instances inside a node. It also needs NepiDependency.",
-            "category": FC.CATEGORY_APPLICATIONS,
-            "create_function": create_ns3_dependency,
-            "preconfigure_function": configure_dependency,
-            "box_attributes": [ ],
-            "connector_types": ["node"],
-            "traces": ["buildlog"],
-        }),
-    MULTICASTFORWARDER: dict({
-            "help": "This application installs a userspace packet forwarder "
-                    "that, when connected to a node, filters all packets "
-                    "flowing through multicast-capable virtual interfaces "
-                    "and applies custom-specified routing policies.",
-            "category": FC.CATEGORY_APPLICATIONS,
-            "create_function": create_multicast_forwarder,
-            "preconfigure_function": configure_forwarder,
-            "start_function": start_application,
-            "status_function": status_application,
-            "stop_function": stop_application,
-            "box_attributes": [ ],
-            "connector_types": ["node","router"],
-            "traces": ["buildlog","stderr"],
-        }),
-    MULTICASTANNOUNCER: dict({
-            "help": "This application installs a userspace daemon that "
-                    "monitors multicast membership and announces it on all "
-                    "multicast-capable interfaces.\n"
-                    "This does not usually happen automatically on PlanetLab slivers.",
-            "category": FC.CATEGORY_APPLICATIONS,
-            "create_function": create_multicast_announcer,
-            "preconfigure_function": configure_announcer,
-            "start_function": start_application,
-            "status_function": status_application,
-            "stop_function": stop_application,
-            "box_attributes": [ ],
-            "connector_types": ["node"],
-            "traces": ["buildlog","stderr"],
-        }),
-    MULTICASTROUTER: dict({
-            "help": "This application installs a userspace daemon that "
-                    "monitors multicast membership and announces it on all "
-                    "multicast-capable interfaces.\n"
-                    "This does not usually happen automatically on PlanetLab slivers.",
-            "category": FC.CATEGORY_APPLICATIONS,
-            "create_function": create_multicast_router,
-            "preconfigure_function": configure_router,
-            "start_function": start_application,
-            "status_function": status_application,
-            "stop_function": stop_application,
-            "box_attributes": ["routing_algorithm"],
-            "connector_types": ["fwd"],
-            "traces": ["buildlog","stdout","stderr"],
-        }),
-    INTERNET: dict({
-            "help": "Internet routing",
-            "category": FC.CATEGORY_CHANNELS,
-            "create_function": create_internet,
-            "connector_types": ["devs"],
-            "tags": [tags.INTERNET],
-        }),
-    NETPIPE: dict({
-            "help": "Link emulation",
-            "category": FC.CATEGORY_CHANNELS,
-            "create_function": create_netpipe,
-            "configure_function": configure_netpipe,
-            "box_attributes": ["netpipe_mode",
-                               "addr_list", "port_list",
-                               "bw_in","plr_in","delay_in",
-                               "bw_out","plr_out","delay_out"],
-            "connector_types": ["node"],
-            "traces": ["netpipe_stats"],
-        }),
-})
-
-testbed_attributes = dict({
-        "slice_hrn": dict({
-            "name": "sliceHrn",
-            "help": "The hierarchical Resource Name (HRN) for the PlanetLab slice.",
-            "type": Attribute.STRING,
-            "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable | Attribute.NoDefaultValue,
-            "validation_function": validation.is_string
-        }),
-        "sfa": dict({
-            "name": "sfa",
-            "help": "Activates the use of SFA for node reservation.",
-            "type": Attribute.BOOL,
-            "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable | Attribute.NoDefaultValue,
-            "validation_function": validation.is_bool
-        }),
-        "slice": dict({
-            "name": "slice",
-            "help": "The name of the PlanetLab slice to use",
-            "type": Attribute.STRING,
-            "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable | Attribute.NoDefaultValue,
-            "validation_function": validation.is_string
-        }),
-        "auth_user": dict({
-            "name": "authUser",
-            "help": "The name of the PlanetLab user to use for API calls - it must have at least a User role.",
-            "type": Attribute.STRING,
-            "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable | Attribute.NoDefaultValue,
-            "validation_function": validation.is_string
-        }),
-        "auth_pass": dict({
-            "name": "authPass",
-            "help": "The PlanetLab user's password.",
-            "type": Attribute.STRING,
-            "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable | Attribute.NoDefaultValue,
-            "validation_function": validation.is_string
-        }),
-        "plc_host": dict({
-            "name": "plcHost",
-            "help": "The PlanetLab PLC API host",
-            "type": Attribute.STRING,
-            "value": "www.planet-lab.eu",
-            "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-            "validation_function": validation.is_string
-        }),
-        "plc_url": dict({
-            "name": "plcUrl",
-            "help": "The PlanetLab PLC API url pattern - %(hostname)s is replaced by plcHost.",
-            "type": Attribute.STRING,
-            "value": "https://%(hostname)s:443/PLCAPI/",
-            "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-            "validation_function": validation.is_string
-        }),
-        "proxy": dict({
-            "name": "proxy",
-            "help": "Https proxy to connect to the outside world",
-            "type": Attribute.STRING,
-            "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-            "validation_function": validation.is_string
-        }),
-        "p2p_deployment": dict({
-            "name": "p2pDeployment",
-            "help": "Enable peer-to-peer deployment of applications and dependencies. "
-                    "When enabled, dependency packages and applications are "
-                    "deployed in a P2P fashion, picking a single node to do "
-                    "the building or repo download, while all the others "
-                    "cooperatively exchange resulting binaries or rpms. "
-                    "When deploying to many nodes, this is a far more efficient "
-                    "use of resources. It does require re-encrypting and distributing "
-                    "the slice's private key. Though it is implemented in a secure "
-                    "fashion, if they key's sole purpose is not PlanetLab, then this "
-                    "feature should be disabled.",
-            "type": Attribute.BOOL,
-            "value": True,
-            "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-            "validation_function": validation.is_bool
-        }),
-        "slice_ssh_key": dict({
-            "name": "sliceSSHKey",
-            "help": "The controller-local path to the slice user's ssh private key. "
-                    "It is the user's responsability to deploy this file where the controller "
-                    "will run, it won't be done automatically because it's sensitive information. "
-                    "It is recommended that a NEPI-specific user be created for this purpose and "
-                    "this purpose alone.",
-            "type": Attribute.STRING,
-            "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable | Attribute.NoDefaultValue,
-            "validation_function": validation.is_string
-        }),
-        "pl_log_level": dict({      
-            "name": "plLogLevel",
-            "help": "Verbosity of logging of planetlab events.",
-            "value": "INFO",
-            "type": Attribute.ENUM, 
-            "allowed": ["DEBUG",
-                        "INFO",
-                        "WARNING",
-                        "ERROR",
-                        "CRITICAL"],
-            "validation_function": validation.is_enum,
-        }),
-        "tap_port_base":  dict({
-            "name": "tapPortBase", 
-            "help": "Base port to use when connecting TUN/TAPs. Effective port will be BASE + GUID.",
-            "type": Attribute.INTEGER,
-            "value": 15000,
-            "range": (2000,30000),
-            "validation_function": validation.is_integer_range(2000,30000)
-        }),
-        "clean_proc": dict({
-            "name": "cleanProc",
-            "help": "Set to True if the slice will be dedicated to this experiment. "
-                    "NEPI will perform node and slice process cleanup, making sure slices are "
-                    "in a clean, repeatable state before running the experiment.",
-            "type": Attribute.BOOL,
-            "value": False,
-            "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-            "validation_function": validation.is_bool
-        }),
-        "clean_home": dict({
-            "name": "cleanHome",
-            "help": "Set to True all preexistent directories in the home "
-                    "directory of each sliver will be removed before the "
-                    "start of the experiment.",
-            "type": Attribute.BOOL,
-            "value": False,
-            "flags": Attribute.ExecReadOnly | Attribute.ExecImmutable,
-            "validation_function": validation.is_bool
-        }),
-    })
-
-supported_recovery_policies = [
-        DC.POLICY_FAIL,
-        DC.POLICY_RESTART,
-        DC.POLICY_RECOVER,
-    ]
-
-class MetadataInfo(metadata.MetadataInfo):
-    @property
-    def connector_types(self):
-        return connector_types
-
-    @property
-    def connections(self):
-        return connections
-
-    @property
-    def attributes(self):
-        return attributes
-
-    @property
-    def traces(self):
-        return traces
-
-    @property
-    def create_order(self):
-        return create_order
-
-    @property
-    def configure_order(self):
-        return configure_order
-
-    @property
-    def prestart_order(self):
-        return start_order
-
-    @property
-    def start_order(self):
-        return start_order
-
-    @property
-    def factories_info(self):
-        return factories_info
-
-    @property
-    def testbed_attributes(self):
-        return testbed_attributes
-
-    @property
-    def testbed_id(self):
-        return TESTBED_ID
-
-    @property
-    def testbed_version(self):
-        return TESTBED_VERSION
-
-    @property
-    def supported_recovery_policies(self):
-        return supported_recovery_policies
-
-
diff --git a/src/nepi/testbeds/planetlab/multicast.py b/src/nepi/testbeds/planetlab/multicast.py
deleted file mode 100644 (file)
index 7394e67..0000000
+++ /dev/null
@@ -1,150 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID
-
-import os
-import os.path
-import sys
-import functools
-
-import nepi.util.server as server
-import nepi.util.ipaddr2 as ipaddr2
-
-import logging
-
-import application
-
-class MulticastForwarder(application.Application):
-    """
-    This application installs a userspace packet forwarder
-    that, when connected to a node, filters all packets
-    flowing through multicast-capable virtual interfaces
-    and applies custom-specified routing policies
-    """
-    def __init__(self, *p, **kw):
-        super(MulticastForwarder, self).__init__(*p, **kw)
-        
-        self.sources = ' '.join([
-            os.path.join( os.path.dirname(__file__),
-                "scripts", "mcastfwd.py" ),
-            ipaddr2.__file__.replace('.pyc','.py').replace('.pyo','.py'),
-        ])
-        
-        self.sudo = True
-        
-        self.depends = "python"
-        
-        # Initialized when connected
-        self.ifaces = []
-        self.router = None
-    
-    def _command_get(self):
-        cmd = "python mcastfwd.py "
-        if not self.router:
-            cmd += "-R "
-        cmd += ' '.join([iface.address for iface in self.ifaces])
-        return cmd
-    def _command_set(self, value):
-        # ignore
-        return
-    command = property(_command_get, _command_set)
-    
-        
-class MulticastAnnouncer(application.Application):
-    """
-    This application installs a userspace daemon that
-    monitors multicast membership and announces it on all
-    multicast-capable interfaces.
-    This does not usually happen automatically on PlanetLab slivers.
-    """
-    def __init__(self, *p, **kw):
-        super(MulticastAnnouncer, self).__init__(*p, **kw)
-        
-        self.sources = ' '.join([
-            os.path.join( os.path.dirname(__file__),
-                "scripts", "mcastfwd.py" ),
-            ipaddr2.__file__.replace('.pyc','.py').replace('.pyo','.py'),
-        ])
-        
-        self.sudo = True
-        
-        self.depends = "python"
-        
-        self.ifaces = []
-        self.router = None
-    
-    def _command_get(self):
-        return (
-            "python mcastfwd.py -A %s"
-        ) % ( ' '.join([iface.address for iface in self.ifaces]), )
-    def _command_set(self, value):
-        # ignore
-        return
-    command = property(_command_get, _command_set)
-
-class MulticastRouter(application.Application):
-    """
-    This application installs a userspace daemon that
-    monitors multicast membership and announces it on all
-    multicast-capable interfaces.
-    This does not usually happen automatically on PlanetLab slivers.
-    """
-    ALGORITHM_MAP = {
-        'dvmrp' : {
-            'sources' :
-                ' '.join([
-                    os.path.join( os.path.dirname(__file__),
-                        "scripts", "mrouted-3.9.5-pl.patch" ),
-                ]) ,
-            'depends' : "",
-            'buildDepends' : "byacc gcc make patch",
-            'build' : 
-                "mkdir -p mrouted && "
-                "echo '3a1c1e72c4f6f7334d72df4c50b510d7  mrouted-3.9.5.tar.bz2' > archive_sums.txt && "
-                "wget -q -c -O mrouted-3.9.5.tar.bz2 ftp://ftp.vmlinux.org/pub/People/jocke/mrouted/mrouted-3.9.5.tar.bz2 && "
-                "md5sum -c archive_sums.txt && "
-                "tar xvjf mrouted-3.9.5.tar.bz2 -C mrouted --strip-components=1 && "
-                "cd mrouted && patch -p1 < ${SOURCES}/mrouted-3.9.5-pl.patch && make"
-                ,
-            'install' : "cp mrouted/mrouted ${SOURCES}",
-            'command' : 
-                "while test \\! -e /var/run/mcastrt ; do sleep 1 ; done ; "
-                "echo 'phyint eth0 disable' > ./mrouted.conf ; "
-                "for iface in %(nonifaces)s ; do echo \"phyint $iface disable\" >> ./mrouted.conf ; done ; "
-                "./mrouted -f %(debugbit)s -c ./mrouted.conf"
-                ,
-            'debugbit' : "-dpacket,igmp,routing,interface,pruning,membership,cache",
-        }
-    }
-    
-    def __init__(self, *p, **kw):
-        super(MulticastRouter, self).__init__(*p, **kw)
-        
-        self.algorithm = 'dvmrp'
-        self.sudo = True
-        self.nonifaces = []
-    
-    def _non_set(self, value):
-        # ignore
-        return
-    
-    def _gen_get(attribute, self):
-        return self.ALGORITHM_MAP[self.algorithm][attribute]
-    
-    def _command_get(self):
-        command = self.ALGORITHM_MAP[self.algorithm]['command']
-        debugbit = self.ALGORITHM_MAP[self.algorithm]['debugbit']
-        
-        # download rpms and pack into a tar archive
-        return command % {
-            'nonifaces' : ' '.join([iface.if_name for iface in self.nonifaces if iface.if_name]),
-            'debugbit' : (debugbit if self.stderr else ""),
-        }
-    command = property(_command_get, _non_set)
-
-    build = property(functools.partial(_gen_get, "build"), _non_set)
-    install = property(functools.partial(_gen_get, "install"), _non_set)
-    sources = property(functools.partial(_gen_get, "sources"), _non_set)
-    depends = property(functools.partial(_gen_get, "depends"), _non_set)
-    buildDepends = property(functools.partial(_gen_get, "buildDepends"), _non_set)
-
diff --git a/src/nepi/testbeds/planetlab/node.py b/src/nepi/testbeds/planetlab/node.py
deleted file mode 100644 (file)
index a73f6ab..0000000
+++ /dev/null
@@ -1,826 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID
-import plcapi
-import operator
-import rspawn
-import time
-import os
-import collections
-import cStringIO
-import resourcealloc
-import socket
-import sys
-import logging
-import ipaddr
-import operator
-import re
-
-from nepi.util import server
-from nepi.util import parallel
-
-import application
-
-MAX_VROUTE_ROUTES = 5
-
-class UnresponsiveNodeError(RuntimeError):
-    pass
-
-def _castproperty(typ, propattr):
-    def _get(self):
-        return getattr(self, propattr)
-    def _set(self, value):
-        if value is not None or (isinstance(value, basestring) and not value):
-            value = typ(value)
-        return setattr(self, propattr, value)
-    def _del(self, value):
-        return delattr(self, propattr)
-    _get.__name__ = propattr + '_get'
-    _set.__name__ = propattr + '_set'
-    _del.__name__ = propattr + '_del'
-    return property(_get, _set, _del)
-
-class Node(object):
-    BASEFILTERS = {
-        # Map Node attribute to plcapi filter name
-        'hostname' : 'hostname',
-    }
-    
-    TAGFILTERS = {
-        # Map Node attribute to (<tag name>, <plcapi filter expression>)
-        #   There are replacements that are applied with string formatting,
-        #   so '%' has to be escaped as '%%'.
-        'architecture' : ('arch','value'),
-        'operatingSystem' : ('fcdistro','value'),
-        'pl_distro' : ('pldistro','value'),
-        'city' : ('city','value'),
-        'country' : ('country','value'),
-        'region' : ('region','value'),
-        'minReliability' : ('reliability%(timeframe)s', ']value'),
-        'maxReliability' : ('reliability%(timeframe)s', '[value'),
-        'minBandwidth' : ('bw%(timeframe)s', ']value'),
-        'maxBandwidth' : ('bw%(timeframe)s', '[value'),
-        'minLoad' : ('load%(timeframe)s', ']value'),
-        'maxLoad' : ('load%(timeframe)s', '[value'),
-        'minCpu' : ('cpu%(timeframe)s', ']value'),
-        'maxCpu' : ('cpu%(timeframe)s', '[value'),
-    }
-    
-    RATE_FACTORS = (
-        # (<tag name>, <weight>, <default>)
-        ('bw%(timeframe)s', -0.001, 1024.0),
-        ('cpu%(timeframe)s', 0.1, 40.0),
-        ('load%(timeframe)s', -0.2, 3.0),
-        ('reliability%(timeframe)s', 1, 100.0),
-    )
-    
-    DEPENDS_PIDFILE = '/tmp/nepi-depends.pid'
-    DEPENDS_LOGFILE = '/tmp/nepi-depends.log'
-
-    RPM_FUSION_URL = 'http://download1.rpmfusion.org/free/fedora/rpmfusion-free-release-stable.noarch.rpm'
-    RPM_FUSION_URL_F12 = 'http://download1.rpmfusion.org/free/fedora/releases/12/Everything/x86_64/os/rpmfusion-free-release-12-1.noarch.rpm'
-    
-    minReliability = _castproperty(float, '_minReliability')
-    maxReliability = _castproperty(float, '_maxReliability')
-    minBandwidth = _castproperty(float, '_minBandwidth')
-    maxBandwidth = _castproperty(float, '_maxBandwidth')
-    minCpu = _castproperty(float, '_minCpu')
-    maxCpu = _castproperty(float, '_maxCpu')
-    minLoad = _castproperty(float, '_minLoad')
-    maxLoad = _castproperty(float, '_maxLoad')
-    
-    def __init__(self, api=None, sliceapi=None):
-        if not api:
-            api = plcapi.PLCAPI()
-        self._api = api
-        self._sliceapi = sliceapi or api
-        
-        # Attributes
-        self.hostname = None
-        self.architecture = None
-        self.operatingSystem = None
-        self.pl_distro = None
-        self.site = None
-        self.city = None
-        self.country = None
-        self.region = None
-        self.minReliability = None
-        self.maxReliability = None
-        self.minBandwidth = None
-        self.maxBandwidth = None
-        self.minCpu = None
-        self.maxCpu = None
-        self.minLoad = None
-        self.maxLoad = None
-        self.min_num_external_ifaces = None
-        self.max_num_external_ifaces = None
-        self._timeframe = 'w'
-        
-        # Applications and routes add requirements to connected nodes
-        self.required_packages = set()
-        self.required_vsys = set()
-        self.pythonpath = []
-        self.rpmFusion = False
-        self.env = collections.defaultdict(list)
-        
-        # Some special applications - initialized when connected
-        self.multicast_forwarder = None
-        
-        # Testbed-derived attributes
-        self.slicename = None
-        self.ident_path = None
-        self.server_key = None
-        self.home_path = None
-        self.enable_proc_cleanup = False
-        self.enable_home_cleanup = False
-        
-        # Those are filled when an actual node is allocated
-        self._node_id = None
-        self._yum_dependencies = None
-        self._installed = False
-
-        # Logging
-        self._logger = logging.getLogger('nepi.testbeds.planetlab')
-
-    def set_timeframe(self, timeframe):
-        if timeframe == "latest":
-            self._timeframe = ""
-        elif timeframe == "month":
-            self._timeframe = "m"
-        elif timeframe == "year":
-            self._timeframe = "y"
-        else:
-            self._timeframe = "w"
-
-    def get_timeframe(self):
-        if self._timeframe == "":
-            return "latest"
-        if self._timeframe == "m":
-            return "month"
-        if self._timeframe == "y":
-            return "year"
-        return "week"
-
-    timeframe = property(get_timeframe, set_timeframe)
-    
-    def _nepi_testbed_environment_setup_get(self):
-        command = cStringIO.StringIO()
-        command.write('export PYTHONPATH=$PYTHONPATH:%s' % (
-            ':'.join(["${HOME}/"+server.shell_escape(s) for s in self.pythonpath])
-        ))
-        command.write(' ; export PATH=$PATH:%s' % (
-            ':'.join(["${HOME}/"+server.shell_escape(s) for s in self.pythonpath])
-        ))
-        if self.env:
-            for envkey, envvals in self.env.iteritems():
-                for envval in envvals:
-                    command.write(' ; export %s=%s' % (envkey, envval))
-        return command.getvalue()
-
-    def _nepi_testbed_environment_setup_set(self, value):
-        pass
-
-    _nepi_testbed_environment_setup = property(
-        _nepi_testbed_environment_setup_get,
-        _nepi_testbed_environment_setup_set)
-    
-    def build_filters(self, target_filters, filter_map):
-        for attr, tag in filter_map.iteritems():
-            value = getattr(self, attr, None)
-            if value is not None:
-                target_filters[tag] = value
-        return target_filters
-    
-    @property
-    def applicable_filters(self):
-        has = lambda att : getattr(self,att,None) is not None
-        return (
-            filter(has, self.BASEFILTERS.iterkeys())
-            + filter(has, self.TAGFILTERS.iterkeys())
-        )
-    
-    def find_candidates(self, filter_slice_id=None):
-        self._logger.info("Finding candidates for %s", self.make_filter_description())
-        
-        fields = ('node_id',)
-        replacements = {'timeframe':self._timeframe}
-        
-        # get initial candidates (no tag filters)
-        basefilters = self.build_filters({}, self.BASEFILTERS)
-        rootfilters = basefilters.copy()
-        if filter_slice_id:
-            basefilters['|slice_ids'] = (filter_slice_id,)
-        
-        # only pick healthy nodes
-        basefilters['run_level'] = 'boot'
-        basefilters['boot_state'] = 'boot'
-        basefilters['node_type'] = 'regular' # nepi can only handle regular nodes (for now)
-        basefilters['>last_contact'] = int(time.time()) - 5*3600 # allow 5h out of contact, for timezone discrepancies
-        
-        # keyword-only "pseudofilters"
-        extra = {}
-        if self.site:
-            extra['peer'] = self.site
-            
-        candidates = set(map(operator.itemgetter('node_id'), 
-            self._sliceapi.GetNodes(filters=basefilters, fields=fields, **extra)))
-
-        # filter by tag, one tag at a time
-        applicable = self.applicable_filters
-        for tagfilter in self.TAGFILTERS.iteritems():
-            attr, (tagname, expr) = tagfilter
-            
-            # don't bother if there's no filter defined
-            if attr in applicable:
-                tagfilter = rootfilters.copy()
-                tagfilter['tagname'] = tagname % replacements
-                tagfilter[expr % replacements] = str(getattr(self,attr))
-                tagfilter['node_id'] = list(candidates)
-              
-                candidates &= set(map(operator.itemgetter('node_id'),
-                    self._sliceapi.GetNodeTags(filters=tagfilter, fields=fields)))
-
-        # filter by vsys tags - special case since it doesn't follow
-        # the usual semantics
-        if self.required_vsys:
-            newcandidates = collections.defaultdict(set)
-            
-            vsys_tags = self._sliceapi.GetNodeTags(
-                tagname='vsys', 
-                node_id = list(candidates), 
-                fields = ['node_id','value'])
-
-            vsys_tags = map(
-                operator.itemgetter(['node_id','value']),
-                vsys_tags)
-            
-            required_vsys = self.required_vsys
-            for node_id, value in vsys_tags:
-                if value in required_vsys:
-                    newcandidates[value].add(node_id)
-            
-            # take only those that have all the required vsys tags
-            newcandidates = reduce(
-                lambda accum, new : accum & new,
-                newcandidates.itervalues(),
-                candidates)
-        
-        # filter by iface count
-        if self.min_num_external_ifaces is not None or self.max_num_external_ifaces is not None:
-            # fetch interfaces for all, in one go
-            filters = basefilters.copy()
-            filters['node_id'] = list(candidates)
-            ifaces = dict(map(operator.itemgetter('node_id','interface_ids'),
-                self._sliceapi.GetNodes(filters=basefilters, fields=('node_id','interface_ids')) ))
-            
-            # filter candidates by interface count
-            if self.min_num_external_ifaces is not None and self.max_num_external_ifaces is not None:
-                predicate = ( lambda node_id : 
-                    self.min_num_external_ifaces <= len(ifaces.get(node_id,())) <= self.max_num_external_ifaces )
-            elif self.min_num_external_ifaces is not None:
-                predicate = ( lambda node_id : 
-                    self.min_num_external_ifaces <= len(ifaces.get(node_id,())) )
-            else:
-                predicate = ( lambda node_id : 
-                    len(ifaces.get(node_id,())) <= self.max_num_external_ifaces )
-            
-            candidates = set(filter(predicate, candidates))
-       
-        # make sure hostnames are resolvable
-        hostnames = dict() 
-        if candidates:
-            self._logger.info("  Found %s candidates. Checking for reachability...", len(candidates))
-           
-            hostnames = dict(map(operator.itemgetter('node_id','hostname'),
-                self._sliceapi.GetNodes(list(candidates), ['node_id','hostname'])
-            ))
-
-            def resolvable(node_id):
-                try:
-                    addr = server.gethostbyname(hostnames[node_id])
-                    return addr is not None
-                except:
-                    return False
-            candidates = set(parallel.pfilter(resolvable, candidates,
-                maxthreads = 16))
-
-            self._logger.info("  Found %s reachable candidates.", len(candidates))
-
-            for h in hostnames.keys():
-                if h not in candidates:
-                    del hostnames[h]
-
-            hostnames = dict((v,k) for k, v in hostnames.iteritems())
-
-        return hostnames
-    
-    def make_filter_description(self):
-        """
-        Makes a human-readable description of filtering conditions
-        for find_candidates.
-        """
-        
-        # get initial candidates (no tag filters)
-        filters = self.build_filters({}, self.BASEFILTERS)
-        
-        # keyword-only "pseudofilters"
-        if self.site:
-            filters['peer'] = self.site
-            
-        # filter by tag, one tag at a time
-        applicable = self.applicable_filters
-        for tagfilter in self.TAGFILTERS.iteritems():
-            attr, (tagname, expr) = tagfilter
-            
-            # don't bother if there's no filter defined
-            if attr in applicable:
-                filters[attr] = getattr(self,attr)
-        
-        # filter by vsys tags - special case since it doesn't follow
-        # the usual semantics
-        if self.required_vsys:
-            filters['vsys'] = ','.join(list(self.required_vsys))
-        
-        # filter by iface count
-        if self.min_num_external_ifaces is not None or self.max_num_external_ifaces is not None:
-            filters['num_ifaces'] = '-'.join([
-                str(self.min_num_external_ifaces or '0'),
-                str(self.max_num_external_ifaces or 'inf')
-            ])
-            
-        return '; '.join(map('%s: %s'.__mod__,filters.iteritems()))
-
-    def assign_node_id(self, node_id):
-        self._node_id = node_id
-        self.fetch_node_info()
-    
-    def unassign_node(self):
-        self._node_id = None
-        self.hostip = None
-        
-        try:
-            orig_attrs = self.__orig_attrs
-        except AttributeError:
-            return
-            
-        for key, value in orig_attrs.iteritems():
-            setattr(self, key, value)
-        del self.__orig_attrs
-    
-    def rate_nodes(self, nodes):
-        rates = collections.defaultdict(int)
-        tags = collections.defaultdict(dict)
-        replacements = {'timeframe':self._timeframe}
-        tagnames = [ tagname % replacements 
-                     for tagname, weight, default in self.RATE_FACTORS ]
-       
-        taginfo = self._sliceapi.GetNodeTags(
-            node_id=list(nodes), 
-            tagname=tagnames,
-            fields=('node_id','tagname','value'))
-
-        unpack = operator.itemgetter('node_id','tagname','value')
-        for value in taginfo:
-            node, tagname, value = unpack(value)
-            if value and value.lower() != 'n/a':
-                tags[tagname][node] = float(value)
-        
-        for tagname, weight, default in self.RATE_FACTORS:
-            taginfo = tags[tagname % replacements].get
-            for node in nodes:
-                rates[node] += weight * taginfo(node,default)
-        
-        return map(rates.__getitem__, nodes)
-            
-    def fetch_node_info(self):
-        orig_attrs = {}
-        
-        info, tags = self._sliceapi.GetNodeInfo(self._node_id)
-        info = info[0]
-        
-        tags = dict( (t['tagname'],t['value'])
-                     for t in tags )
-
-        orig_attrs['min_num_external_ifaces'] = self.min_num_external_ifaces
-        orig_attrs['max_num_external_ifaces'] = self.max_num_external_ifaces
-        self.min_num_external_ifaces = None
-        self.max_num_external_ifaces = None
-        if not self._timeframe: self._timeframe = 'w'
-        
-        replacements = {'timeframe':self._timeframe}
-
-        for attr, tag in self.BASEFILTERS.iteritems():
-            if tag in info:
-                value = info[tag]
-                if hasattr(self, attr):
-                    orig_attrs[attr] = getattr(self, attr)
-                setattr(self, attr, value)
-        for attr, (tag,_) in self.TAGFILTERS.iteritems():
-            tag = tag % replacements
-            if tag in tags:
-                value = tags[tag]
-                if hasattr(self, attr):
-                    orig_attrs[attr] = getattr(self, attr)
-                if not value or value.lower() == 'n/a':
-                    value = None
-                setattr(self, attr, value)
-        
-        if 'peer_id' in info:
-            orig_attrs['site'] = self.site
-            self.site = self._sliceapi.peer_map[info['peer_id']]
-        
-        if 'interface_ids' in info:
-            self.min_num_external_ifaces = \
-            self.max_num_external_ifaces = len(info['interface_ids'])
-        
-        if 'ssh_rsa_key' in info:
-            orig_attrs['server_key'] = self.server_key
-            self.server_key = info['ssh_rsa_key']
-        
-        self.hostip = server.gethostbyname(self.hostname)
-        
-        try:
-            self.__orig_attrs
-        except AttributeError:
-            self.__orig_attrs = orig_attrs
-
-    def validate(self):
-        if self.home_path is None:
-            raise AssertionError, "Misconfigured node: missing home path"
-        if self.ident_path is None or not os.access(self.ident_path, os.R_OK):
-            raise AssertionError, "Misconfigured node: missing slice SSH key"
-        if self.slicename is None:
-            raise AssertionError, "Misconfigured node: unspecified slice"
-
-    def recover(self):
-        # Mark dependencies installed
-        self._installed = True
-        
-        # Clear load attributes, they impair re-discovery
-        self.minReliability = \
-        self.maxReliability = \
-        self.minBandwidth = \
-        self.maxBandwidth = \
-        self.minCpu = \
-        self.maxCpu = \
-        self.minLoad = \
-        self.maxLoad = None
-
-    def install_dependencies(self):
-        if self.required_packages and not self._installed:
-            # If we need rpmfusion, we must install the repo definition and the gpg keys
-            if self.rpmFusion:
-                if self.operatingSystem == 'f12':
-                    # Fedora 12 requires a different rpmfusion package
-                    RPM_FUSION_URL = self.RPM_FUSION_URL_F12
-                else:
-                    # This one works for f13+
-                    RPM_FUSION_URL = self.RPM_FUSION_URL
-                    
-                rpmFusion = (
-                  'rpm -q rpmfusion-free-release || sudo -S rpm -i %(RPM_FUSION_URL)s'
-                ) % {
-                    'RPM_FUSION_URL' : RPM_FUSION_URL
-                }
-            else:
-                rpmFusion = ''
-           
-            if rpmFusion:
-                (out,err),proc = server.popen_ssh_command(
-                    rpmFusion,
-                    host = self.hostip,
-                    port = None,
-                    user = self.slicename,
-                    agent = None,
-                    ident_key = self.ident_path,
-                    server_key = self.server_key,
-                    timeout = 600,
-                    )
-                
-                if proc.wait():
-                    if self.check_bad_host(out,err):
-                        self.blacklist()
-                    raise RuntimeError, "Failed to set up application on host %s: %s %s" % (self.hostname, out,err,)
-            
-            # Launch p2p yum dependency installer
-            self._yum_dependencies.async_setup()
-    
-    def wait_provisioning(self, timeout = 20*60):
-        # Wait for the p2p installer
-        sleeptime = 1.0
-        totaltime = 0.0
-        while not self.is_alive():
-            time.sleep(sleeptime)
-            totaltime += sleeptime
-            sleeptime = min(30.0, sleeptime*1.5)
-            
-            if totaltime > timeout:
-                # PlanetLab has a 15' delay on configuration propagation
-                # If we're above that delay, the unresponsiveness is not due
-                # to this delay.
-                if not self.is_alive(verbose=True):
-                    raise UnresponsiveNodeError, "Unresponsive host %s" % (self.hostname,)
-        
-        # Ensure the node is clean (no apps running that could interfere with operations)
-        if self.enable_proc_cleanup:
-            self.do_proc_cleanup()
-        if self.enable_home_cleanup:
-            self.do_home_cleanup()
-   
-    def wait_dependencies(self, pidprobe=1, probe=0.5, pidmax=10, probemax=10):
-        # Wait for the p2p installer
-        if self._yum_dependencies and not self._installed:
-            self._yum_dependencies.async_setup_wait()
-            self._installed = True
-        
-    def is_alive(self, verbose = False):
-        # Make sure all the paths are created where 
-        # they have to be created for deployment
-        (out,err),proc = server.eintr_retry(server.popen_ssh_command)(
-            "echo 'ALIVE'",
-            host = self.hostip,
-            port = None,
-            user = self.slicename,
-            agent = None,
-            ident_key = self.ident_path,
-            server_key = self.server_key,
-            timeout = 60,
-            err_on_timeout = False,
-            persistent = False
-            )
-        
-        if proc.wait():
-            if verbose:
-                self._logger.warn("Unresponsive node %s got:\n%s%s", self.hostname, out, err)
-            return False
-        elif not err and out.strip() == 'ALIVE':
-            return True
-        else:
-            if verbose:
-                self._logger.warn("Unresponsive node %s got:\n%s%s", self.hostname, out, err)
-            return False
-    
-    def destroy(self):
-        if self.enable_proc_cleanup:
-            self.do_proc_cleanup()
-    
-    def blacklist(self):
-        if self._node_id:
-            self._logger.warn("Blacklisting malfunctioning node %s", self.hostname)
-            import util
-            util.appendBlacklist(self.hostname)
-    
-    def do_proc_cleanup(self):
-        if self.testbed().recovering:
-            # WOW - not now
-            return
-            
-        self._logger.info("Cleaning up processes on %s", self.hostname)
-        
-        cmds = [
-            "sudo -S killall python tcpdump || /bin/true ; "
-            "sudo -S killall python tcpdump || /bin/true ; "
-            "sudo -S kill $(ps -N -T -o pid --no-heading | grep -v $PPID | sort) || /bin/true ",
-            "sudo -S killall -u %(slicename)s || /bin/true ",
-            "sudo -S killall -u root || /bin/true ",
-            "sudo -S killall -u %(slicename)s || /bin/true ",
-            "sudo -S killall -u root || /bin/true ",
-        ]
-
-        for cmd in cmds:
-            (out,err),proc = server.popen_ssh_command(
-                # Some apps need two kills
-                cmd % {
-                    'slicename' : self.slicename ,
-                },
-                host = self.hostip,
-                port = None,
-                user = self.slicename,
-                agent = None,
-                ident_key = self.ident_path,
-                server_key = self.server_key,
-                tty = True, # so that ps -N -T works as advertised...
-                timeout = 60,
-                retry = 3
-                )
-            proc.wait()
-     
-    def do_home_cleanup(self):
-        if self.testbed().recovering:
-            # WOW - not now
-            return
-            
-        self._logger.info("Cleaning up home on %s", self.hostname)
-        
-        cmds = [
-            "find . -maxdepth 1  \( -name '.cache' -o -name '.local' -o -name '.config' -o -name 'nepi-*' \) -execdir rm -rf {} + "
-        ]
-
-        for cmd in cmds:
-            (out,err),proc = server.popen_ssh_command(
-                # Some apps need two kills
-                cmd,
-                host = self.hostip,
-                port = None,
-                user = self.slicename,
-                agent = None,
-                ident_key = self.ident_path,
-                server_key = self.server_key,
-                tty = True, # so that ps -N -T works as advertised...
-                timeout = 60,
-                retry = 3
-                )
-            proc.wait()
-   
-    def prepare_dependencies(self):
-        # Configure p2p yum dependency installer
-        if self.required_packages and not self._installed:
-            self._yum_dependencies = application.YumDependency(self._api)
-            self._yum_dependencies.node = self
-            self._yum_dependencies.home_path = "nepi-yumdep"
-            self._yum_dependencies.depends = ' '.join(self.required_packages)
-
-    def routing_method(self, routes, vsys_vnet):
-        """
-        There are two methods, vroute and sliceip.
-        
-        vroute:
-            Modifies the node's routing table directly, validating that the IP
-            range lies within the network given by the slice's vsys_vnet tag.
-            This method is the most scalable for very small routing tables
-            that need not modify other routes (including the default)
-        
-        sliceip:
-            Uses policy routing and iptables filters to create per-sliver
-            routing tables. It's the most flexible way, but it doesn't scale
-            as well since only 155 routing tables can be created this way.
-        
-        This method will return the most appropriate routing method, which will
-        prefer vroute for small routing tables.
-        """
-        
-        # For now, sliceip results in kernel panics
-        # so we HAVE to use vroute
-        return 'vroute'
-        
-        # We should not make the routing table grow too big
-        if len(routes) > MAX_VROUTE_ROUTES:
-            return 'sliceip'
-        
-        vsys_vnet = ipaddr.IPv4Network(vsys_vnet)
-        for route in routes:
-            dest, prefix, nexthop, metric, device = route
-            dest = ipaddr.IPv4Network("%s/%d" % (dest,prefix))
-            nexthop = ipaddr.IPAddress(nexthop)
-            if dest not in vsys_vnet or nexthop not in vsys_vnet:
-                return 'sliceip'
-        
-        return 'vroute'
-    
-    def format_route(self, route, dev, method, action):
-        dest, prefix, nexthop, metric, device = route
-        if method == 'vroute':
-            return (
-                "%s %s%s gw %s %s" % (
-                    action,
-                    dest,
-                    (("/%d" % (prefix,)) if prefix and prefix != 32 else ""),
-                    nexthop,
-                    dev,
-                )
-            )
-        elif method == 'sliceip':
-            return (
-                "route %s to %s%s via %s metric %s dev %s" % (
-                    action,
-                    dest,
-                    (("/%d" % (prefix,)) if prefix and prefix != 32 else ""),
-                    nexthop,
-                    metric or 1,
-                    dev,
-                )
-            )
-        else:
-            raise AssertionError, "Unknown method"
-    
-    def _annotate_routes_with_devs(self, routes, devs, method):
-        dev_routes = []
-        for route in routes:
-            for dev in devs:
-                if dev.routes_here(route):
-                    dev_routes.append(tuple(route) + (dev.if_name,))
-                    
-                    # Stop checking
-                    break
-            else:
-                if method == 'sliceip':
-                    dev_routes.append(tuple(route) + ('eth0',))
-                else:
-                    raise RuntimeError, "Route %s cannot be bound to any virtual interface " \
-                        "- PL can only handle rules over virtual interfaces. Candidates are: %s" % (route,devs)
-        return dev_routes
-    
-    def configure_routes(self, routes, devs, vsys_vnet):
-        """
-        Add the specified routes to the node's routing table
-        """
-        rules = []
-        method = self.routing_method(routes, vsys_vnet)
-        tdevs = set()
-        
-        # annotate routes with devices
-        dev_routes = self._annotate_routes_with_devs(routes, devs, method)
-        for route in dev_routes:
-            route, dev = route[:-1], route[-1]
-            
-            # Schedule rule
-            tdevs.add(dev)
-            rules.append(self.format_route(route, dev, method, 'add'))
-        
-        if method == 'sliceip':
-            rules = map('enable '.__add__, tdevs) + rules
-        
-        self._logger.info("Setting up routes for %s using %s", self.hostname, method)
-        self._logger.debug("Routes for %s:\n\t%s", self.hostname, '\n\t'.join(rules))
-        
-        self.apply_route_rules(rules, method)
-        
-        self._configured_routes = set(routes)
-        self._configured_devs = tdevs
-        self._configured_method = method
-    
-    def reconfigure_routes(self, routes, devs, vsys_vnet):
-        """
-        Updates the routes in the node's routing table to match
-        the given route list
-        """
-        method = self._configured_method
-        
-        dev_routes = self._annotate_routes_with_devs(routes, devs, method)
-
-        current = self._configured_routes
-        current_devs = self._configured_devs
-        
-        new = set(dev_routes)
-        new_devs = set(map(operator.itemgetter(-1), dev_routes))
-        
-        deletions = current - new
-        insertions = new - current
-        
-        dev_deletions = current_devs - new_devs
-        dev_insertions = new_devs - current_devs
-        
-        # Generate rules
-        rules = []
-        
-        # Rule deletions first
-        for route in deletions:
-            route, dev = route[:-1], route[-1]
-            rules.append(self.format_route(route, dev, method, 'del'))
-        
-        if method == 'sliceip':
-            # Dev deletions now
-            rules.extend(map('disable '.__add__, dev_deletions))
-
-            # Dev insertions now
-            rules.extend(map('enable '.__add__, dev_insertions))
-
-        # Rule insertions now
-        for route in insertions:
-            route, dev = route[:-1], dev[-1]
-            rules.append(self.format_route(route, dev, method, 'add'))
-        
-        # Apply
-        self.apply_route_rules(rules, method)
-        
-        self._configured_routes = dev_routes
-        self._configured_devs = new_devs
-        
-    def apply_route_rules(self, rules, method):
-        (out,err),proc = server.popen_ssh_command(
-            "( sudo -S bash -c 'cat /vsys/%(method)s.out >&2' & ) ; sudo -S bash -c 'cat > /vsys/%(method)s.in' ; sleep 0.5" % dict(
-                home = server.shell_escape(self.home_path),
-                method = method),
-            host = self.hostip,
-            port = None,
-            user = self.slicename,
-            agent = None,
-            ident_key = self.ident_path,
-            server_key = self.server_key,
-            stdin = '\n'.join(rules),
-            timeout = 300
-            )
-        
-        if proc.wait() or err:
-            raise RuntimeError, "Could not set routes (%s) errors: %s%s" % (rules,out,err)
-        elif out or err:
-            logger.debug("%s said: %s%s", method, out, err)
-
-    def check_bad_host(self, out, err):
-        badre = re.compile(r'(?:'
-                           #r"curl: [(]\d+[)] Couldn't resolve host 'download1[.]rpmfusion[.]org'"
-                           r'|Error: disk I/O error'
-                           r')', 
-                           re.I)
-        return badre.search(out) or badre.search(err)
-
diff --git a/src/nepi/testbeds/planetlab/plcapi.py b/src/nepi/testbeds/planetlab/plcapi.py
deleted file mode 100644 (file)
index d1d7980..0000000
+++ /dev/null
@@ -1,363 +0,0 @@
-import xmlrpclib
-import functools
-import socket
-import time
-import threading
-
-def _retry(fn):
-    def rv(*p, **kw):
-        for x in xrange(5):
-            try:
-                return fn(*p, **kw)
-            except (socket.error, IOError, OSError):
-                time.sleep(x*5+5)
-        else:
-            return fn (*p, **kw)
-    return rv
-
-class PLCAPI(object):
-
-    _expected_methods = set(
-        ['AddNodeTag', 'AddConfFile', 'DeletePersonTag', 'AddNodeType', 'DeleteBootState', 'SliceListNames', 'DeleteKey', 
-         'SliceGetTicket', 'SliceUsersList', 'SliceUpdate', 'GetNodeGroups', 'SliceCreate', 'GetNetworkMethods', 'GetNodeFlavour', 
-         'DeleteNode', 'BootNotifyOwners', 'AddPersonKey', 'AddNode', 'UpdateNodeGroup', 'GetAddressTypes', 'AddIlink', 'DeleteNetworkType', 
-         'GetInitScripts', 'GenerateNodeConfFile', 'AddSite', 'BindObjectToPeer', 'SliceListUserSlices', 'GetPeers', 'AddPeer', 'DeletePeer', 
-         'AddRole', 'DeleteRole', 'SetPersonPrimarySite', 'AddSiteAddress', 'SliceDelete', 'NotifyPersons', 'GetKeyTypes', 'GetConfFiles', 
-         'GetIlinks', 'AddTagType', 'GetNodes', 'DeleteNodeTag', 'DeleteSliceFromNodesWhitelist', 'UpdateAddress', 'ResetPassword', 
-         'AddSliceToNodesWhitelist', 'AddRoleToTagType', 'AddLeases', 'GetAddresses', 'AddInitScript', 'RebootNode', 'GetPCUTypes', 
-         'RefreshPeer', 'GetBootMedium', 'UpdateKey', 'UpdatePCU', 'GetSession', 'AddInterfaceTag', 'UpdatePCUType', 'GetInterfaces', 
-         'SliceExtendedInfo', 'SliceNodesList', 'DeleteRoleFromTagType', 'DeleteSlice', 'GetSites', 'DeleteMessage', 'GetSliceFamily', 
-         'GetPlcRelease', 'UpdateTagType', 'AddSliceInstantiation', 'ResolveSlices', 'GetSlices', 'DeleteRoleFromPerson', 'GetSessions', 
-         'UpdatePeer', 'VerifyPerson', 'GetPersonTags', 'DeleteKeyType', 'AddSlice', 'SliceUserAdd', 'DeleteSession', 'GetMessages', 
-         'DeletePCU', 'GetPeerData', 'DeletePersonFromSite', 'DeleteTagType', 'GetPCUs', 'UpdateLeases', 'AddMessage', 
-         'DeletePCUProtocolType', 'DeleteInterfaceTag', 'AddPersonToSite', 'GetSlivers', 'SliceNodesDel', 'DeleteAddressTypeFromAddress', 
-         'AddNodeGroup', 'GetSliceTags', 'DeleteSite', 'GetSiteTags', 'UpdateMessage', 'DeleteSliceFromNodes', 'SliceRenew', 
-         'UpdatePCUProtocolType', 'DeleteSiteTag', 'GetPCUProtocolTypes', 'GetEvents', 'GetSliceTicket', 'AddPersonTag', 'BootGetNodeDetails', 
-         'DeleteInterface', 'DeleteNodeGroup', 'AddPCUProtocolType', 'BootCheckAuthentication', 'AddSiteTag', 'AddAddressTypeToAddress', 
-         'DeleteConfFile', 'DeleteInitScript', 'DeletePerson', 'DeleteIlink', 'DeleteAddressType', 'AddBootState', 'AuthCheck', 
-         'NotifySupport', 'GetSliceInstantiations', 'AddPCUType', 'AddPCU', 'AddSession', 'GetEventObjects', 'UpdateSiteTag', 
-         'UpdateNodeTag', 'AddPerson', 'BlacklistKey', 'UpdateInitScript', 'AddSliceToNodes', 'RebootNodeWithPCU', 'GetNodeTags', 
-         'GetSliceKeys', 'GetSliceSshKeys', 'AddNetworkMethod', 'SliceNodesAdd', 'DeletePersonFromSlice', 'ReportRunlevel', 
-         'GetNetworkTypes', 'UpdateSite', 'DeleteConfFileFromNodeGroup', 'UpdateNode', 'DeleteSliceInstantiation', 'DeleteSliceTag', 
-         'BootUpdateNode', 'UpdatePerson', 'UpdateConfFile', 'SliceUserDel', 'DeleteLeases', 'AddConfFileToNodeGroup', 'UpdatePersonTag', 
-         'DeleteConfFileFromNode', 'AddPersonToSlice', 'UnBindObjectFromPeer', 'AddNodeToPCU', 'GetLeaseGranularity', 'DeletePCUType', 
-         'GetTagTypes', 'GetNodeTypes', 'UpdateInterfaceTag', 'GetRoles', 'UpdateSlice', 'UpdateSliceTag', 'AddSliceTag', 'AddNetworkType', 
-         'AddInterface', 'AddAddressType', 'AddRoleToPerson', 'DeleteNodeType', 'GetLeases', 'UpdateInterface', 'SliceInfo', 'DeleteAddress', 
-         'SliceTicketGet', 'GetPersons', 'GetWhitelist', 'AddKeyType', 'UpdateAddressType', 'GetPeerName', 'DeleteNetworkMethod', 
-         'UpdateIlink', 'AddConfFileToNode', 'GetKeys', 'DeleteNodeFromPCU', 'GetInterfaceTags', 'GetBootStates', 'SetInterfaceSens', 'SetNodeLoadm', 
-         'GetInterfaceRate', 'GetNodeLoadw', 'SetInterfaceKey', 'GetNodeSlices', 'GetNodeLoadm', 'SetSliceVref', 'GetInterfaceIwpriv', 'SetNodeLoadw', 
-         'SetNodeSerial', 'GetNodePlainBootstrapfs', 'SetNodeMEMw', 'GetNodeResponse', 'SetInterfaceRate', 'SetSliceInitscript', 
-         'SetNodeFcdistro', 'GetNodeLoady', 'SetNodeArch', 'SetNodeKargs', 'SetNodeMEMm', 'SetNodeBWy', 'SetNodeBWw', 
-         'SetInterfaceSecurityMode', 'SetNodeBWm', 'SetNodeASType', 'GetNodeKargs', 'GetPersonColumnconf', 'GetNodeResponsem', 
-         'GetNodeCPUy', 'GetNodeCramfs', 'SetNodeSlicesw', 'SetPersonColumnconf', 'SetNodeSlicesy', 'GetNodeCPUw', 'GetNodeBWy', 
-         'GetNodeCPUm', 'GetInterfaceDriver', 'GetNodeLoad', 'GetInterfaceMode', 'GetNodeSerial', 'SetNodeSlicesm', 'SetNodeLoady', 
-         'GetNodeReliabilityw', 'SetSliceFcdistro', 'GetNodeReliabilityy', 'SetInterfaceEssid', 'SetSliceInitscriptCode', 
-         'GetNodeExtensions', 'GetSliceOmfControl', 'SetNodeCity', 'SetInterfaceIfname', 'SetNodeHrn', 'SetNodeNoHangcheck', 
-         'GetNodeNoHangcheck', 'GetSliceFcdistro', 'SetNodeCountry', 'SetNodeKvariant', 'GetNodeKvariant', 'GetNodeMEMy', 
-         'SetInterfaceIwpriv', 'GetNodeMEMw', 'SetInterfaceBackdoor', 'GetInterfaceFreq', 'SetInterfaceChannel', 'SetInterfaceNw', 
-         'GetPersonShowconf', 'GetSliceInitscriptCode', 'SetNodeMEM', 'GetInterfaceEssid', 'GetNodeMEMm', 'SetInterfaceMode', 
-         'SetInterfaceIwconfig', 'GetNodeSlicesm', 'GetNodeBWm', 'SetNodePlainBootstrapfs', 'SetNodeRegion', 'SetNodeCPU', 
-         'GetNodeSlicesw', 'SetNodeBW', 'SetNodeSlices', 'SetNodeCramfs', 'GetNodeSlicesy', 'GetInterfaceKey', 'GetSliceInitscript', 
-         'SetNodeCPUm', 'SetSliceArch', 'SetNodeLoad', 'SetNodeResponse', 'GetSliceSliverHMAC', 'GetNodeBWw', 'GetNodeRegion', 
-         'SetNodeMEMy', 'GetNodeASType', 'SetNodePldistro', 'GetSliceArch', 'GetNodeCountry', 'SetSliceOmfControl', 'GetNodeHrn', 
-         'GetNodeCity', 'SetInterfaceAlias', 'GetNodeBW', 'GetNodePldistro', 'GetSlicePldistro', 'SetNodeASNumber', 'GetSliceHmac', 
-         'SetSliceHmac', 'GetNodeMEM', 'GetNodeASNumber', 'GetInterfaceAlias', 'GetSliceVref', 'GetNodeArch', 'GetSliceSshKey', 
-         'GetInterfaceKey4', 'GetInterfaceKey2', 'GetInterfaceKey3', 'GetInterfaceKey1', 'GetInterfaceBackdoor', 'GetInterfaceIfname', 
-         'SetSliceSliverHMAC', 'SetNodeReliability', 'GetNodeCPU', 'SetPersonShowconf', 'SetNodeExtensions', 'SetNodeCPUy', 
-         'SetNodeCPUw', 'GetNodeResponsew', 'SetNodeResponsey', 'GetInterfaceSens', 'SetNodeResponsew', 'GetNodeResponsey', 
-         'GetNodeReliability', 'GetNodeReliabilitym', 'SetNodeResponsem', 'SetInterfaceDriver', 'GetInterfaceSecurityMode', 
-         'SetNodeDeployment', 'SetNodeReliabilitym', 'GetNodeFcdistro', 'SetInterfaceFreq', 'GetInterfaceNw', 'SetNodeReliabilityy', 
-         'SetNodeReliabilityw', 'GetInterfaceIwconfig', 'SetSlicePldistro', 'SetSliceSshKey', 'GetNodeDeployment', 'GetInterfaceChannel', 
-         'SetInterfaceKey2', 'SetInterfaceKey3', 'SetInterfaceKey1', 'SetInterfaceKey4'])
-     
-    _required_methods = set()
-
-    def __init__(self, username=None, password=None, sessionkey=None, proxy=None,
-            hostname = "www.planet-lab.eu",
-            urlpattern = "https://%(hostname)s:443/PLCAPI/",
-            localPeerName = "PLE"):
-        if sessionkey is not None:
-            self.auth = dict(AuthMethod='session', session=sessionkey)
-        elif username is not None and password is not None:
-            self.auth = dict(AuthMethod='password', Username=username, AuthString=password)
-        else:
-            self.auth = dict(AuthMethod='anonymous')
-        
-        self._localPeerName = localPeerName
-        self._url = urlpattern % {'hostname':hostname}
-        if (proxy is not None):
-            import urllib2
-            class HTTPSProxyTransport(xmlrpclib.Transport):
-                def __init__(self, proxy, use_datetime=0):
-                    opener = urllib2.build_opener(urllib2.ProxyHandler({"https" : proxy}))
-                    xmlrpclib.Transport.__init__(self, use_datetime)
-                    self.opener = opener
-                def request(self, host, handler, request_body, verbose=0):
-                    req = urllib2.Request('https://%s%s' % (host, handler), request_body)
-                    req.add_header('User-agent', self.user_agent)
-                    self.verbose = verbose
-                    return self.parse_response(self.opener.open(req))
-            self._proxyTransport = lambda : HTTPSProxyTransport(proxy)
-        else:
-            self._proxyTransport = lambda : None
-        
-        self.threadlocal = threading.local()
-    
-    @property
-    def api(self):
-        # Cannot reuse same proxy in all threads, py2.7 is not threadsafe
-        return xmlrpclib.ServerProxy(
-            self._url ,
-            transport = self._proxyTransport(),
-            allow_none = True)
-        
-    @property
-    def mcapi(self):
-        try:
-            return self.threadlocal.mc
-        except AttributeError:
-            return self.api
-        
-    def test(self):
-        import warnings
-        
-        # validate XMLRPC server checking supported API calls
-        methods = set(_retry(self.mcapi.system.listMethods)())
-        if self._required_methods - methods:
-            warnings.warn("Unsupported REQUIRED methods: %s" % ( ", ".join(sorted(self._required_methods - methods)), ) )
-            return False
-        if self._expected_methods - methods:
-            warnings.warn("Unsupported EXPECTED methods: %s" % ( ", ".join(sorted(self._expected_methods - methods)), ) )
-        
-        try:
-            # test authorization
-            network_types = _retry(self.mcapi.GetNetworkTypes)(self.auth)
-        except (xmlrpclib.ProtocolError, xmlrpclib.Fault),e:
-            warnings.warn(str(e))
-        
-        return True
-    
-    
-    @property
-    def network_types(self):
-        try:
-            return self._network_types
-        except AttributeError:
-            self._network_types = _retry(self.mcapi.GetNetworkTypes)(self.auth)
-            return self._network_types
-    
-    @property
-    def peer_map(self):
-        try:
-            return self._peer_map
-        except AttributeError:
-            peers = _retry(self.mcapi.GetPeers)(self.auth, {}, ['shortname','peername','peer_id'])
-            self._peer_map = dict(
-                (peer['shortname'], peer['peer_id'])
-                for peer in peers
-            )
-            self._peer_map.update(
-                (peer['peername'], peer['peer_id'])
-                for peer in peers
-            )
-            self._peer_map.update(
-                (peer['peer_id'], peer['shortname'])
-                for peer in peers
-            )
-            self._peer_map[None] = self._localPeerName
-            return self._peer_map
-    
-
-    def GetNodeFlavour(self, node):
-        """
-        Returns detailed information on a given node's flavour, i.e. its base installation.
-
-        This depends on the global PLC settings in the PLC_FLAVOUR area, optionnally overridden by any of the following tags if set on that node:
-        'arch', 'pldistro', 'fcdistro', 'deployment', 'extensions'
-        
-        Params:
-        
-            * node : int or string
-                - int, Node identifier
-                - string, Fully qualified hostname
-        
-        Returns:
-
-            struct
-                * extensions : array of string, extensions to add to the base install
-                * fcdistro : string, the fcdistro this node should be based upon
-                * nodefamily : string, the nodefamily this node should be based upon
-                * plain : boolean, use plain bootstrapfs image if set (for tests)  
-        """
-        if not isinstance(node, (str, int, long)):
-            raise ValueError, "Node must be either a non-unicode string or an int"
-        return _retry(self.mcapi.GetNodeFlavour)(self.auth, node)
-    
-    def GetNodes(self, nodeIdOrName=None, fields=None, **kw):
-        """
-        Returns an array of structs containing details about nodes. 
-        If nodeIdOrName is specified and is an array of node identifiers or hostnames, 
-        or the filters keyword argument with struct of node attributes, 
-        or node attributes by keyword argument,
-        only nodes matching the filter will be returned.
-
-        If fields is specified, only the specified details will be returned. 
-        NOTE that if fields is unspecified, the complete set of native fields are returned, 
-        which DOES NOT include tags at this time.
-
-        Some fields may only be viewed by admins.
-        
-        Special params:
-            
-            fields: an optional list of fields to retrieve. The default is all.
-            
-            filters: an optional mapping with custom filters, which is the only
-                way to support complex filters like negation and numeric comparisons.
-                
-            peer: a string (or sequence of strings) with the name(s) of peers
-                to filter - or None for local nodes.
-        """
-        if fields is not None:
-            fieldstuple = (fields,)
-        else:
-            fieldstuple = ()
-        if nodeIdOrName is not None:
-            return _retry(self.mcapi.GetNodes)(self.auth, nodeIdOrName, *fieldstuple)
-        else:
-            filters = kw.pop('filters',{})
-            
-            if 'peer' in kw:
-                peer = kw.pop('peer')
-                
-                nameToId = self.peer_map.get
-                
-                if hasattr(peer, '__iter__'):
-                    # we can't mix local and external nodes, so
-                    # split and re-issue recursively in that case
-                    if None in peer or self._localPeerName in peer:
-                        if None in peer:    
-                            peer.remove(None)
-                        if self._localPeerName in peer:
-                            peer.remove(self._localPeerName)
-                        return (
-                            self.GetNodes(nodeIdOrName, fields, filters=filters, peer=peer, **kw)
-                            + self.GetNodes(nodeIdOrName, fields, filters=filters, peer=None, **kw)
-                        )
-                    else:
-                        peer_filter = map(nameToId, peer)
-                elif peer is None or peer == self._localPeerName:
-                    peer_filter = None
-                else:
-                    peer_filter = nameToId(peer)
-                
-                filters['peer_id'] = peer_filter
-            
-            filters.update(kw)
-            return _retry(self.mcapi.GetNodes)(self.auth, filters, *fieldstuple)
-    
-    def GetNodeTags(self, nodeTagId=None, fields=None, **kw):
-        if fields is not None:
-            fieldstuple = (fields,)
-        else:
-            fieldstuple = ()
-        if nodeTagId is not None:
-            return _retry(self.mcapi.GetNodeTags)(self.auth, nodeTagId, *fieldstuple)
-        else:
-            filters = kw.pop('filters',{})
-            filters.update(kw)
-            return _retry(self.mcapi.GetNodeTags)(self.auth, filters, *fieldstuple)
-
-    def GetSliceTags(self, sliceTagId=None, fields=None, **kw):
-        if fields is not None:
-            fieldstuple = (fields,)
-        else:
-            fieldstuple = ()
-        if sliceTagId is not None:
-            return _retry(self.mcapi.GetSliceTags)(self.auth, sliceTagId, *fieldstuple)
-        else:
-            filters = kw.pop('filters',{})
-            filters.update(kw)
-            return _retry(self.mcapi.GetSliceTags)(self.auth, filters, *fieldstuple)
-    
-    def GetInterfaces(self, interfaceIdOrIp=None, fields=None, **kw):
-        if fields is not None:
-            fieldstuple = (fields,)
-        else:
-            fieldstuple = ()
-        if interfaceIdOrIp is not None:
-            return _retry(self.mcapi.GetInterfaces)(self.auth, interfaceIdOrIp, *fieldstuple)
-        else:
-            filters = kw.pop('filters',{})
-            filters.update(kw)
-            return _retry(self.mcapi.GetInterfaces)(self.auth, filters, *fieldstuple)
-        
-    def GetSlices(self, sliceIdOrName=None, fields=None, **kw):
-        if fields is not None:
-            fieldstuple = (fields,)
-        else:
-            fieldstuple = ()
-        if sliceIdOrName is not None:
-            return _retry(self.mcapi.GetSlices)(self.auth, sliceIdOrName, *fieldstuple)
-        else:
-            filters = kw.pop('filters',{})
-            filters.update(kw)
-            return _retry(self.mcapi.GetSlices)(self.auth, filters, *fieldstuple)
-        
-    def UpdateSlice(self, sliceIdOrName, **kw):
-        return _retry(self.mcapi.UpdateSlice)(self.auth, sliceIdOrName, kw)
-
-    def StartMulticall(self):
-        self.threadlocal.mc = xmlrpclib.MultiCall(self.mcapi)
-    
-    def FinishMulticall(self):
-        mc = self.threadlocal.mc
-        del self.threadlocal.mc
-        return _retry(mc)()
-
-    def GetSliceNodes(self, slicename):
-        return self.GetSlices(slicename, ['node_ids'])[0]['node_ids']
-
-    def AddSliceNodes(self, slicename,  nodes = None):
-        self.UpdateSlice(slicename, nodes = nodes)
-
-    def GetNodeInfo(self, node_id):
-        self.StartMulticall()
-        info = self.GetNodes(node_id)
-        tags = self.GetNodeTags(node_id=node_id, fields=('tagname','value'))
-        info, tags = self.FinishMulticall()
-        return info, tags
-
-    def GetSliceId(self, slicename):
-        slice_id = None
-        slices = self.GetSlices(slicename, fields=('slice_id',))
-        if slices:
-            slice_id = slices[0]['slice_id']
-        # If it wasn't found, don't remember this failure, keep trying
-        return slice_id
-
-    def GetSliceVnetSysTag(self, slicename):
-        slicetags = self.GetSliceTags(
-            name = slicename,
-            tagname = 'vsys_vnet',
-            fields=('value',))
-        if slicetags:
-            return slicetags[0]['value']
-        else:
-            return None
-def plcapi(auth_user, auth_string, plc_host, plc_url, proxy):
-    api = None
-    if auth_user:
-        api = PLCAPI(
-            username = auth_user,
-            password = auth_string,
-            hostname = plc_host,
-            urlpattern = plc_url,
-            proxy = proxy
-        )
-    else:
-        # anonymous access - may not be enough for much
-        api = PLCAPI()
-    return api
-
-
diff --git a/src/nepi/testbeds/planetlab/resourcealloc.py b/src/nepi/testbeds/planetlab/resourcealloc.py
deleted file mode 100644 (file)
index 23add87..0000000
+++ /dev/null
@@ -1,429 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import itertools
-import functools
-import operator
-import random
-import collections
-import heapq
-
-from nepi.util.settools import setclusters
-from nepi.util.settools import classify
-
-class ResourceAllocationError(Exception):
-    pass
-
-def multicardinal(multiset):
-    return sum(quant for c,quant in multiset.iteritems())
-
-def avail(cls, partition):
-    contains = classify.classContains
-    return reduce(operator.or_, 
-        classify.classComponents(cls, partition),
-        set())
-
-def _log(logstream, message, *args, **kwargs):
-    if logstream:
-        if args:
-            logstream.write(message % args)
-        elif kwargs:
-            logstream.write(message % kwargs)
-        else:
-            logstream.write(message)
-        logstream.write('\n')
-
-def alloc(requests, logstream = None, nonseparable = False, saveinteresting = None, backtracklim = 100000000, verbose = True, sample = random.sample):
-    """
-    Takes an iterable over requests, which are iterables of candidate node ids,
-    and returns a specific node id for each request (if successful).
-    
-    If it cannot, it will raise an ResourceAllocationError.
-    """
-    
-    # First, materialize the request iterable
-    requests = map(set,requests)
-    
-    # Classify all candidates
-    universe = reduce(operator.or_, requests, set())
-    partition = setclusters.disjoint_partition(*requests)
-    
-    # Classify requests
-    c_reqlist = classify.classify(requests, partition)
-    c_req = dict(
-        (c,len(r))
-        for c,r in c_reqlist.iteritems()
-    )
-    
-    # Classify universe
-    c_uni = map(len, partition)
-    
-    # Perform invariant sanity checks
-    if multicardinal(c_req) > sum(c_uni):
-        raise ResourceAllocationError, "Insufficient resources to grant request"
-    
-    for c,nreq in c_req.iteritems():
-        if nreq > len(avail(c, partition)):
-            raise ResourceAllocationError, "Insufficient resources to grant request, empty categories %s" % (
-                filter(lambda i : classify.classContains(c,i), xrange(len(c))),
-            )
-
-    # Test for separability
-    if nonseparable:
-        components = clusters = []
-    else:
-        components = [
-            classify.classMembers(c, partition)
-            for c in c_req
-        ]
-        clusters = setclusters.disjoint_sets(*components)
-    
-    if len(clusters) > 1:
-        if verbose:
-            _log(logstream, "\nDetected %d clusters", len(clusters))
-        
-        # Requests are separable
-        # Solve each part separately, then rejoin them
-        
-        # Build a class for each cluster
-        clustermaps = []
-        compmap = dict([(pid,idx) for idx,pid in enumerate(map(id,components))])
-        for cluster in clusters:
-            cluster_class = classify.getClass(
-                reduce(operator.or_, cluster, set()),
-                partition )
-            clustermaps.append(cluster_class)
-        
-        # Build a plan: assign a cluster to each request
-        plan = []
-        for cluster_class in clustermaps:
-            plan_reqs = []
-            for c, c_requests in c_reqlist.iteritems():
-                if classify.isSubclass(cluster_class, c):
-                    plan_reqs.extend(c_requests)
-            plan.append(plan_reqs)
-        
-        # Execute the plan
-        partial_results = []
-        for i,plan_req in enumerate(plan):
-            if verbose:
-                _log(logstream, "Solving cluster %d/%d", i+1, len(plan))
-            partial_results.append(alloc(plan_req, 
-                logstream, 
-                nonseparable = True,
-                saveinteresting = saveinteresting,
-                backtracklim = backtracklim,
-                verbose = verbose))
-        
-        # Join results
-        if verbose:
-            _log(logstream, "Joining partial results")
-        reqmap = dict([(pid,idx) for idx,pid in enumerate(map(id,requests))])
-        joint = [None] * len(requests)
-        for partial_result, partial_requests in zip(partial_results, plan):
-                for assignment, partial_request in zip(partial_result, partial_requests):
-                    joint[reqmap[id(partial_request)]] = assignment
-        
-        return joint
-    else:
-        # Non-separable request, solve
-        #_log(logstream, "Non-separable request")
-        
-        # Solve
-        partial = collections.defaultdict(list)
-        Pavail = list(c_uni)
-        Gavail = dict([
-            (c, len(avail(c, partition)))
-            for c in c_req
-        ])
-        req = dict(c_req)
-        
-        # build a cardinality map
-        cardinality = dict([
-            (c, [classify.classCardinality(c,partition), -nreq])
-            for c,nreq in req.iteritems()
-        ])
-        
-        classContains = classify.classContains
-        isSubclass = classify.isSubclass
-        
-        stats = [
-            0, # ops
-            0, # back tracks
-            0, # skipped branches
-        ]
-        
-        def recursive_alloc():
-            # Successful termination condition: all requests satisfied
-            if not req:
-                return True
-            
-            # Try in cardinality order
-            if quickstage:
-                order = heapq.nsmallest(2, req, key=Gavail.__getitem__)
-            else:
-                order = sorted(req, key=Gavail.__getitem__)
-            
-            # Do backtracking on those whose cardinality leaves a choice
-            # Force a pick when it does not
-            if order and (Gavail[order[0]] <= 1
-                          or classify.classCardinality(order[0]) <= 1):
-                order = order[:1]
-            
-            for c in order:
-                nreq = req[c]
-                #carditem = cardinality[c]
-                for i,bit in enumerate(c):
-                    if bit == "1" and Pavail[i]:
-                        stats[0] += 1 # ops+1
-                        
-                        subreq = min(Pavail[i], nreq)
-                        
-                        # branch sanity check
-                        skip = False
-                        for c2,navail in Gavail.iteritems():
-                            if c2 != c and classContains(c2, i) and (navail - subreq) < req.get(c2,0):
-                                # Fail branch, don't even try
-                                skip = True
-                                break
-                        if skip:
-                            stats[2] += 1 # skipped branches + 1
-                            continue
-                        
-                        # forward track
-                        partial[c].append((i,subreq))
-                        Pavail[i] -= subreq
-                        #carditem[1] -= subreq
-                        
-                        for c2 in Gavail:
-                            if classContains(c2, i):
-                                Gavail[c2] -= subreq
-                        
-                        if subreq < nreq:
-                            req[c] -= subreq
-                        else:
-                            del req[c]
-                        
-                        # Try to solve recursively
-                        success = recursive_alloc()
-                        
-                        if success:
-                            return success
-                        
-                        # Back track
-                        del partial[c][-1]
-                        Pavail[i] += subreq
-                        #carditem[1] += subreq
-                        
-                        for c2 in Gavail:
-                            if classContains(c2, i):
-                                Gavail[c2] += subreq
-                        
-                        if subreq < nreq:
-                            req[c] += subreq
-                        else:
-                            req[c] = subreq
-                        
-                        stats[1] += 1 # backtracks + 1
-                        
-                        if (logstream or (saveinteresting is not None)) and (stats[1] & 0xffff) == 0:
-                            _log(logstream, "%r\n%r\n... stats: ops=%d, backtracks=%d, skipped=%d", Gavail, req,
-                                *stats)
-                            
-                            if stats[1] == 0x1400000:
-                                # Interesting case, log it out
-                                _log(logstream, "... interesting case: %r", requests)
-                                
-                                if saveinteresting is not None:
-                                    saveinteresting.append(requests)
-                if stats[1] > backtracklim:
-                    break
-                            
-            
-            # We tried and tried... and failed
-            return False
-        
-        # First try quickly (assign most selective first exclusively)
-        quickstage = True
-        success = recursive_alloc()
-        if not success:
-            # If it fails, retry exhaustively (try all assignment orders)
-            quickstage = False
-            success = recursive_alloc()
-        
-        if verbose or (not success or stats[1] or stats[2]):
-            _log(logstream, "%s with stats: ops=%d, backtracks=%d, skipped=%d",
-                ("Succeeded" if success else "Failed"),
-                *stats)
-        
-        if not success:
-            raise ResourceAllocationError, "Insufficient resources to grant request"
-        
-        # Perform actual assignment
-        Pavail = map(set, partition)
-        solution = {}
-        for c, partial_assignments in partial.iteritems():
-            psol = set()
-            for i, nreq in partial_assignments:
-                part = Pavail[i]
-                if len(part) < nreq:
-                    raise AssertionError, "Cannot allocate resources for supposedly valid solution!"
-                assigned = set(sample(part, nreq))
-                psol |= assigned
-                part -= assigned
-            solution[c] = psol
-        
-        # Format solution for the caller (return a node id for each request)
-        reqmap = {}
-        for c,reqs in c_reqlist.iteritems():
-            for req in reqs:
-                reqmap[id(req)] = c
-        
-        req_solution = []
-        for req in requests:
-            c = reqmap[id(req)]
-            req_solution.append(solution[c].pop())
-        
-        return req_solution
-
-
-if __name__ == '__main__':
-    def test():
-        import random
-        import sys
-        
-        toughcases = [
-          (False,
-            [[9, 11, 12, 14, 16, 17, 18, 20, 21], 
-             [2], 
-             [2], 
-             [4, 5, 6, 7, 8, 11, 12, 13, 18, 22], 
-             [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], 
-             [6, 10, 11, 13, 14, 15, 16, 18, 20], 
-             [3, 7, 8, 9, 10, 12, 14, 17, 22], 
-             [0, 1, 3, 4, 5, 6, 7, 8, 10, 13, 14, 17, 19, 21, 22], 
-             [16, 22]]),
-          (False,
-            [[2, 10, 0, 3, 4, 8], 
-             [4, 1, 6, 10, 2, 0, 5, 9, 8, 7], 
-             [8, 3, 0, 2, 1, 4, 10, 7, 5], 
-             [8], 
-             [2], 
-             [2, 8], 
-             [2, 7, 8, 3, 1, 0, 9, 10, 5, 4, 6], 
-             [2, 4, 8, 10, 1, 3, 9], 
-             [3, 0, 5]]),
-          (True,
-            [[2, 10, 0, 3, 4, 8], 
-             [4, 1, 6, 10, 2, 0, 5, 9, 8, 7], 
-             [8, 3, 0, 2, 1, 4, 10, 7, 5], 
-             [8], 
-             [2, 8], 
-             [2, 7, 8, 3, 1, 0, 9, 10, 5, 4, 6], 
-             [2, 4, 8, 10, 1, 3, 9], 
-             [3, 0, 5]]),
-        ]
-        
-        # Test tough cases
-        for n,(solvable,req) in enumerate(toughcases):
-            print "Trying #R = %4d, #S = %4d (tough case %d)" % (len(req), len(reduce(operator.or_, map(set,req), set())), n)
-            try:
-                solution = alloc(req, sys.stdout, verbose=False)
-                if solvable:
-                    print "  OK - allocation successful"
-                else:
-                    raise AssertionError, "Case %r had no solution, but got %r" % (req, solution)
-            except ResourceAllocationError: 
-                if not solvable:
-                    print "  OK - allocation not possible"
-                else:
-                    raise AssertionError, "Case %r had a solution, but got none" % (req,)
-        
-        interesting = []
-        
-        suc_mostlypossible = mostlypossible = 0
-        suc_mostlyimpossible = mostlyimpossible = 0
-        suc_huge = huge = 0
-        
-        try:
-            # Fuzzer - mostly impossible cases
-            for i in xrange(10000):
-                nreq = random.randint(1,20)
-                nsrv = random.randint(max(1,nreq-5),50)
-                srv = range(nsrv)
-                req = [
-                    random.sample(srv, random.randint(1,nsrv))
-                    for j in xrange(nreq)
-                ]
-                print "Trying %5d: #R = %4d, #S = %4d... " % (i, nreq, nsrv),
-                sys.stdout.flush()
-                mostlyimpossible += 1
-                try:
-                    solution = alloc(req, sys.stdout, saveinteresting = interesting, verbose=False)
-                    suc_mostlyimpossible += 1
-                    print "  OK - allocation successful  \r",
-                except ResourceAllocationError: 
-                    print "  OK - allocation not possible  \r",
-                except KeyboardInterrupt:
-                    print "ABORTING CASE %r" % (req,)
-                    raise
-                sys.stdout.flush()
-
-            # Fuzzer - mostly possible cases
-            for i in xrange(10000):
-                nreq = random.randint(1,10)
-                nsrv = random.randint(nreq,100)
-                srv = range(nsrv)
-                req = [
-                    random.sample(srv, random.randint(min(nreq,nsrv),nsrv))
-                    for j in xrange(nreq)
-                ]
-                print "Trying %5d: #R = %4d, #S = %4d... " % (i, nreq, nsrv),
-                sys.stdout.flush()
-                mostlypossible += 1
-                try:
-                    solution = alloc(req, sys.stdout, saveinteresting = interesting, verbose=False)
-                    suc_mostlypossible += 1
-                    print "  OK - allocation successful  \r",
-                except ResourceAllocationError: 
-                    print "  OK - allocation not possible  \r",
-                except KeyboardInterrupt:
-                    print "ABORTING CASE %r" % (req,)
-                    raise
-                sys.stdout.flush()
-
-            # Fuzzer - biiig cases
-            for i in xrange(10):
-                nreq = random.randint(1,500)
-                nsrv = random.randint(1,8000)
-                srv = range(nsrv)
-                req = [
-                    random.sample(srv, random.randint(min(nreq,nsrv),nsrv))
-                    for j in xrange(nreq)
-                ]
-                print "Trying %4d: #R = %4d, #S = %4d... " % (i, nreq, nsrv),
-                sys.stdout.flush()
-                huge += 1
-                try:
-                    solution = alloc(req, sys.stdout, saveinteresting = interesting, verbose=False)
-                    suc_huge += 1
-                    print "  OK - allocation successful  \r",
-                except ResourceAllocationError: 
-                    print "  OK - allocation not possible  \r",
-                except KeyboardInterrupt:
-                    print "ABORTING CASE %r" % (req,)
-                    raise
-                sys.stdout.flush()
-        except:
-            print "ABORTING TEST"
-        
-        print "\nSuccess rates:"
-        print "  Mostly possible: %d/%d (%.2f%%)" % (suc_mostlypossible, mostlypossible, 100.0 * suc_mostlypossible / max(1,mostlypossible))
-        print "  Mostly impossible: %d/%d (%.2f%%)" % (suc_mostlyimpossible, mostlyimpossible, 100.0 * suc_mostlyimpossible / max(1,mostlyimpossible))
-        print "  Huge: %d/%d (%.2f%%)" % (suc_huge, huge, 100.0 * suc_huge / max(1,huge))
-        
-        if interesting:
-            print "%d interesting requests:" % (len(interesting),)
-            for n,req in enumerate(interesting):
-                print "Interesting request %d/%d: %r", (n,len(interesting),req,)
-    test()
-
diff --git a/src/nepi/testbeds/planetlab/scripts/classqueue.py b/src/nepi/testbeds/planetlab/scripts/classqueue.py
deleted file mode 100644 (file)
index 01a5dc6..0000000
+++ /dev/null
@@ -1,393 +0,0 @@
-import collections
-import itertools
-import random
-import re
-import sys
-import iovec
-
-dstats = collections.defaultdict(int)
-astats = collections.defaultdict(int)
-dump_count = [0]
-
-_red = True
-_size = 1000
-_classes = (
-    "igmp.ggp.cbt.egp.igp.idrp.mhrp.narp.ospf.eigrp*p1:"
-    "udp.st.nvp.rdp.ddp.pvp.mtp.srp.smp.136:"
-    "tcp.icmp*4:"
-    "ip.gre.etherip.l2tp:"
-    "hopopt.shim6.ipv6.ipv6route.ipv6frag.ipv6icmp.ipv6nonxt.ipv6opts*4:"
-    "crtp.crudp*8:"
-    "*3"
-)
-_logdropped = False
-
-def clsmap(cls):
-    global _protomap
-    if cls in _protomap:
-        return _protomap[cls]
-    elif cls == "":
-        return None
-    else:
-        return int(cls)
-
-def _parse_classes(classes):
-    """
-     Class list structure:
-       <CLASSLIST> ::= <CLASS> ":" CLASSLIST
-                    |  <CLASS>
-       <CLASS>     ::= <PROTOLIST> "*" <PRIORITYSPEC>
-                    |  <DFLTCLASS>
-       <DFLTCLASS> ::= "*" <PRIORITYSPEC>
-       <PROTOLIST> ::= <PROTO> "." <PROTOLIST>
-                    |  <PROTO>
-       <PROTO>     ::= <NAME> | <NUMBER>
-       <NAME>      ::= --see http://en.wikipedia.org/wiki/List_of_IP_protocol_numbers --
-                       --only in lowercase, with special characters removed--
-                       --or see below--
-       <NUMBER>    ::= [0-9]+
-       <PRIORITYSPEC> ::= <THOUGHPUT> [ "#" <SIZE> ] [ "p" <PRIORITY> ]
-       <THOUGHPUT> ::= NUMBER -- default 1
-       <PRIORITY>  ::= NUMBER -- default 0
-       <SIZE>      ::= NUMBER -- default 1
-    """
-    classes = map(lambda x:x.split('*',2),classes.split(':'))
-    priorex = re.compile(r"(?P<thoughput>\d+)?(?:#(?P<size>\d+))?(?:p(?P<priority>\d+))?")
-    for cls in classes:
-        if not cls:
-            cls.append("")
-        if len(cls) < 2:
-            cls.append("")
-        prio = priorex.match(cls[1])
-        if not prio:
-            prio = (1,0,1)
-        else:
-            prio = (
-                int(prio.group("thoughput") or 1),
-                int(prio.group("priority") or 0),
-                int(prio.group("size") or 1),
-            )
-        cls[1] = prio
-        cls[0] = map(clsmap, cls[0].split('.'))
-        if not cls[0]:
-            cls[0] = [None]
-    
-    return classes
-    
-
-class ClassQueue(object):
-    def __init__(self):
-        self.size = _size
-        self.len = 0
-
-        # Prepare classes
-        self.classspec = _parse_classes(_classes)
-
-        self.queues = [ collections.deque() for cls in xrange(len(self.classspec)) ]
-        
-        self.classmap = dict(
-            (proto, cls)
-            for cls, (protos, (thoughput, prio, size)) in enumerate(self.classspec)
-            for proto in protos
-        )
-
-        self.priomap = [
-            prio
-            for cls in xrange(len(self.classspec))
-            for protos, (thoughput, prio, size) in ( self.classspec[cls], )
-        ]
-        
-        self.sizemap = [
-            size * _size
-            for cls in xrange(len(self.classspec))
-            for protos, (thoughput, prio, size) in ( self.classspec[cls], )
-        ]
-        
-        order = [ 
-            cls
-            for cls, (protos, (thoughput, prio, size)) in enumerate(self.classspec)
-            for i in xrange(thoughput)
-        ]
-        self.order = [
-            filter(lambda x : self.priomap[x] == prio, order)
-            for prio in reversed(sorted(set(self.priomap)))
-        ]
-        for order in self.order:
-            random.shuffle(order)
-        
-        if None not in self.classmap:
-            raise RuntimeError, "No default class: a default class must be present"
-        
-        # add retries
-        self.queues.append(collections.deque())
-        self.priomap.append(-1)
-        self.sizemap.append(_size)
-        self.order.insert(0, [len(self.queues)-1])
-        
-        self.classes = set()
-        self.clear()
-    
-    def __nonzero__(self):
-        return self.len > 0
-    
-    def __len__(self):
-        return self.len
-
-    def clear(self):
-        self.classes.clear()
-        self.cycle = None
-        self.cyclelen = None
-        self.cycle_update = True
-        self.len = 0
-        self.queues[:] = [ collections.deque() for cls in xrange(len(self.classspec)) ]
-    
-    def queuefor(self, packet, ord=ord, len=len, classmask=0xEC):
-        if len(packet) >= 10:
-            proto = ord(packet[9])
-            rv = self.classmap.get(proto)
-            if rv is None:
-                rv = self.classmap.get(None)
-        else:
-            proto = 0
-            rv = self.classmap.get(None)
-        return proto, rv, self.sizemap[rv]
-    
-    def get_packetdrop_p(self, qlen, qsize, packet):
-        pdrop = ((qlen * 1.0 / qsize) - 0.5) * 2.0
-        pdrop *= pdrop
-        return pdrop
-    
-    def append(self, packet, len=len, dstats=dstats, astats=astats, rng=random.random):
-        proto,qi,size = self.queuefor(packet)
-        q = self.queues[qi]
-        lq = len(q)
-        if lq < size:
-            dropped = 0
-            if lq > (size/2) and _red:
-                pdrop = self.get_packetdrop_p(lq, size, packet)
-                if rng() < pdrop:
-                    dropped = 1
-            if not dropped:
-                classes = self.classes
-                if qi not in classes:
-                    classes.add(qi)
-                    self.cycle_update = True
-                q.append(packet)
-                self.len += 1
-        # packet dropped
-        else:
-            dropped = 1
-        if _logdropped:
-            if dropped:
-                dstats[proto] += 1
-            else:
-                astats[proto] += 1
-            self.dump_stats()
-        return dropped
-
-    def appendleft(self, packet):
-        self.queues[-1].append(packet)
-        self.len += 1
-    
-    def pop(self, xrange=xrange, len=len, iter=iter, pop=collections.deque.pop):
-        return self.popleft(pop=pop)
-    
-    def popleft(self, xrange=xrange, len=len, iter=iter, enumerate=enumerate, zip=zip, pop=collections.deque.popleft):
-        queues = self.queues
-        classes = self.classes
-
-        if len(classes)==1:
-            # shortcut for non-tos traffic
-            rv = pop(queues[iter(classes).next()])
-            self.len -= 1
-            return rv
-
-        if self.cycle_update:
-            cycle = [
-                filter(classes.__contains__, order)
-                for order in self.order
-            ]
-            self.cycle = map(itertools.cycle, cycle)
-            self.cyclelen = map(len,cycle)
-            self.cycle_update = False
-        
-        for prio, (cycle, cyclelen) in enumerate(zip(self.cycle, self.cyclelen)):
-            cycle = cycle.next
-            for i in xrange(cyclelen):
-                qi = cycle()
-                if qi in classes:
-                    q = queues[qi]
-                    if q:
-                        rv = pop(q)
-                        self.len -= 1
-                        return rv
-                    else:
-                        # Needs to update the cycle
-                        classes.remove(qi)
-                        self.cycle_update = True
-        else:
-            raise IndexError, "pop from an empty queue"
-
-    def dump_stats(self, astats=astats, dstats=dstats, dump_count=dump_count):
-        if dump_count[0] >= 10000:
-            try:
-                dstatsstr = "".join(['%s:%s\n' % (key, value) for key, value in dstats.items()])
-                astatsstr = "".join(['%s:%s\n' % (key, value) for key, value in astats.items()])
-                fd = open('dropped_stats', 'w')
-                iovec.writev(fd.fileno(), "Classes: ", _classes, "\nDropped:\n", dstatsstr, "Accepted:\n", astatsstr)
-                fd.close()
-            except:
-                # who cares
-                pass
-            dump_count[0] = 0
-        else:
-            dump_count[0] += 1
-
-queueclass = ClassQueue
-
-def init(size = 1000, classes = _classes, logdropped = 'False', red = True):
-    global _size, _classes, _logdropped
-    _size = int(size)
-    _classes = classes
-    _red = red
-    _logdropped = logdropped.lower() in ('true','1','on')
-    
-    if _logdropped:
-        # Truncate stats
-        open('dropped_stats', 'w').close()
-
-_protomap = {
-    '3pc'      :       34,
-    'an'       :       107,
-    'ah'       :       51,
-    'argus'    :       13,
-    'aris'     :       104,
-    'ax25'     :       93,
-    'bbn-rcc-mon'      :       10,
-    'bna'      :       49,
-    'brsatmon' :       76,
-    'cbt'      :       7,
-    'cftp'     :       62,
-    'chaos'    :       16,
-    'compaqpeer'       :       110,
-    'cphb'     :       73,
-    'cpnx'     :       72,
-    'crtp'     :       126,
-    'crudp'    :       127,
-    'dccp'     :       33,
-    'dcn-meas' :       19,
-    'ddp'      :       37,
-    'ddx'      :       116,
-    'dgp'      :       86,
-    'egp'      :       8,
-    'eigrp'    :       88,
-    'emcon'    :       14,
-    'encap'    :       98,
-    'esp'      :       50,
-    'etherip'  :       97,
-    'fc'       :       133,
-    'fire'     :       125,
-    'ggp'      :       3,
-    'gmtp'     :       100,
-    'gre'      :       47,
-    'hip'      :       139,
-    'hmp'      :       20,
-    'hopopt'   :       0,
-    'iatp'     :       117,
-    'icmp'     :       1,
-    'idpr'     :       35,
-    'idprcmtp' :       38,
-    'idrp'     :       45,
-    'ifmp'     :       101,
-    'igmp'     :       2,
-    'igp'      :       9,
-    'il'       :       40,
-    'inlsp'    :       52,
-    'ip'       :       4,
-    'ipcomp'   :       108,
-    'ipcv'     :       71,
-    'ipip'     :       94,
-    'iplt'     :       129,
-    'ippc'     :       67,
-    'iptm'     :       84,
-    'ipv6'     :       41,
-    'ipv6frag' :       44,
-    'ipv6icmp' :       58,
-    'ipv6nonxt'        :       59,
-    'ipv6opts' :       60,
-    'ipv6route'        :       43,
-    'ipxinip'  :       111,
-    'irtp'     :       28,
-    'isoip'    :       80,
-    'isotp4'   :       29,
-    'kryptolan'        :       65,
-    'l2tp'     :       115,
-    'larp'     :       91,
-    'leaf1'    :       25,
-    'leaf2'    :       26,
-    'manet'    :       138,
-    'meritinp' :       32,
-    'mfensp'   :       31,
-    'mhrp'     :       48,
-    'micp'     :       95,
-    'mobile'   :       55,
-    'mtp'      :       92,
-    'mux'      :       18,
-    'narp'     :       54,
-    'netblt'   :       30,
-    'nsfnetigp'        :       85,
-    'nvp'      :       11,
-    'ospf'     :       89,
-    'pgm'      :       113,
-    'pim'      :       103,
-    'pipe'     :       131,
-    'pnni'     :       102,
-    'prm'      :       21,
-    'ptp'      :       123,
-    'pup'      :       12,
-    'pvp'      :       75,
-    'qnx'      :       106,
-    'rdp'      :       27,
-    'rsvp'     :       46,
-    'rvd'      :       66,
-    'satexpak' :       64,
-    'satmon'   :       69,
-    'sccsp'    :       96,
-    'scps'     :       105,
-    'sctp'     :       132,
-    'sdrp'     :       42,
-    'securevmtp'       :       82,
-    'shim6'    :       140,
-    'skip'     :       57,
-    'sm'       :       122,
-    'smp'      :       121,
-    'snp'      :       109,
-    'spriterpc'        :       90,
-    'sps'      :       130,
-    'srp'      :       119,
-    'sscopmce' :       128,
-    'st'       :       5,
-    'stp'      :       118,
-    'sunnd'    :       77,
-    'swipe'    :       53,
-    'tcf'      :       87,
-    'tcp'      :       6,
-    'tlsp'     :       56,
-    'tp'       :       39,
-    'trunk1'   :       23,
-    'trunk2'   :       24,
-    'ttp'      :       84,
-    'udp'      :       17,
-    'uti'      :       120,
-    'vines'    :       83,
-    'visa'     :       70,
-    'vmtp'     :       81,
-    'vrrp'     :       112,
-    'wbexpak'  :       79,
-    'wbmon'    :       78,
-    'wsn'      :       74,
-    'xnet'     :       15,
-    'xnsidp'   :       22,
-    'xtp'      :       36
-}
-
diff --git a/src/nepi/testbeds/planetlab/scripts/consts.c b/src/nepi/testbeds/planetlab/scripts/consts.c
deleted file mode 100644 (file)
index 5021eed..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-#include <stdio.h>
-#include <unistd.h>
-#include <sys/socket.h>
-#include <sys/ioctl.h>
-#include <linux/ioctl.h>
-#include <linux/if_tun.h>
-#include <linux/if.h>
-
-int main()
-{
-       printf("ETH_P_ALL = 0x%08x\n", ETH_P_ALL);
-       printf("ETH_P_IP = 0x%08x\n", ETH_P_IP);
-       printf("TUNGETIFF = 0x%08x\n", TUNGETIFF);
-       printf("TUNSETIFF = 0x%08x\n", TUNSETIFF);
-       printf("IFF_NO_PI = 0x%08x\n", IFF_NO_PI);
-       printf("IFF_TAP = 0x%08x\n", IFF_TAP);
-       printf("IFF_TUN = 0x%08x\n", IFF_TUN);
-       printf("IFF_VNET_HDR = 0x%08x\n", IFF_VNET_HDR);
-       printf("TUN_PKT_STRIP = 0x%08x\n", TUN_PKT_STRIP);
-       printf("IFHWADDRLEN = 0x%08x\n", IFHWADDRLEN);
-       printf("IFNAMSIZ = 0x%08x\n", IFNAMSIZ);
-       printf("IFREQ_SZ = 0x%08x\n", sizeof(struct ifreq));
-       printf("FIONREAD = 0x%08x\n", FIONREAD);
-       return 0;
-}
diff --git a/src/nepi/testbeds/planetlab/scripts/loggingclassqueue.py b/src/nepi/testbeds/planetlab/scripts/loggingclassqueue.py
deleted file mode 100644 (file)
index 9b6b8fb..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-import collections
-import itertools
-import random
-import re
-import sys
-import iovec
-import threading
-import time
-import classqueue
-
-_outpath = "output"
-_interval = 0
-
-class QueueLogger(threading.Thread):
-    def __init__(self, queues, drops, accepts, outpath):
-        super(QueueLogger,self).__init__()
-        self.queues = queues
-        self.drops = drops
-        self.accepts = accepts
-        self.outpath = outpath
-        self.setDaemon(True)
-        self._event = threading.Event()
-        self._terminate = False
-    
-    def run(self):
-        if _interval > 0:
-            interval = _interval
-        else:
-            interval = 1
-        
-        t0 = time.time()
-        with open(self.outpath, "w") as outfile:
-            outfile.writelines((",".join(
-                ["time"]
-                + map("q%02dlen".__mod__, xrange(len(self.queues)))
-                + map("q%02ddrops".__mod__, xrange(len(self.queues)))
-                + map("q%02daccepts".__mod__, xrange(len(self.queues)))
-            ), "\n"))
-            
-            while not self._terminate:
-                self._event.wait(interval)
-                if self._terminate:
-                    break
-                
-                self._event.clear()
-                
-                queuelens = map(len,self.queues)
-                
-                queuedrops = list(self.drops)
-                self.drops[:] = [0] * len(self.drops)
-                
-                queueaccepts = list(self.accepts)
-                self.accepts[:] = [0] * len(self.accepts)
-                
-                outfile.writelines((",".join(
-                    [str(time.time()-t0)]
-                    + map(str, queuelens)
-                    + map(str, queuedrops)
-                    + map(str, queueaccepts)
-                ), "\n"))
-                outfile.flush()
-            
-    def terminate(self):
-        self._terminate = True
-        self.wakeup()
-    
-    def wakeup(self):
-        self._event.set()
-
-class LoggingClassQueue(classqueue.ClassQueue):
-    outpath_suffix = itertools.cycle(('_f','_b'))
-    
-    def __init__(self):
-        self.accepts = []
-        self.drops = []
-        super(LoggingClassQueue, self).__init__()
-        
-        # Prepare logger thread
-        self.logger = QueueLogger(self.queues, self.drops, self.accepts, _outpath+self.outpath_suffix.next())
-        self.logger.start()
-    
-    def __del__(self):
-        self.logger.terminate()
-
-    def clear(self):
-        super(LoggingClassQueue, self).clear()
-        self.accepts[:] = [0] * len(self.queues)
-        self.drops[:] = [0] * len(self.queues)
-    
-    def append(self, packet):
-        proto,qi,size = self.queuefor(packet)
-        dropped = super(LoggingClassQueue, self).append(packet)
-        
-        if dropped:
-            self.drops[qi] += 1
-        else:
-            self.accepts[qi] += 1
-        
-        if _interval == 0:
-            self.logger.wakeup()
-        
-        return dropped
-
-queueclass = LoggingClassQueue
-
-def init(outpath="output", interval=0, **kw):
-    global _outpath, _interval
-    _outpath = outpath
-    _interval = float(interval)
-    classqueue.init(**kw)
diff --git a/src/nepi/testbeds/planetlab/scripts/mcastfwd.py b/src/nepi/testbeds/planetlab/scripts/mcastfwd.py
deleted file mode 100644 (file)
index 77262ed..0000000
+++ /dev/null
@@ -1,504 +0,0 @@
-import sys
-
-import signal
-import socket
-import struct
-import optparse
-import threading
-import subprocess
-import re
-import time
-import collections
-import os
-import traceback
-import logging
-
-import ipaddr2
-
-usage = "usage: %prog [options] <enabled-addresses>"
-
-parser = optparse.OptionParser(usage=usage)
-
-parser.add_option(
-    "-d", "--poll-delay", dest="poll_delay", metavar="SECONDS", type="float",
-    default = 1.0,
-    help = "Multicast subscription polling interval")
-parser.add_option(
-    "-D", "--refresh-delay", dest="refresh_delay", metavar="SECONDS", type="float",
-    default = 30.0,
-    help = "Full-refresh interval - time between full IGMP reports")
-parser.add_option(
-    "-p", "--fwd-path", dest="fwd_path", metavar="PATH", 
-    default = "/var/run/mcastfwd",
-    help = "Path of the unix socket in which the program will listen for packets")
-parser.add_option(
-    "-r", "--router-path", dest="mrt_path", metavar="PATH", 
-    default = "/var/run/mcastrt",
-    help = "Path of the unix socket in which the program will listen for routing changes")
-parser.add_option(
-    "-A", "--announce-only", dest="announce_only", action="store_true",
-    default = False,
-    help = "If given, only group membership announcements will be made. "
-           "Useful for non-router non-member multicast nodes.")
-parser.add_option(
-    "-R", "--no-router", dest="no_router", action="store_true",
-    default = False,
-    help = "If given, only group membership announcements and forwarding to the default multicast egress will be made. "
-           "Useful for non-router but member multicast nodes.")
-parser.add_option(
-    "-v", "--verbose", dest="verbose", action="store_true",
-    default = False,
-    help = "Log more verbosely")
-
-(options, remaining_args) = parser.parse_args(sys.argv[1:])
-
-logging.basicConfig(
-    stream=sys.stderr, 
-    level=logging.DEBUG if options.verbose else logging.WARNING)
-
-ETH_P_ALL = 0x00000003
-ETH_P_IP = 0x00000800
-TUNSETIFF = 0x400454ca
-IFF_NO_PI = 0x00001000
-IFF_TAP = 0x00000002
-IFF_TUN = 0x00000001
-IFF_VNET_HDR = 0x00004000
-TUN_PKT_STRIP = 0x00000001
-IFHWADDRLEN = 0x00000006
-IFNAMSIZ = 0x00000010
-IFREQ_SZ = 0x00000028
-FIONREAD = 0x0000541b
-
-class IGMPThread(threading.Thread):
-    def __init__(self, vif_addr, *p, **kw):
-        super(IGMPThread, self).__init__(*p, **kw)
-        
-        vif_addr = vif_addr.strip()
-        self.vif_addr = vif_addr
-        self.igmp_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IGMP)
-        self.igmp_socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF,
-            socket.inet_aton(self.vif_addr) )
-        self.igmp_socket.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
-        self.igmp_socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 1)
-        self._stop = False
-        self.setDaemon(True)
-        
-        # Find tun name
-        proc = subprocess.Popen(['ip','addr','show'],
-            stdout = subprocess.PIPE,
-            stderr = subprocess.STDOUT,
-            stdin = open('/dev/null','r+b') )
-        tun_name = None
-        heading = re.compile(r"\d+:\s*([-a-zA-Z0-9_]+):.*")
-        addr = re.compile(r"\s*inet\s*(\d{1,3}[.]\d{1,3}[.]\d{1,3}[.]\d{1,3}).*")
-        for line in proc.stdout:
-            match = heading.match(line)
-            if match:
-                tun_name = match.group(1)
-            else:
-                match = addr.match(line)
-                if match and match.group(1) == vif_addr:
-                    self.tun_name = tun_name
-                    break
-        else:
-            raise RuntimeError, "Could not find iterface for", vif_addr
-    
-    def run(self):
-        devnull = open('/dev/null','r+b')
-        maddr_re = re.compile(r"\s*inet\s*(\d{1,3}[.]\d{1,3}[.]\d{1,3}[.]\d{1,3})\s*")
-        cur_maddr = set()
-        lastfullrefresh = time.time()
-        vif_addr_i = socket.inet_aton(self.vif_addr)
-        while not self._stop:
-            mirror_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-            
-            for i in xrange(5):
-                # Get current subscriptions @ vif
-                proc = subprocess.Popen(['ip','maddr','show',self.tun_name],
-                    stdout = subprocess.PIPE,
-                    stderr = subprocess.STDOUT,
-                    stdin = devnull)
-                new_maddr = set()
-                for line in proc.stdout:
-                    match = maddr_re.match(line)
-                    if match:
-                        new_maddr.add(match.group(1))
-                proc.wait()
-                if new_maddr:
-                    break
-            
-            for i in xrange(5):
-                # Get current subscriptions @ eth0 (default on PL),
-                # they should be considered "universal" suscriptions.
-                proc = subprocess.Popen(['ip','maddr','show', 'eth0'],
-                    stdout = subprocess.PIPE,
-                    stderr = subprocess.STDOUT,
-                    stdin = devnull)
-                eth_maddr = set()
-                for line in proc.stdout:
-                    match = maddr_re.match(line)
-                    if match:
-                        eth_maddr.add(match.group(1))
-                proc.wait()
-                
-                if eth_maddr:
-                    for maddr in eth_maddr:
-                        try:
-                            mirror_socket.setsockopt(
-                                socket.IPPROTO_IP,
-                                socket.IP_ADD_MEMBERSHIP,
-                                socket.inet_aton(maddr)+vif_addr_i )
-                        except:
-                            traceback.print_exc(file=sys.stderr)
-                    new_maddr.update(eth_maddr)
-                    break
-            
-            # Every now and then, send a full report
-            now = time.time()
-            report_new = new_maddr
-            if (now - lastfullrefresh) <= options.refresh_delay:
-                report_new = report_new - cur_maddr
-            else:
-                lastfullrefresh = now
-            
-            # Report subscriptions
-            for grp in report_new:
-                print >>sys.stderr, "JOINING", grp
-                igmpp = ipaddr2.ipigmp(
-                    self.vif_addr, grp, 1, 0x16, 0, grp, 
-                    noipcksum=True)
-                try:
-                    self.igmp_socket.sendto(igmpp, 0, (grp,0))
-                except:
-                    traceback.print_exc(file=sys.stderr)
-
-            # Notify group leave
-            for grp in cur_maddr - new_maddr:
-                print >>sys.stderr, "LEAVING", grp
-                igmpp = ipaddr2.ipigmp(
-                    self.vif_addr, '224.0.0.2', 1, 0x17, 0, grp, 
-                    noipcksum=True)
-                try:
-                    self.igmp_socket.sendto(igmpp, 0, ('224.0.0.2',0))
-                except:
-                    traceback.print_exc(file=sys.stderr)
-
-            cur_maddr = new_maddr
-            
-            time.sleep(options.poll_delay)
-    
-    def stop(self):
-        self._stop = True
-        self.join(1+5*options.poll_delay)
-
-
-class FWDThread(threading.Thread):
-    def __init__(self, rt_cache, router_socket, vifs, *p, **kw):
-        super(FWDThread, self).__init__(*p, **kw)
-        
-        self.in_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
-        self.in_socket.bind(options.fwd_path)
-        
-        self.pending = collections.deque()
-        self.maxpending = 1000
-        self.rt_cache = rt_cache
-        self.router_socket = router_socket
-        self.vifs = vifs
-        
-        # prepare forwarding sockets 
-        self.fwd_sockets = {}
-        for fwd_target in remaining_args:
-            fwd_target = socket.inet_aton(fwd_target)
-            fwd_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
-            fwd_socket.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
-            fwd_socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, fwd_target)
-            fwd_socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 1)
-            self.fwd_sockets[fwd_target] = fwd_socket
-        
-        self._stop = False
-        self.setDaemon(True)
-    
-    def run(self):
-        in_socket = self.in_socket
-        rt_cache = self.rt_cache
-        vifs = self.vifs
-        router_socket = self.router_socket
-        len_ = len
-        ord_ = ord
-        str_ = str
-        pending = self.pending
-        in_socket.settimeout(options.poll_delay)
-        buffer_ = buffer
-        enumerate_ = enumerate
-        fwd_sockets = self.fwd_sockets
-        npending = 0
-        npendingpop = 0
-        noent = (None,None)
-        verbose = options.verbose
-        
-        while not self._stop:
-            # Get packet
-            try:
-                if pending and npending:
-                    packet = pending.pop()
-                    npending -= 1
-                    npendingpop += 1
-                    if npendingpop > 10:
-                        # Don't hurry too much, 
-                        # we'll saturate the kernel's queue
-                        time.sleep(0)
-                else:
-                    npendingpop = 0
-                    packet = in_socket.recv(2000)
-            except socket.timeout, e:
-                if pending and not npending:
-                    npending = len_(pending)
-                continue
-            if not packet or len_(packet) < 24:
-                continue
-            
-            fullpacket = packet
-            parent = packet[:4]
-            packet = buffer_(packet,4)
-            
-            if packet[9] == '\x02':
-                # IGMP packet? It's for mrouted
-                # unless it's coming from it
-                # NOTE: mrouted already picks it up when it's sent
-                #       to the virtual interface. Injecting it would
-                #       only duplicate it.
-                #if router_socket and packet[12:16] not in fwd_sockets:
-                #    try:
-                #        router_socket.send(packet)
-                #    except:
-                #        traceback.print_exc(file=sys.stderr)
-                continue
-            elif packet[9] == '\x00':
-                # LOOPING packet, discard
-                continue
-            
-            # To-Do: PIM asserts
-            
-            # Get route
-            addrinfo = packet[12:20]
-            fwd_targets, rparent = rt_cache.get(addrinfo, noent)
-            
-            if fwd_targets is not None and (rparent == '\x00\x00\x00\x00' or rparent == parent):
-                # Forward to vifs
-                ttl = ord_(packet[8])
-                tgt_group = (socket.inet_ntoa(addrinfo[4:]),0)
-                if verbose:
-                    print >>sys.stderr, map(socket.inet_ntoa, (parent, addrinfo[:4], addrinfo[4:])), "-> ttl", ttl,
-                nfwd_targets = len_(fwd_targets)
-                for vifi, vif in vifs.iteritems():
-                    if vifi < nfwd_targets:
-                        ttl_thresh = ord_(fwd_targets[vifi])
-                        if ttl_thresh > 0 and ttl > ttl_thresh:
-                            if vif[4] in fwd_sockets:
-                                try:
-                                    if verbose:
-                                        print >>sys.stderr, socket.inet_ntoa(vif[4]),
-                                    fwd_socket = fwd_sockets[vif[4]]
-                                    fwd_socket.sendto(packet, 0, tgt_group)
-                                except Exception,e:
-                                    print >>sys.stderr, "ERROR: forwarding packet:", str(e)
-                
-                if verbose:
-                    print >>sys.stderr, "."
-            elif router_socket:
-                # Mark pending
-                if len_(pending) < self.maxpending:
-                    if verbose:
-                        tgt_group = addrinfo[4:]
-                        print >>sys.stderr, map(socket.inet_ntoa, (parent, addrinfo[:4], addrinfo[4:])), "-> ?"
-                    
-                    pending.append(fullpacket)
-                    
-                    # Notify mrouted by forwarding it with protocol 0
-                    router_socket.send(''.join(
-                        (packet[:9],'\x00',packet[10:]) ))
-    
-    def stop(self):
-        self._stop = True
-        self.join(1+5*options.poll_delay)
-
-
-class RouterThread(threading.Thread):
-    def __init__(self, rt_cache, router_socket, vifs, *p, **kw):
-        super(RouterThread, self).__init__(*p, **kw)
-        
-        self.rt_cache = rt_cache
-        self.vifs = vifs
-        self.router_socket = router_socket
-
-        self._stop = False
-        self.setDaemon(True)
-    
-    def run(self):
-        rt_cache = self.rt_cache
-        vifs = self.vifs
-        addr_vifs = {}
-        router_socket = self.router_socket
-        router_socket.settimeout(options.poll_delay)
-        len_ = len
-        buffer_ = buffer
-        
-        buf = ""
-        
-        MRT_BASE       = 200
-        MRT_ADD_VIF    = MRT_BASE+2    # Add a virtual interface               
-        MRT_DEL_VIF    = MRT_BASE+3    # Delete a virtual interface            
-        MRT_ADD_MFC    = MRT_BASE+4    # Add a multicast forwarding entry      
-        MRT_DEL_MFC = MRT_BASE+5       # Delete a multicast forwarding entry   
-        
-        def cmdhdr(cmd, unpack=struct.unpack, buffer=buffer):
-            op,dlen = unpack('II', buffer(cmd,0,8))
-            cmd = buffer(cmd,8)
-            return op,dlen,cmd
-        def vifctl(data, unpack=struct.unpack):
-            #vifi, flags,threshold,rate_limit,lcl_addr,rmt_addr = unpack('HBBI4s4s', data)
-            return unpack('HBBI4s4s', data)
-        def mfcctl(data, unpack=struct.unpack):
-            #origin,mcastgrp,parent,ttls,pkt_cnt,byte_cnt,wrong_if,expire = unpack('4s4sH10sIIIi', data)
-            return unpack('4s4sH32sIIIi', data)
-        
-        
-        def add_vif(cmd):
-            vifi = vifctl(cmd)
-            vifs[vifi[0]] = vifi
-            addr_vifs[vifi[4]] = vifi[0]
-            print >>sys.stderr, "Added VIF", vifi
-        def del_vif(cmd):
-            vifi = vifctl(cmd)
-            vifi = vifs[vifi[0]]
-            del addr_vifs[vifi[4]]
-            del vifs[vifi[0]]
-            print >>sys.stderr, "Removed VIF", vifi
-        def add_mfc(cmd):
-            origin,mcastgrp,parent,ttls,pkt_cnt,byte_cnt,wrong_if,expire = mfcctl(data)
-            if parent in vifs:
-                parent_addr = vifs[parent][4]
-            else:
-                parent_addr = '\x00\x00\x00\x00'
-            addrinfo = origin + mcastgrp
-            rt_cache[addrinfo] = (ttls, parent_addr)
-            print >>sys.stderr, "Added RT", '-'.join(map(socket.inet_ntoa,(parent_addr,origin,mcastgrp))), map(ord,ttls)
-        def del_mfc(cmd):
-            origin,mcastgrp,parent,ttls,pkt_cnt,byte_cnt,wrong_if,expire = mfcctl(data)
-            if parent in vifs:
-                parent_addr = vifs[parent][4]
-            else:
-                parent_addr = '\x00\x00\x00\x00'
-            addrinfo = origin + mcastgrp
-            del rt_cache[addrinfo]
-            print >>sys.stderr, "Removed RT", '-'.join(map(socket.inet_ntoa,(parent_addr,origin,mcastgrp)))
-        
-        commands = {
-            MRT_ADD_VIF : add_vif,
-            MRT_DEL_VIF : del_vif,
-            MRT_ADD_MFC : add_mfc,
-            MRT_DEL_MFC : del_mfc,
-        }
-
-        while not self._stop:
-            if len_(buf) < 8 or len_(buf) < (cmdhdr(buf)[1]+8):
-                # Get cmd
-                try:
-                    cmd = router_socket.recv(2000)
-                except socket.timeout, e:
-                    continue
-                if not cmd:
-                    print >>sys.stderr, "PLRT CONNECTION BROKEN"
-                    TERMINATE.append(None)
-                    break
-            
-            if buf:
-                buf += cmd
-                cmd = buf
-            
-            if len_(cmd) < 8:
-                continue
-            
-            op,dlen,data = cmdhdr(cmd)
-            if len_(data) < dlen:
-                continue
-            
-            buf = buffer_(data, dlen)
-            data = buffer_(data, 0, dlen)
-            
-            print >>sys.stderr, "COMMAND", op, "DATA", dlen
-            
-            if op in commands:
-                try:
-                    commands[op](data)
-                except:
-                    traceback.print_exc(file=sys.stderr)
-            else:
-                print >>sys.stderr, "IGNORING UNKNOWN COMMAND", op
-    
-    def stop(self):
-        self._stop = True
-        self.join(1+5*options.poll_delay)
-
-
-
-igmp_threads = []
-valid_vifs = []
-for vif_addr in remaining_args:
-    try:
-        igmp_threads.append(IGMPThread(vif_addr))
-        valid_vifs.append(vif_addr)
-    except:
-        traceback.print_exc()
-        print >>sys.stderr, "WARNING: could not listen on interface", vif_addr
-
-remaining_args = valid_vifs
-
-rt_cache = {}
-vifs = {}
-
-TERMINATE = []
-TERMINATE = []
-def _finalize(sig,frame):
-    global TERMINATE
-    TERMINATE.append(None)
-signal.signal(signal.SIGTERM, _finalize)
-
-
-try:
-    if not options.announce_only and not options.no_router:
-        router_socket = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
-        router_socket.bind(options.mrt_path)
-        router_socket.listen(0)
-        router_remote_socket, router_remote_addr = router_socket.accept()
-        router_thread = RouterThread(rt_cache, router_remote_socket, vifs)
-    else:
-        router_remote_socket = None
-        router_thread = None
-
-    if not options.announce_only:
-        fwd_thread = FWDThread(rt_cache, router_remote_socket, vifs)
-
-    for thread in igmp_threads:
-        thread.start()
-    
-    if not options.announce_only:
-        fwd_thread.start()
-    if not options.no_router and not options.announce_only:
-        router_thread.start()
-
-    while not TERMINATE:
-        time.sleep(30)
-finally:
-    if os.path.exists(options.mrt_path):
-        try:
-            os.remove(options.mrt_path)
-        except:
-            pass
-    if os.path.exists(options.fwd_path):
-        try:
-            os.remove(options.fwd_path)    
-        except:
-            pass
-
-
diff --git a/src/nepi/testbeds/planetlab/scripts/mrouted-3.9.5-pl.patch b/src/nepi/testbeds/planetlab/scripts/mrouted-3.9.5-pl.patch
deleted file mode 100644 (file)
index 172864d..0000000
+++ /dev/null
@@ -1,555 +0,0 @@
-diff -ur --exclude='*.o' --exclude=cfparse.c --exclude='*~' mrouted-3.9.5/config.c mrouted-3.9.5-pl/config.c
---- mrouted-3.9.5/config.c     2011-03-05 21:45:25.000000000 +0100
-+++ mrouted-3.9.5-pl/config.c  2011-08-29 16:32:43.710565000 +0200
-@@ -91,11 +91,41 @@
-       v->uv_subnetbcast = subnet | ~mask;
-       strncpy(v->uv_name, ifa->ifa_name, sizeof(v->uv_name));
--      if (flags & IFF_POINTOPOINT)
-+      if (flags & IFF_POINTOPOINT) {
-           v->uv_flags |= VIFF_REXMIT_PRUNES;
-+          v->uv_ptp_addr = ((struct sockaddr_in *)ifa->ifa_dstaddr)->sin_addr.s_addr;
-+      } 
-+      {   /* getifaddr doesn't give us the p2p link reliably */
-+          /* So use ip, which uses netlink, to query it */
-+          char buf[1024];
-+        size_t rode;
-+        FILE *peer;
-+        
-+        logit(LOG_INFO,0,"Getting ptp for %s", ifa->ifa_name);
-+        
-+        snprintf(buf,sizeof(buf),"ip addr show %s | grep -o 'peer [0-9.]*' | grep -o '[0-9.]*'", ifa->ifa_name);
-+        peer = popen(buf, "r");
-+        rode = fread(buf, 1, sizeof(buf), peer);
-+        pclose(peer);
-+        
-+        if (rode > 0) {
-+            /* It has a pointopoint address */
-+            struct in_addr ptp_in;
-+            
-+            for (--rode; rode && buf[rode] <= 13;)
-+                --rode;
-+            buf[++rode] = 0;
-+
-+            logit(LOG_INFO,0,"Got %s", buf);
-+
-+              if (inet_aton(buf, &ptp_in))
-+                  v->uv_ptp_addr = ptp_in.s_addr;
-+        } 
-+      }
--      logit(LOG_INFO,0,"installing %s (%s on subnet %s) as vif #%u - rate=%d",
-+      logit(LOG_INFO,0,"installing %s (%s on subnet %s%s%s) as vif #%u - rate=%d",
-           v->uv_name, inet_fmt(addr, s1, sizeof(s1)), inet_fmts(subnet, mask, s2, sizeof(s2)),
-+          (v->uv_ptp_addr) ? "peer " : "", (v->uv_ptp_addr) ? inet_fmt(v->uv_ptp_addr, s3, sizeof(s3)) : "",
-           numvifs, v->uv_rate_limit);
-       ++numvifs;
-diff -ur --exclude='*.o' --exclude=cfparse.c --exclude='*~' mrouted-3.9.5/defs.h mrouted-3.9.5-pl/defs.h
---- mrouted-3.9.5/defs.h       2011-03-05 21:45:25.000000000 +0100
-+++ mrouted-3.9.5-pl/defs.h    2011-08-30 15:30:23.691662000 +0200
-@@ -9,6 +9,8 @@
- #ifndef __MROUTED_DEFS_H__
- #define __MROUTED_DEFS_H__
-+#define PLANETLAB 1
-+
- #include <stdio.h>
- #include <stdlib.h>
- #include <unistd.h>
-@@ -28,6 +30,7 @@
- #include <sys/time.h>
- #include <sys/uio.h>
- #include <net/if.h>
-+#include <arpa/inet.h>
- #include <netinet/in.h>
- #include <netinet/in_systm.h>
- #include <netinet/ip.h>
-@@ -61,7 +64,7 @@
- #include <libutil.h>
- #endif
- #endif
--#ifdef RSRR
-+#if defined(RSRR) || defined(PLANETLAB)
- #include <sys/un.h>
- #endif /* RSRR */
-@@ -137,6 +140,11 @@
- extern u_int32                dvmrp_group;
- extern u_int32                dvmrp_genid;
-+#ifdef PLANETLAB
-+extern int      plrt_socket;
-+extern char     *plrt_socket_path;
-+#endif
-+
- #define       IF_DEBUG(l)     if (debug && debug & (l))
- #define       DEBUG_PKT       0x0001
-@@ -353,6 +361,8 @@
- extern void           k_leave(u_int32, u_int32);
- extern void           k_init_dvmrp(void);
- extern void           k_stop_dvmrp(void);
-+extern void     k_init_plrt(void);
-+extern void     k_stop_plrt(void);
- extern void           k_add_vif(vifi_t, struct uvif *);
- extern void           k_del_vif(vifi_t, struct uvif *);
- extern void           k_add_rg(u_int32, struct gtable *);
-diff -ur --exclude='*.o' --exclude=cfparse.c --exclude='*~' mrouted-3.9.5/igmp.c mrouted-3.9.5-pl/igmp.c
---- mrouted-3.9.5/igmp.c       2011-03-05 21:45:25.000000000 +0100
-+++ mrouted-3.9.5-pl/igmp.c    2011-08-29 17:03:24.187961000 +0200
-@@ -190,8 +190,10 @@
-     if (ip->ip_p == 0) {
-       if (src == 0 || dst == 0)
-           logit(LOG_WARNING, 0, "kernel request not accurate");
--      else
-+      else {
-+          logit(LOG_DEBUG, 0, "received kernel miss");
-           add_table_entry(src, dst);
-+    }
-       return;
-     }
-diff -ur --exclude='*.o' --exclude=cfparse.c --exclude='*~' mrouted-3.9.5/kern.c mrouted-3.9.5-pl/kern.c
---- mrouted-3.9.5/kern.c       2011-03-05 21:45:25.000000000 +0100
-+++ mrouted-3.9.5-pl/kern.c    2011-08-31 15:09:21.457071000 +0200
-@@ -7,16 +7,26 @@
-  * Leland Stanford Junior University.
-  */
-+#include <paths.h>
- #include "defs.h"
- int curttl = 0;
-+#ifdef PLANETLAB
-+
-+int plrt_socket;
-+char *plrt_socket_path = _PATH_MROUTED_PLRT;
-+
-+#endif
-+
- /*
-  * Open/init the multicast routing in the kernel and sets the
-  * MRT_PIM (aka MRT_ASSERT) flag in the kernel.
-  */
- void k_init_dvmrp(void)
- {
-+#ifndef PLANETLAB
-+
- #ifdef OLD_KERNEL
-     if (setsockopt(igmp_socket, IPPROTO_IP, MRT_INIT, (char *)NULL, 0) < 0) {
- #else
-@@ -24,11 +34,34 @@
-     if (setsockopt(igmp_socket, IPPROTO_IP, MRT_INIT, (char *)&v, sizeof(int)) < 0) {
- #endif
-+
-       if (errno == EADDRINUSE)
-           logit(LOG_ERR, 0, "Another multicast routing application is already running.");
-       else
-           logit(LOG_ERR, errno, "Cannot enable multicast routing in kernel");
-     }
-+
-+#endif
-+}
-+
-+void k_init_plrt(void)
-+{
-+#ifdef PLANETLAB
-+    /* 
-+     * Just open a connection to the user-space forwarder
-+     */
-+
-+    if ((plrt_socket = socket(AF_UNIX, SOCK_SEQPACKET, 0)) < 0) 
-+      logit(LOG_ERR, errno, "PLRT socket");
-+      
-+      struct sockaddr_un sun;
-+      memset(&sun, 0, sizeof(sun));
-+      sun.sun_family = AF_UNIX;
-+      strncpy(sun.sun_path, plrt_socket_path, sizeof(sun.sun_path));
-+      
-+    if ((connect(plrt_socket, &sun, sizeof(sun))) < 0) 
-+      logit(LOG_ERR, errno, "PLRT socket connect");
-+#endif
- }
-@@ -38,8 +71,17 @@
-  */
- void k_stop_dvmrp(void)
- {
-+#ifndef PLANETLAB
-     if (setsockopt(igmp_socket, IPPROTO_IP, MRT_DONE, (char *)NULL, 0) < 0)
-       logit(LOG_WARNING, errno, "Cannot disable multicast routing in kernel");
-+#endif
-+}
-+
-+void k_stop_plrt(void)
-+{
-+#ifdef PLANETLAB
-+    close(plrt_socket);
-+#endif
- }
-@@ -194,11 +236,25 @@
-  */
- void k_add_vif(vifi_t vifi, struct uvif *v)
- {
--    struct vifctl vc;
--
--    vc.vifc_vifi = vifi;
--    uvif_to_vifctl(&vc, v);
--    if (setsockopt(igmp_socket, IPPROTO_IP, MRT_ADD_VIF, (char *)&vc, sizeof(vc)) < 0)
-+    /* 
-+     * PlanetLab does application-level forwarding
-+     */
-+    struct {
-+        u_int32 op;
-+        u_int32 len;
-+        struct vifctl vc;
-+    } op;
-+
-+    op.vc.vifc_vifi = vifi;
-+    uvif_to_vifctl(&op.vc, v);
-+
-+#ifdef PLANETLAB
-+    op.op = MRT_ADD_VIF;
-+    op.len = sizeof(op.vc);
-+    if (send(plrt_socket, &op, sizeof(op), 0) < 0)
-+#else
-+    if (setsockopt(igmp_socket, IPPROTO_IP, MRT_ADD_VIF, (char *)&op.vc, sizeof(op.vc)) < 0)
-+#endif
-       logit(LOG_ERR, errno, "setsockopt MRT_ADD_VIF on vif %d", vifi);
- }
-@@ -214,6 +270,20 @@
-      * we're going to delete.  *BSD systems on the other hand exepect only the index
-      * of that VIF.
-      */
-+#ifdef PLANETLAB
-+    struct {
-+        u_int32 op;
-+        u_int32 len;
-+        struct vifctl vc;
-+    } op;
-+
-+    op.vc.vifc_vifi = vifi;
-+    uvif_to_vifctl(&op.vc, v);
-+
-+    op.op = MRT_DEL_VIF;
-+    op.len = sizeof(op.vc);
-+    if (send(plrt_socket, &op, sizeof(op), 0) < 0)
-+#else
- #ifdef __linux__
-     struct vifctl vc;
-@@ -224,6 +294,7 @@
- #else /* *BSD et al. */
-     if (setsockopt(igmp_socket, IPPROTO_IP, MRT_DEL_VIF, (char *)&vifi, sizeof(vifi)) < 0)
- #endif /* !__linux__ */
-+#endif /* PLANETLAB */
-     {
-         if (errno == EADDRNOTAVAIL || errno == EINVAL)
-             return;
-@@ -238,31 +309,57 @@
-  */
- void k_add_rg(u_int32 origin, struct gtable *g)
- {
--    struct mfcctl mc;
-+    struct {
-+        u_int32 op;
-+        u_int32 len;
-+        struct mfcctl mc;
-+    } op;
-+    
-     vifi_t i;
- #ifdef DEBUG_MFC
-     md_log(MD_ADD, origin, g->gt_mcastgrp);
- #endif
-     /* copy table values so that setsockopt can process it */
--    mc.mfcc_origin.s_addr = origin;
-+    op.mc.mfcc_origin.s_addr = origin;
- #ifdef OLD_KERNEL
--    mc.mfcc_originmask.s_addr = 0xffffffff;
-+    op.mc.mfcc_originmask.s_addr = 0xffffffff;
- #endif
--    mc.mfcc_mcastgrp.s_addr = g->gt_mcastgrp;
--    mc.mfcc_parent = g->gt_route ? g->gt_route->rt_parent : NO_VIF;
-+    op.mc.mfcc_mcastgrp.s_addr = g->gt_mcastgrp;
-+    op.mc.mfcc_parent = g->gt_route ? g->gt_route->rt_parent : NO_VIF;
-     for (i = 0; i < numvifs; i++)
--      mc.mfcc_ttls[i] = g->gt_ttls[i];
-+      op.mc.mfcc_ttls[i] = g->gt_ttls[i];
-+
-+#ifdef PLANETLAB
-+
-+      logit(LOG_DEBUG, 0, "setsockopt MRT_ADD_MFC %s-%s %d[%d %d %d %d %d %d]", 
-+          inet_fmt(origin, s1, sizeof(s1)),
-+          inet_fmt(g->gt_mcastgrp, s2, sizeof(s2)),
-+          numvifs,
-+          op.mc.mfcc_ttls[0], op.mc.mfcc_ttls[1], op.mc.mfcc_ttls[2],
-+          op.mc.mfcc_ttls[3], op.mc.mfcc_ttls[4], op.mc.mfcc_ttls[5] );
-+
-+    /* Send to PlanetLab's user-space MRT daemon */
-+    op.op = MRT_ADD_MFC;
-+    op.len = sizeof(op.mc);
-+    if (send(plrt_socket, &op, sizeof(op), 0) < 0)
-+
-+#else /* here if not PLANETLAB */
-     /* write to kernel space */
-     if (setsockopt(igmp_socket, IPPROTO_IP, MRT_ADD_MFC,
--                 (char *)&mc, sizeof(mc)) < 0) {
-+                 (char *)&op.mc, sizeof(op.mc)) < 0) 
-+   
-+#endif /* PLANETLAB */
-+   
-+    {
- #ifdef DEBUG_MFC
-       md_log(MD_ADD_FAIL, origin, g->gt_mcastgrp);
- #endif
-       logit(LOG_WARNING, errno, "setsockopt MRT_ADD_MFC",
-               inet_fmt(origin, s1, sizeof(s1)), inet_fmt(g->gt_mcastgrp, s2, sizeof(s2)));
-     }
-+
- }
-@@ -271,20 +368,37 @@
-  */
- int k_del_rg(u_int32 origin, struct gtable *g)
- {
--    struct mfcctl mc;
-+    struct {
-+        u_int32 op;
-+        u_int32 len;
-+        struct mfcctl mc;
-+    } op;
- #ifdef DEBUG_MFC
-     md_log(MD_DEL, origin, g->gt_mcastgrp);
- #endif
-     /* copy table values so that setsockopt can process it */
--    mc.mfcc_origin.s_addr = origin;
-+    op.mc.mfcc_origin.s_addr = origin;
- #ifdef OLD_KERNEL
--    mc.mfcc_originmask.s_addr = 0xffffffff;
-+    op.mc.mfcc_originmask.s_addr = 0xffffffff;
- #endif
--    mc.mfcc_mcastgrp.s_addr = g->gt_mcastgrp;
-+    op.mc.mfcc_mcastgrp.s_addr = g->gt_mcastgrp;
-+
-+#ifdef PLANETLAB
-+
-+    /* Send to PlanetLab's user-space MRT daemon */
-+    op.op = MRT_DEL_MFC;
-+    op.len = sizeof(op.mc);
-+    if (send(plrt_socket, &op, sizeof(op), 0) < 0)
-+
-+#else /* here if not PLANETLAB */
-     /* write to kernel space */
--    if (setsockopt(igmp_socket, IPPROTO_IP, MRT_DEL_MFC, (char *)&mc, sizeof(mc)) < 0) {
-+    if (setsockopt(igmp_socket, IPPROTO_IP, MRT_DEL_MFC, (char *)&op.mc, sizeof(op.mc)) < 0) 
-+
-+#endif /* PLANETLAB */
-+    
-+    {
- #ifdef DEBUG_MFC
-       md_log(MD_DEL_FAIL, origin, g->gt_mcastgrp);
- #endif
-diff -ur --exclude='*.o' --exclude=cfparse.c --exclude='*~' mrouted-3.9.5/main.c mrouted-3.9.5-pl/main.c
---- mrouted-3.9.5/main.c       2011-03-05 21:45:25.000000000 +0100
-+++ mrouted-3.9.5-pl/main.c    2011-08-30 15:31:01.281814000 +0200
-@@ -52,9 +52,9 @@
- time_t mrouted_init_time;
- #ifdef SNMP
--#define NHANDLERS     34
-+#define NHANDLERS     35
- #else
--#define NHANDLERS     2
-+#define NHANDLERS     3
- #endif
- static struct ihandler {
-@@ -206,6 +206,9 @@
-     fputs("  -h, --help           Show this help text\n", stderr);
-     fputs("  -p                   Disable pruning.  Deprecated, compatibility option\n", stderr);
-     fputs("  -r, --show-routes    Show state of VIFs and multicast routing tables\n", stderr);
-+#ifdef PLANETLAB
-+    fputs("  -F                   Path to the PlanetLab userland forwarder, default /var/run/mcastfwd\n", stderr);
-+#endif
-     fprintf(stderr, "  -v, --version        Show %s version\n", __progname);
-     fputs("\n", stderr);
-@@ -258,11 +261,23 @@
-     snprintf(versionstring, sizeof(versionstring), "mrouted version %s", todaysversion);
--    while ((ch = getopt_long(argc, argv, "c:d::fhpP::rv", long_options, NULL)) != EOF) {
-+#ifdef PLANETLAB
-+#define PLOPTIONS ":F"
-+#else
-+#define PLOPTIONS ""
-+#endif
-+
-+    while ((ch = getopt_long(argc, argv, "c:d::fhpP::rv" PLOPTIONS, long_options, NULL)) != EOF) {
-       switch (ch) {
-           case 'c':
-               configfilename = optarg;
-               break;
-+              
-+#ifdef PLANETLAB
-+              case 'F':
-+              plrt_socket_path = optarg;
-+              break;
-+#endif
-           case 'd':
-               if (!optarg)
-@@ -410,6 +425,8 @@
-     init_ipip();
-     init_routes();
-     init_ktable();
-+    k_init_plrt();
-+
- #ifndef OLD_KERNEL
-     /*
-      * Unfortunately, you can't k_get_version() unless you've
-@@ -422,6 +439,7 @@
-     k_init_dvmrp();
-     vers = k_get_version();
-     k_stop_dvmrp();
-+    
-     /*XXX
-      * This function must change whenever the kernel version changes
-      */
-@@ -466,6 +484,15 @@
-        logit(LOG_ERR, 0, "Descriptor too big");
-     FD_SET(igmp_socket, &readers);
-     nfds = igmp_socket + 1;
-+    
-+#ifdef PLANETLAB
-+    if (plrt_socket >= (int)FD_SETSIZE)
-+       logit(LOG_ERR, 0, "Descriptor too big");
-+    FD_SET(plrt_socket, &readers);
-+    if (plrt_socket >= nfds)
-+        nfds = plrt_socket + 1;
-+#endif
-+
-     for (i = 0; i < nhandlers; i++) {
-       if (ihandlers[i].fd >= (int)FD_SETSIZE)
-           logit(LOG_ERR, 0, "Descriptor too big");
-@@ -602,6 +629,17 @@
-               accept_igmp(recvlen);
-           }
-+#ifdef PLANETLAB
-+          if (FD_ISSET(plrt_socket, &rfds)) {
-+              recvlen = recvfrom(plrt_socket, recv_buf, RECV_BUF_SIZE, 0, NULL, &dummy);
-+              if (recvlen < 0) {
-+                  if (errno != EINTR) logit(LOG_ERR, errno, "recvfrom");
-+                  continue;
-+              }
-+              if (did_final_init) accept_igmp(recvlen);
-+          }
-+#endif
-+
-           for (i = 0; i < nhandlers; i++) {
-               if (FD_ISSET(ihandlers[i].fd, &rfds)) {
-                   (*ihandlers[i].func)(ihandlers[i].fd, &rfds);
-@@ -808,6 +846,7 @@
-       if (did_final_init)
-           k_stop_dvmrp();
-     }
-+    k_stop_plrt();
- }
- /*
-diff -ur --exclude='*.o' --exclude=cfparse.c --exclude='*~' mrouted-3.9.5/Makefile mrouted-3.9.5-pl/Makefile
---- mrouted-3.9.5/Makefile     2011-03-05 21:45:25.000000000 +0100
-+++ mrouted-3.9.5-pl/Makefile  2011-08-10 17:18:22.233596000 +0200
-@@ -43,7 +43,7 @@
- ## Common
- CFLAGS        = $(MCAST_INCLUDE) $(SNMPDEF) $(RSRRDEF) $(INCLUDES) $(DEFS) $(USERCOMPILE)
--CFLAGS       += -O2 -W -Wall -Werror
-+CFLAGS       += -O2 -W -Wall
- #CFLAGS       += -O -g
- LDLIBS        = $(SNMPLIBDIR) $(SNMPLIBS) $(EXTRA_LIBS)
- LDFLAGS      += -Wl,-Map,$@.map
-Only in mrouted-3.9.5-pl: Makefile.bk
-Only in mrouted-3.9.5-pl: map-mbone
-Only in mrouted-3.9.5-pl: map-mbone.map
-Only in mrouted-3.9.5-pl: mrinfo
-Only in mrouted-3.9.5-pl: mrinfo.map
-Only in mrouted-3.9.5-pl: mrouted
-Only in mrouted-3.9.5-pl: mrouted.map
-Only in mrouted-3.9.5-pl: mtrace
-Only in mrouted-3.9.5-pl: mtrace.map
-diff -ur --exclude='*.o' --exclude=cfparse.c --exclude='*~' mrouted-3.9.5/pathnames.h mrouted-3.9.5-pl/pathnames.h
---- mrouted-3.9.5/pathnames.h  2011-03-05 21:45:25.000000000 +0100
-+++ mrouted-3.9.5-pl/pathnames.h       2011-08-29 13:10:27.043684000 +0200
-@@ -14,5 +14,6 @@
- #define _PATH_MROUTED_GENID   _PATH_MROUTED_RUNDIR "/mrouted.genid"
- #define _PATH_MROUTED_DUMP    _PATH_MROUTED_RUNDIR "/mrouted.dump"
- #define _PATH_MROUTED_CACHE   _PATH_MROUTED_RUNDIR "/mrouted.cache"
-+#define _PATH_MROUTED_PLRT    _PATH_VARRUN "mcastrt"
- #endif /* __MROUTED_PATHNAMES_H__ */
-Only in mrouted-3.9.5-pl: vers.c
-diff -ur --exclude='*.o' --exclude=cfparse.c --exclude='*~' mrouted-3.9.5/vif.c mrouted-3.9.5-pl/vif.c
---- mrouted-3.9.5/vif.c        2011-03-05 21:45:25.000000000 +0100
-+++ mrouted-3.9.5-pl/vif.c     2011-08-31 19:26:52.955453000 +0200
-@@ -139,6 +139,7 @@
-     v->uv_lcl_addr    = 0;
-     v->uv_rmt_addr    = 0;
-     v->uv_dst_addr    = t ? 0 : dvmrp_group;
-+    v->uv_ptp_addr  = 0;
-     v->uv_subnet      = 0;
-     v->uv_subnetmask  = 0;
-     v->uv_subnetbcast = 0;
-@@ -379,6 +380,8 @@
-           start_route_updates();
-           update_route(p->pa_subnet, p->pa_subnetmask, 0, 0, vifi, NULL);
-       }
-+      if (v->uv_ptp_addr)
-+          update_route(v->uv_ptp_addr, 0xffffffff, 0, 0, vifi, NULL);
-       /*
-        * Until neighbors are discovered, assume responsibility for sending
-@@ -526,6 +529,8 @@
-                   return(vifi);
-           }
-           else {
-+              if (src == v->uv_ptp_addr)
-+                  return(vifi);
-               if ((src & v->uv_subnetmask) == v->uv_subnet &&
-                   ((v->uv_subnetmask == 0xffffffff) ||
-                    (src != v->uv_subnetbcast)))
-@@ -1666,6 +1671,10 @@
-                       scaletime(now - v->uv_querier->al_ctime),
-                       scaletime(v->uv_querier->al_timer));
-       }
-+      if (0 != v->uv_ptp_addr) {
-+          fprintf(fp, "                     PtP remote: %-18s\n",
-+                                  inet_fmt(v->uv_ptp_addr, s1, sizeof(s1)));
-+      }
-       if (v->uv_flags & VIFF_BLASTER)
-           fprintf(fp, "                  blasterbuf size: %dk\n",
-                       v->uv_blasterlen / 1024);
-diff -ur --exclude='*.o' --exclude=cfparse.c --exclude='*~' mrouted-3.9.5/vif.h mrouted-3.9.5-pl/vif.h
---- mrouted-3.9.5/vif.h        2011-03-05 21:45:25.000000000 +0100
-+++ mrouted-3.9.5-pl/vif.h     2011-08-29 14:35:35.695829000 +0200
-@@ -109,6 +109,7 @@
-     u_int32        uv_lcl_addr;   /* local address of this vif            */
-     u_int32        uv_rmt_addr;   /* remote end-point addr (tunnels only) */
-     u_int32        uv_dst_addr;   /* destination for DVMRP/PIM messages   */
-+    u_int32        uv_ptp_addr;   /* remote peer address   (pointopoint only) */
-     u_int32        uv_subnet;     /* subnet number         (phyints only) */
-     u_int32        uv_subnetmask; /* subnet mask           (phyints only) */
-     u_int32        uv_subnetbcast;/* subnet broadcast addr (phyints only) */
diff --git a/src/nepi/testbeds/planetlab/scripts/plr50.c b/src/nepi/testbeds/planetlab/scripts/plr50.c
deleted file mode 100644 (file)
index b573c73..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-#include <stdlib.h>
-#include <stdio.h>
-
-static int plr = 50;
-
-int init(const char* args)
-{
-    int seed;
-    int rv;
-    seed = 1234;
-    rv = sscanf(args, "plr=%d,seed=%d", &plr, &seed);
-    srand(seed);
-    return rv;
-}
-
-int accept_packet(const char* packet, int direction)
-{
-    return (direction != 0) || (rand() > (RAND_MAX/100*plr));
-}
-
diff --git a/src/nepi/testbeds/planetlab/scripts/plr50.py b/src/nepi/testbeds/planetlab/scripts/plr50.py
deleted file mode 100644 (file)
index b7dde17..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-import random
-
-_plr = 0.5
-
-random.seed(1234)
-
-def init(plr):
-    global _plr
-    _plr = float(plr) / 100.0
-
-def accept_packet(packet, direction, rng=random.random):
-    return direction or rng() > _plr
-
-
diff --git a/src/nepi/testbeds/planetlab/scripts/tosqueue.py b/src/nepi/testbeds/planetlab/scripts/tosqueue.py
deleted file mode 100644 (file)
index 1d6a61a..0000000
+++ /dev/null
@@ -1,113 +0,0 @@
-import collections
-import itertools
-import random
-
-_size = 1000
-
-class TOSQueue(object):
-    def __init__(self):
-        self.size = _size
-        self.queues = collections.defaultdict(collections.deque)
-        self.retries = collections.deque()
-        self.len = 0
-        
-        # Prepare collection order
-        self.order = [
-            (precedence << 5) | (thoughput << 3) | (reliability << 2)
-            for precedence in xrange(7,-1,-1) 
-            for thoughput in (0,1,1)
-            for reliability in (0,1)
-        ]
-        random.shuffle(self.order)
-        
-        self.cycle = None
-        self.cyclelen = None
-        self.cycle_update = True
-        self.classes = set()
-    
-    def __nonzero__(self):
-        return self.len > 0
-    
-    def __len__(self):
-        return self.len
-    
-    def clear(self):
-        self.classes.clear()
-        self.cycle = None
-        self.cyclelen = None
-        self.cycle_update = True
-        self.len = 0
-        self.queues.clear()
-        self.retries = collections.deque()
-    
-    def queuefor(self, packet, ord=ord, len=len, classmask=0xEC):
-        if len(packet) >= 2:
-            tos = ord(packet[1])
-            return (tos & classmask, tos & 0x10)
-        else:
-            return (0,0)
-    
-    def append(self, packet, len=len):
-        qi,urgent = self.queuefor(packet)
-        q = self.queues[qi]
-        if len(q) < _size:
-            classes = self.classes
-            if qi not in classes:
-                classes.add(qi)
-                self.cycle_update = True
-            if urgent:
-                q.appendleft(packet)
-            else:
-                q.append(packet)
-            self.len += 1
-
-    def appendleft(self, packet):
-        self.retries.append(packet)
-        self.len += 1
-    
-    def pop(self, xrange=xrange, len=len, iter=iter, pop=collections.deque.pop):
-        return self.popleft(pop=pop)
-    
-    def popleft(self, xrange=xrange, len=len, iter=iter, pop=collections.deque.popleft):
-        if self.retries:
-            rv = pop(self.retries)
-            self.len -= 1
-            return rv
-        
-        queues = self.queues
-        classes = self.classes
-        
-        if len(classes)==1:
-            # shortcut for non-tos traffic
-            rv = pop(queues[iter(classes).next()])
-            self.len -= 1
-            return rv
-        
-        if self.cycle_update:
-            cycle = filter(classes.__contains__, self.order)
-            self.cycle = itertools.cycle(cycle)
-            self.cyclelen = len(cycle)
-            self.cycle_update = False
-
-        cycle = self.cycle.next
-        for i in xrange(self.cyclelen):
-            qi = cycle()
-            if qi in classes:
-                q = queues[qi]
-                if q:
-                    rv = pop(q)
-                    self.len -= 1
-                    return rv
-                else:
-                    # Needs to update the cycle
-                    classes.remove(qi)
-                    self.cycle_update = True
-        else:
-            raise IndexError, "pop from an empty queue"
-
-queueclass = TOSQueue
-
-def init(size):
-    global _size
-    _size = size
-
diff --git a/src/nepi/testbeds/planetlab/scripts/tun_connect.py b/src/nepi/testbeds/planetlab/scripts/tun_connect.py
deleted file mode 100644 (file)
index 0812d7c..0000000
+++ /dev/null
@@ -1,978 +0,0 @@
-import sys
-
-import socket
-import fcntl
-import os
-import os.path
-import select
-import signal
-
-import struct
-import ctypes
-import optparse
-import threading
-import subprocess
-import re
-import functools
-import time
-import base64
-import traceback
-from Queue import Queue
-
-import tunchannel
-
-try:
-    import iovec
-    HAS_IOVEC = True
-except:
-    HAS_IOVEC = False
-
-tun_name = 'tun0'
-tun_path = '/dev/net/tun'
-hostaddr = socket.gethostbyname(socket.gethostname())
-
-usage = "usage: %prog [options]"
-
-parser = optparse.OptionParser(usage=usage)
-
-parser.add_option(
-    "-i", "--iface", dest="tun_name", metavar="DEVICE",
-    default = "tun0",
-    help = "TUN/TAP interface to tap into")
-parser.add_option(
-    "-d", "--tun-path", dest="tun_path", metavar="PATH",
-    default = "/dev/net/tun",
-    help = "TUN/TAP device file path or file descriptor number")
-parser.add_option(
-    "-p", "--peer-port", dest="peer_port", metavar="PEER_PORT", type="int",
-    default = 15000,
-    help = "Remote TCP/UDP port to connect to.")
-parser.add_option(
-    "--pass-fd", dest="pass_fd", metavar="UNIX_SOCKET",
-    default = None,
-    help = "Path to a unix-domain socket to pass the TUN file descriptor to. "
-           "If given, all other connectivity options are ignored, tun_connect will "
-           "simply wait to be killed after passing the file descriptor, and it will be "
-           "the receiver's responsability to handle the tunneling.")
-parser.add_option(
-    "-m", "--mode", dest="mode", metavar="MODE",
-    default = "none",
-    help = 
-        "Set mode. One of none, tun, tap, pl-tun, pl-tap, pl-gre-ip, pl-gre-eth. In any mode except none, a TUN/TAP will be created "
-        "by using the proper interface (tunctl for tun/tap, /vsys/fd_tuntap.control for pl-tun/pl-tap), "
-        "and it will be brought up (with ifconfig for tun/tap, with /vsys/vif_up for pl-tun/pl-tap). You have "
-        "to specify an VIF_ADDRESS and VIF_MASK in any case (except for none).")
-parser.add_option(
-    "-t", "--protocol", dest="protocol", metavar="PROTOCOL",
-    default = None,
-    help = 
-        "Set protocol. One of tcp, udp, fd, gre. In any mode except none, a TUN/TAP will be created.")
-parser.add_option(
-    "-A", "--vif-address", dest="vif_addr", metavar="VIF_ADDRESS",
-    default = None,
-    help = 
-        "See mode. This specifies the VIF_ADDRESS, "
-        "the IP address of the virtual interface.")
-parser.add_option(
-    "-M", "--vif-mask", dest="vif_mask", type="int", metavar="VIF_MASK", 
-    default = None,
-    help = 
-        "See mode. This specifies the VIF_MASK, "
-        "a number indicating the network type (ie: 24 for a C-class network).")
-parser.add_option(
-    "-P", "--port", dest="port", type="int", metavar="PORT", 
-    default = None,
-    help = 
-        "This specifies the LOCAL_PORT. This will be the local bind port for UDP/TCP.")
-parser.add_option(
-    "-S", "--vif-snat", dest="vif_snat", 
-    action = "store_true",
-    default = False,
-    help = "See mode. This specifies whether SNAT will be enabled for the virtual interface. " )
-parser.add_option(
-    "-Z", "--vif-pointopoint", dest="vif_pointopoint",  metavar="DST_ADDR",
-    default = None,
-    help = 
-        "See mode. This specifies the remote endpoint's virtual address, "
-        "for point-to-point routing configuration. "
-        "Not supported by PlanetLab" )
-parser.add_option(
-    "-Q", "--vif-txqueuelen", dest="vif_txqueuelen", metavar="SIZE", type="int",
-    default = None,
-    help = 
-        "See mode. This specifies the interface's transmission queue length. " )
-parser.add_option(
-    "-b", "--bwlimit", dest="bwlimit", metavar="BYTESPERSECOND", type="int",
-    default = None,
-    help = 
-        "This specifies the interface's emulated bandwidth in bytes per second." )
-parser.add_option(
-    "-a", "--peer-address", dest="peer_addr", metavar="PEER_ADDRESS",
-    default = None,
-    help = 
-        "This specifies the PEER_ADDRESS, "
-        "the IP address of the remote interface.")
-parser.add_option(
-    "-k", "--key", dest="cipher_key", metavar="KEY",
-    default = None,
-    help = 
-        "Specify a symmetric encryption key with which to protect packets across "
-        "the tunnel. python-crypto must be installed on the system." )
-parser.add_option(
-    "-K", "--gre-key", dest="gre_key", metavar="KEY", type="string",
-    default = "true",
-    help = 
-        "Specify a demultiplexing 32-bit numeric key for GRE." )
-parser.add_option(
-    "-C", "--cipher", dest="cipher", metavar="CIPHER",
-    default = 'AES',
-    help = "One of PLAIN, AES, Blowfish, DES, DES3. " )
-parser.add_option(
-    "-N", "--no-capture", dest="no_capture", 
-    action = "store_true",
-    default = False,
-    help = "If specified, packets won't be logged to standard output "
-           "(default is to log them to standard output). " )
-parser.add_option(
-    "-c", "--pcap-capture", dest="pcap_capture", metavar="FILE",
-    default = None,
-    help = "If specified, packets won't be logged to standard output, "
-           "but dumped to a pcap-formatted trace in the specified file. " )
-parser.add_option(
-    "--multicast-forwarder", dest="multicast_fwd", 
-    default = None,
-    help = "If specified, multicast packets will be forwarded to "
-           "the specified unix-domain socket. If the device uses ethernet "
-           "frames, ethernet headers will be stripped and IP packets "
-           "will be forwarded, prefixed with the interface's address." )
-parser.add_option(
-    "--filter", dest="filter_module", metavar="PATH",
-    default = None,
-    help = "If specified, it should be either a .py or .so module. "
-           "It will be loaded, and all incoming and outgoing packets "
-           "will be routed through it. The filter will not be responsible "
-           "for buffering, packet queueing is performed in tun_connect "
-           "already, so it should not concern itself with it. It should "
-           "not, however, block in one direction if the other is congested.\n"
-           "\n"
-           "Modules are expected to have the following methods:\n"
-           "\tinit(**args)\n"
-           "\t\tIf arguments are given, this method will be called with the\n"
-           "\t\tgiven arguments (as keyword args in python modules, or a single\n"
-           "\t\tstring in c modules).\n"
-           "\taccept_packet(packet, direction):\n"
-           "\t\tDecide whether to drop the packet. Direction is 0 for packets "
-               "coming from the local side to the remote, and 1 is for packets "
-               "coming from the remote side to the local. Return a boolean, "
-               "true if the packet is not to be dropped.\n"
-           "\tfilter_init():\n"
-           "\t\tInitializes a filtering pipe (filter_run). It should "
-               "return two file descriptors to use as a bidirectional "
-               "pipe: local and remote. 'local' is where packets from the "
-               "local side will be written to. After filtering, those packets "
-               "should be written to 'remote', where tun_connect will read "
-               "from, and it will forward them to the remote peer. "
-               "Packets from the remote peer will be written to 'remote', "
-               "where the filter is expected to read from, and eventually "
-               "forward them to the local side. If the file descriptors are "
-               "not nonblocking, they will be set to nonblocking. So it's "
-               "better to set them from the start like that.\n"
-           "\tfilter_run(local, remote):\n"
-           "\t\tIf filter_init is provided, it will be called repeatedly, "
-               "in a separate thread until the process is killed. It should "
-               "sleep at most for a second.\n"
-           "\tfilter_close(local, remote):\n"
-           "\t\tCalled then the process is killed, if filter_init was provided. "
-               "It should, among other things, close the file descriptors.\n"
-           "\n"
-           "Python modules are expected to return a tuple in filter_init, "
-           "either of file descriptors or file objects, while native ones "
-           "will receive two int*.\n"
-           "\n"
-           "Python modules can additionally contain a custom queue class "
-           "that will replace the FIFO used by default. The class should "
-           "be named 'queueclass' and contain an interface compatible with "
-           "collections.deque. That is, indexing (especiall for q[0]), "
-           "bool(q), popleft, appendleft, pop (right), append (right), "
-           "len(q) and clear. When using a custom queue, queue size will "
-           "have no effect, pass an effective queue size to the module "
-           "by using filter_args" )
-parser.add_option(
-    "--filter-args", dest="filter_args", metavar="FILE",
-    default = None,
-    help = "If specified, packets won't be logged to standard output, "
-           "but dumped to a pcap-formatted trace in the specified file. " )
-
-(options,args) = parser.parse_args(sys.argv[1:])
-
-options.cipher = {
-    'aes' : 'AES',
-    'des' : 'DES',
-    'des3' : 'DES3',
-    'blowfish' : 'Blowfish',
-    'plain' : None,
-}[options.cipher.lower()]
-
-ETH_P_ALL = 0x00000003
-ETH_P_IP = 0x00000800
-TUNSETIFF = 0x400454ca
-IFF_NO_PI = 0x00001000
-IFF_TAP = 0x00000002
-IFF_TUN = 0x00000001
-IFF_VNET_HDR = 0x00004000
-TUN_PKT_STRIP = 0x00000001
-IFHWADDRLEN = 0x00000006
-IFNAMSIZ = 0x00000010
-IFREQ_SZ = 0x00000028
-FIONREAD = 0x0000541b
-
-class HostLock(object):
-    # This class is used as a lock to prevent concurrency issues with more
-    # than one instance of netns running in the same machine. Both in 
-    # different processes or different threads.
-    taken = False
-    processcond = threading.Condition()
-    
-    def __init__(self, lockfile):
-        processcond = self.__class__.processcond
-        
-        processcond.acquire()
-        try:
-            # It's not reentrant
-            while self.__class__.taken:
-                processcond.wait()
-            self.__class__.taken = True
-        finally:
-            processcond.release()
-        
-        self.lockfile = lockfile
-        
-        while True:
-            try:
-                fcntl.flock(self.lockfile, fcntl.LOCK_EX)
-                break
-            except (OSError, IOError), e:
-                if e.args[0] != os.errno.EINTR:
-                    raise
-    
-    def __del__(self):
-        processcond = self.__class__.processcond
-        
-        processcond.acquire()
-        try:
-            if not self.lockfile.closed:
-                fcntl.flock(self.lockfile, fcntl.LOCK_UN)
-            
-            # It's not reentrant
-            self.__class__.taken = False
-            processcond.notify()
-        finally:
-            processcond.release()
-
-def ifnam(x):
-    return x+'\x00'*(IFNAMSIZ-len(x))
-
-def ifreq(iface, flags):
-    # ifreq contains:
-    #   char[IFNAMSIZ] : interface name
-    #   short : flags
-    #   <padding>
-    ifreq = ifnam(iface)+struct.pack("H",flags);
-    ifreq += '\x00' * (len(ifreq)-IFREQ_SZ)
-    return ifreq
-
-def tunopen(tun_path, tun_name):
-    if tun_path.isdigit():
-        # open TUN fd
-        print >>sys.stderr, "Using tun:", tun_name, "fd", tun_path
-        tun = os.fdopen(int(tun_path), 'r+b', 0)
-    else:
-        # open TUN path
-        print >>sys.stderr, "Using tun:", tun_name, "at", tun_path
-        tun = open(tun_path, 'r+b', 0)
-
-        # bind file descriptor to the interface
-        fcntl.ioctl(tun.fileno(), TUNSETIFF, ifreq(tun_name, IFF_NO_PI|IFF_TUN))
-    
-    return tun
-
-def tunclose(tun_path, tun_name, tun):
-    if tun_path and tun_path.isdigit():
-        # close TUN fd
-        os.close(int(tun_path))
-        tun.close()
-    elif tun:
-        # close TUN object
-        tun.close()
-
-def noopen(tun_path, tun_name):
-    print >>sys.stderr, "Using tun:", tun_name
-    return None
-def noclose(tun_path, tun_name, tun):
-    pass
-
-def tuntap_alloc(kind, tun_path, tun_name):
-    args = ["tunctl"]
-    if kind == "tun":
-        args.append("-n")
-    if tun_name:
-        args.append("-t")
-        args.append(tun_name)
-    proc = subprocess.Popen(args, stdout=subprocess.PIPE)
-    out,err = proc.communicate()
-    if proc.wait():
-        raise RuntimeError, "Could not allocate %s device" % (kind,)
-        
-    match = re.search(r"Set '(?P<dev>(?:tun|tap)[0-9]*)' persistent and owned by .*", out, re.I)
-    if not match:
-        raise RuntimeError, "Could not allocate %s device - tunctl said: %s" % (kind, out)
-    
-    tun_name = match.group("dev")
-    print >>sys.stderr, "Allocated %s device: %s" % (kind, tun_name)
-    
-    return tun_path, tun_name
-
-def tuntap_dealloc(tun_path, tun_name):
-    args = ["tunctl", "-d", tun_name]
-    proc = subprocess.Popen(args, stdout=subprocess.PIPE)
-    out,err = proc.communicate()
-    if proc.wait():
-        print >> sys.stderr, "WARNING: error deallocating %s device" % (tun_name,)
-
-def nmask_to_dot_notation(mask):
-    mask = hex(((1 << mask) - 1) << (32 - mask)) # 24 -> 0xFFFFFF00
-    mask = mask[2:] # strip 0x
-    mask = mask.decode("hex") # to bytes
-    mask = '.'.join(map(str,map(ord,mask))) # to 255.255.255.0
-    return mask
-
-def vif_start(tun_path, tun_name):
-    args = ["ifconfig", tun_name, options.vif_addr, 
-            "netmask", nmask_to_dot_notation(options.vif_mask),
-            "-arp" ]
-    if options.vif_pointopoint:
-        args.extend(["pointopoint",options.vif_pointopoint])
-    if options.vif_txqueuelen is not None:
-        args.extend(["txqueuelen",str(options.vif_txqueuelen)])
-    args.append("up")
-    proc = subprocess.Popen(args, stdout=subprocess.PIPE)
-    out,err = proc.communicate()
-    if proc.wait():
-        raise RuntimeError, "Error starting virtual interface"
-    
-    if options.vif_snat:
-        # set up SNAT using iptables
-        # TODO: stop vif on error. 
-        #   Not so necessary since deallocating the tun/tap device
-        #   will forcibly stop it, but it would be tidier
-        args = [ "iptables", "-t", "nat", "-A", "POSTROUTING", 
-                 "-s", "%s/%d" % (options.vif_addr, options.vif_mask),
-                 "-j", "SNAT",
-                 "--to-source", hostaddr, "--random" ]
-        proc = subprocess.Popen(args, stdout=subprocess.PIPE)
-        out,err = proc.communicate()
-        if proc.wait():
-            raise RuntimeError, "Error setting up SNAT"
-
-def vif_stop(tun_path, tun_name):
-    if options.vif_snat:
-        # set up SNAT using iptables
-        args = [ "iptables", "-t", "nat", "-D", "POSTROUTING", 
-                 "-s", "%s/%d" % (options.vif_addr, options.vif_mask),
-                 "-j", "SNAT",
-                 "--to-source", hostaddr, "--random" ]
-        proc = subprocess.Popen(args, stdout=subprocess.PIPE)
-        out,err = proc.communicate()
-    
-    args = ["ifconfig", tun_name, "down"]
-    proc = subprocess.Popen(args, stdout=subprocess.PIPE)
-    out,err = proc.communicate()
-    if proc.wait():
-        print >>sys.stderr, "WARNING: error stopping virtual interface"
-    
-    
-def pl_tuntap_alloc(kind, tun_path, tun_name):
-    tunalloc_so = ctypes.cdll.LoadLibrary("./tunalloc.so")
-    c_tun_name = ctypes.c_char_p("\x00"*IFNAMSIZ) # the string will be mutated!
-    kind = {"tun":IFF_TUN,
-            "tap":IFF_TAP}[kind]
-    fd = tunalloc_so.tun_alloc(kind, c_tun_name)
-    name = c_tun_name.value
-    return str(fd), name
-
-_name_reservation = None
-def pl_tuntap_namealloc(kind, tun_path, tun_name):
-    global _name_reservation
-    # Serialize access
-    lockfile = open("/tmp/nepi-tun-connect.lock", "a")
-    lock = HostLock(lockfile)
-    
-    # We need to do this, fd_tuntap is the only one who can
-    # tell us our slice id (this script runs as root, so no uid),
-    # and the pattern of device names accepted by vsys scripts
-    tunalloc_so = ctypes.cdll.LoadLibrary("./tunalloc.so")
-    c_tun_name = ctypes.c_char_p("\x00"*IFNAMSIZ) # the string will be mutated!
-    nkind= {"tun":IFF_TUN,
-            "tap":IFF_TAP}[kind]
-    fd = tunalloc_so.tun_alloc(nkind, c_tun_name)
-    name = c_tun_name.value
-    os.close(fd)
-
-    base = name[:name.index('-')+1]
-    existing = set(map(str.strip,os.popen("ip a | grep -o '%s[0-9]*'" % (base,)).read().strip().split('\n')))
-    
-    for i in xrange(9000,10000):
-        name = base + str(i)
-        if name not in existing:
-            break
-    else:
-        raise RuntimeError, "Could not assign interface name"
-    
-    _name_reservation = lock
-    
-    return None, name
-
-def pl_vif_start(tun_path, tun_name):
-    global _name_reservation
-
-    out = []
-    def outreader():
-        out.append(stdout.read())
-        stdout.close()
-        time.sleep(1)
-
-    # Serialize access to vsys
-    lockfile = open("/tmp/nepi-tun-connect.lock", "a")
-    lock = _name_reservation or HostLock(lockfile)
-    _name_reservation = None
-    
-    stdin = open("/vsys/vif_up.in","w")
-    stdout = open("/vsys/vif_up.out","r")
-
-    t = threading.Thread(target=outreader)
-    t.start()
-    
-    stdin.write(tun_name+"\n")
-    stdin.write(options.vif_addr+"\n")
-    stdin.write(str(options.vif_mask)+"\n")
-    if options.vif_snat:
-        stdin.write("snat=1\n")
-    if options.vif_pointopoint:
-        stdin.write("pointopoint=%s\n" % (options.vif_pointopoint,))
-    if options.vif_txqueuelen is not None:
-        stdin.write("txqueuelen=%d\n" % (options.vif_txqueuelen,))
-    if options.mode.startswith('pl-gre'):
-        stdin.write("gre=%s\n" % (options.gre_key,))
-        stdin.write("remote=%s\n" % (options.peer_addr,))
-    stdin.close()
-    
-    t.join()
-    out = ''.join(out)
-    if out.strip():
-        print >>sys.stderr, out
-    
-    del lock, lockfile
-
-def pl_vif_stop(tun_path, tun_name):
-    out = []
-    def outreader():
-        out.append(stdout.read())
-        stdout.close()
-        
-        if options.mode.startswith('pl-gre'):
-            lim = 120
-        else:
-            lim = 2
-        
-        for i in xrange(lim):
-            ifaces = set(map(str.strip,os.popen("ip a | grep -o '%s'" % (tun_name,)).read().strip().split('\n')))
-            if tun_name in ifaces:
-                time.sleep(1)
-            else:
-                break
-
-    # Serialize access to vsys
-    lockfile = open("/tmp/nepi-tun-connect.lock", "a")
-    lock = HostLock(lockfile)
-
-    stdin = open("/vsys/vif_down.in","w")
-    stdout = open("/vsys/vif_down.out","r")
-    
-    t = threading.Thread(target=outreader)
-    t.start()
-    
-    stdin.write(tun_name+"\n")
-    stdin.close()
-    
-    t.join()
-    out = ''.join(out)
-    if out.strip():
-        print >>sys.stderr, out
-    
-    del lock, lockfile
-
-
-def tun_fwd(tun, remote, reconnect = None, accept_local = None, accept_remote = None, slowlocal = True, bwlimit = None):
-    global TERMINATE
-    global SUSPEND
-    
-    tunqueue = options.vif_txqueuelen or 1000
-    tunkqueue = 500
-    
-    # in PL mode, we cannot strip PI structs
-    # so we'll have to handle them
-    tunchannel.tun_fwd(tun, remote,
-        with_pi = options.mode.startswith('pl-'),
-        ether_mode = tun_name.startswith('tap'),
-        cipher_key = options.cipher_key,
-        udp = options.protocol == 'udp',
-        TERMINATE = TERMINATE,
-        SUSPEND = SUSPEND,
-        stderr = None,
-        reconnect = reconnect,
-        tunqueue = tunqueue,
-        tunkqueue = tunkqueue,
-        cipher = options.cipher,
-        accept_local = accept_local,
-        accept_remote = accept_remote,
-        queueclass = queueclass,
-        slowlocal = slowlocal,
-        bwlimit = bwlimit
-    )
-
-
-
-nop = lambda tun_path, tun_name : (tun_path, tun_name)
-MODEINFO = {
-    'none' : dict(alloc=nop,
-                  tunopen=tunopen, tunclose=tunclose,
-                  dealloc=nop,
-                  start=nop,
-                  stop=nop),
-    'tun'  : dict(alloc=functools.partial(tuntap_alloc, "tun"),
-                  tunopen=tunopen, tunclose=tunclose,
-                  dealloc=tuntap_dealloc,
-                  start=vif_start,
-                  stop=vif_stop),
-    'tap'  : dict(alloc=functools.partial(tuntap_alloc, "tap"),
-                  tunopen=tunopen, tunclose=tunclose,
-                  dealloc=tuntap_dealloc,
-                  start=vif_start,
-                  stop=vif_stop),
-    'pl-tun'  : dict(alloc=functools.partial(pl_tuntap_alloc, "tun"),
-                  tunopen=tunopen, tunclose=tunclose,
-                  dealloc=nop,
-                  start=pl_vif_start,
-                  stop=pl_vif_stop),
-    'pl-tap'  : dict(alloc=functools.partial(pl_tuntap_alloc, "tap"),
-                  tunopen=tunopen, tunclose=tunclose,
-                  dealloc=nop,
-                  start=pl_vif_start,
-                  stop=pl_vif_stop),
-    'pl-gre-ip' : dict(alloc=functools.partial(pl_tuntap_namealloc, "tun"),
-                  tunopen=noopen, tunclose=tunclose,
-                  dealloc=nop,
-                  start=pl_vif_start,
-                  stop=pl_vif_stop),
-    'pl-gre-eth': dict(alloc=functools.partial(pl_tuntap_namealloc, "tap"),
-                  tunopen=noopen, tunclose=noclose,
-                  dealloc=nop,
-                  start=pl_vif_start,
-                  stop=pl_vif_stop),
-}
-    
-tun_path = options.tun_path
-tun_name = options.tun_name
-
-modeinfo = MODEINFO[options.mode]
-
-# Try to load filter module
-filter_thread = None
-if options.filter_module:
-    print >>sys.stderr, "Loading module", options.filter_module, "with args", options.filter_args
-    if options.filter_module.endswith('.py'):
-        sys.path.append(os.path.dirname(options.filter_module))
-        filter_module = __import__(os.path.basename(options.filter_module).rsplit('.',1)[0])
-        if options.filter_args:
-            try:
-                filter_args = dict(map(lambda x:x.split('=',1),options.filter_args.split(',')))
-                filter_module.init(**filter_args)
-            except:
-                traceback.print_exc()
-    elif options.filter_module.endswith('.so'):
-        filter_module = ctypes.cdll.LoadLibrary(options.filter_module)
-        if options.filter_args:
-            try:
-                filter_module.init(options.filter_args)
-            except:
-                traceback.print_exc()
-    try:
-        accept_packet = filter_module.accept_packet
-        print >>sys.stderr, "Installing packet filter (accept_packet)"
-    except:
-        accept_packet = None
-    
-    try:
-        queueclass = filter_module.queueclass
-        print >>sys.stderr, "Installing custom queue"
-    except:
-        queueclass = None
-    
-    try:
-        _filter_init = filter_module.filter_init
-        filter_run = filter_module.filter_run
-        filter_close = filter_module.filter_close
-        
-        def filter_init():
-            filter_local = ctypes.c_int(0)
-            filter_remote = ctypes.c_int(0)
-            _filter_init(filter_local, filter_remote)
-            return filter_local, filter_remote
-
-        print >>sys.stderr, "Installing packet filter (stream filter)"
-    except:
-        filter_init = None
-        filter_run = None
-        filter_close = None
-else:
-    accept_packet = None
-    filter_init = None
-    filter_run = None
-    filter_close = None
-    queueclass = None
-
-# install multicast forwarding hook
-if options.multicast_fwd:
-    print >>sys.stderr, "Connecting to mcast filter"
-    mcfwd_sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
-    #disable nonblocking, cannot handle EWOULDBLOCK
-    #tunchannel.nonblock(mcfwd_sock.fileno())
-    mcfwd_sock.settimeout(0.5) # 500ms tops - packet lost if it blocks more than that
-
-# be careful to roll back stuff on exceptions
-tun_path, tun_name = modeinfo['alloc'](tun_path, tun_name)
-try:
-    modeinfo['start'](tun_path, tun_name)
-    try:
-        tun = modeinfo['tunopen'](tun_path, tun_name)
-    except:
-        modeinfo['stop'](tun_path, tun_name)
-        raise
-except:
-    modeinfo['dealloc'](tun_path, tun_name)
-    raise
-
-
-# Trak SIGTERM, and set global termination flag instead of dying
-TERMINATE = []
-def _finalize(sig,frame):
-    global TERMINATE
-    TERMINATE.append(None)
-signal.signal(signal.SIGTERM, _finalize)
-
-# SIGUSR1 suspends forwading, SIGUSR2 resumes forwarding
-SUSPEND = []
-def _suspend(sig,frame):
-    global SUSPEND
-    if not SUSPEND:
-        SUSPEND.append(None)
-signal.signal(signal.SIGUSR1, _suspend)
-
-def _resume(sig,frame):
-    global SUSPEND
-    if SUSPEND:
-        SUSPEND.remove(None)
-signal.signal(signal.SIGUSR2, _resume)
-
-try:
-    tcpdump = None
-    reconnect = None
-    mcfwd_thread = None
-
-    # install multicast forwarding hook
-    if options.multicast_fwd:
-        print >>sys.stderr, "Installing mcast filter"
-        
-        if HAS_IOVEC:
-            writev = iovec.writev
-        else:
-            os_write = os.write
-            map_ = map
-            str_ = str
-            def writev(fileno, *stuff):
-                os_write(''.join(map_(str_,stuff)))
-        
-        
-        mcfwd_queue = Queue(options.vif_txqueuelen or 500)
-        def mcfwd_thread_fn(
-                sock=mcfwd_sock, 
-                sockno=mcfwd_sock.fileno(),
-                multicast_fwd = options.multicast_fwd,
-                vif_addr = socket.inet_aton(options.vif_addr),
-                writev=writev,
-                retrycodes=(os.errno.EWOULDBLOCK, os.errno.EAGAIN, os.errno.EINTR),
-                len=len, ord=ord):
-            TERMINATE_ = TERMINATE
-            connected = False
-            
-            while not TERMINATE_:
-                try:
-                    fwd = mcfwd_queue.get(True, 1)
-                except:
-                    continue
-                
-                # Forward it
-                if not connected:
-                    try:
-                        sock.connect(multicast_fwd)
-                        connected = True
-                    except:
-                        traceback.print_exc(file=sys.stderr)
-                if connected:
-                    try:
-                        writev(sockno, vif_addr,fwd)
-                    except OSError,e:
-                        if e.errno not in retrycodes:
-                            traceback.print_exc(file=sys.stderr)
-                        else:
-                            try:
-                                writev(sockno, vif_addr,fwd)
-                            except:
-                                traceback.print_exc(file=sys.stderr)
-                    except socket.timeout:
-                        # packet lost
-                        continue
-                    except:
-                        traceback.print_exc(file=sys.stderr)
-                
-                mcfwd_queue.task_done()
-        mcfwd_thread = threading.Thread(target=mcfwd_thread_fn)
-        mcfwd_thread.start()
-        
-        def accept_packet(packet, direction, 
-                _up_accept=accept_packet, 
-                etherProto=tunchannel.etherProto,
-                etherStrip=tunchannel.etherStrip,
-                etherMode=tun_name.startswith('tap'),
-                len=len, ord=ord):
-            if _up_accept:
-                rv = _up_accept(packet, direction)
-                if not rv:
-                    return rv
-
-            if direction == 1:
-                # Incoming... what?
-                if etherMode:
-                    if etherProto(packet)=='\x08\x00':
-                        fwd = etherStrip(packet)
-                    else:
-                        fwd = None
-                else:
-                    fwd = packet
-                if fwd is not None and len(fwd) >= 20:
-                    if (ord(fwd[16]) & 0xf0) == 0xe0:
-                        # Queue for forwarding
-                        try:
-                            mcfwd_queue.put_nowait(fwd)
-                        except:
-                            print >>sys.stderr, "Multicast packet dropped, forwarder queue full"
-            return 1
-
-    
-    if options.protocol == 'fd':
-        if accept_packet or filter_init:
-            raise NotImplementedError, "--pass-fd and --filter are not compatible"
-        
-        if options.pass_fd.startswith("base64:"):
-            options.pass_fd = base64.b64decode(
-                options.pass_fd[len("base64:"):])
-            options.pass_fd = os.path.expandvars(options.pass_fd)
-        
-        print >>sys.stderr, "Sending FD to: %r" % (options.pass_fd,)
-        
-        # send FD to whoever wants it
-        import passfd
-        
-        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
-        retrydelay = 1.0
-        for i in xrange(30):
-            if TERMINATE:
-                raise OSError, "Killed"
-            try:
-                sock.connect(options.pass_fd)
-                break
-            except socket.error:
-                # wait a while, retry
-                print >>sys.stderr, "%s: Could not connect. Retrying in a sec..." % (time.strftime('%c'),)
-                time.sleep(min(30.0,retrydelay))
-                retrydelay *= 1.1
-        else:
-            sock.connect(options.pass_fd)
-        passfd.sendfd(sock, tun.fileno(), '0')
-        
-        # just wait forever
-        def tun_fwd(tun, remote, **kw):
-            global TERMINATE
-            TERM = TERMINATE
-            while not TERM:
-                time.sleep(1)
-        remote = None
-    elif options.protocol == "gre":
-        if accept_packet or filter_init:
-            raise NotImplementedError, "--mode %s and --filter are not compatible" % (options.mode,)
-        
-        # just wait forever
-        def tun_fwd(tun, remote, **kw):
-            global TERMINATE
-            TERM = TERMINATE
-            while not TERM:
-                time.sleep(1)
-        remote = options.peer_addr
-    elif options.protocol == "udp":
-        # connect to remote endpoint
-        if options.peer_addr and options.peer_port:
-            rsock = tunchannel.udp_establish(TERMINATE, hostaddr, options.port, 
-                    options.peer_addr, options.peer_port)
-            remote = os.fdopen(rsock.fileno(), 'r+b', 0)
-        else:
-            print >>sys.stderr, "Error: need a remote endpoint in UDP mode"
-            raise AssertionError, "Error: need a remote endpoint in UDP mode"
-    elif options.protocol == "tcp":
-        # connect to remote endpoint
-        if options.peer_addr and options.peer_port:
-            rsock = tunchannel.tcp_establish(TERMINATE, hostaddr, options.port,
-                    options.peer_addr, options.peer_port)
-            remote = os.fdopen(rsock.fileno(), 'r+b', 0)
-        else:
-            print >>sys.stderr, "Error: need a remote endpoint in TCP mode"
-            raise AssertionError, "Error: need a remote endpoint in TCP mode"
-    else:
-        msg = "Error: Invalid protocol %s" % options.protocol
-        print >>sys.stderr, msg 
-        raise AssertionError, msg
-
-    if filter_init:
-        filter_local, filter_remote = filter_init()
-        
-        def filter_loop():
-            global TERMINATE
-            TERM = TERMINATE
-            run = filter_run
-            local = filter_local
-            remote = filter_remote
-            while not TERM:
-                run(local, remote)
-            filter_close(local, remote)
-            
-        filter_thread = threading.Thread(target=filter_loop)
-        filter_thread.start()
-    
-    print >>sys.stderr, "Connected"
-
-    if not options.no_capture:
-        # Launch a tcpdump subprocess, to capture and dump packets.
-        # Make sure to catch sigterm and kill the tcpdump as well
-        tcpdump = subprocess.Popen(
-            ["tcpdump","-l","-n","-i",tun_name, "-s", "4096"]
-            + ["-w",options.pcap_capture,"-U"] * bool(options.pcap_capture) )
-    
-    # Try to give us high priority
-    try:
-        os.nice(-20)
-    except:
-        # Ignore errors, we might not have enough privileges,
-        # or perhaps there is no os.nice support in the system
-        pass
-    
-    if not filter_init:
-        tun_fwd(tun, remote,
-            reconnect = reconnect,
-            accept_local = accept_packet,
-            accept_remote = accept_packet,
-            bwlimit = options.bwlimit,
-            slowlocal = True)
-    else:
-        # Hm...
-        # ...ok, we need to:
-        #  1. Forward packets from tun to filter
-        #  2. Forward packets from remote to filter
-        #
-        # 1. needs TUN rate-limiting, while 
-        # 2. needs reconnection
-        #
-        # 1. needs ONLY TUN-side acceptance checks, while
-        # 2. needs ONLY remote-side acceptance checks
-        if isinstance(filter_local, ctypes.c_int):
-            filter_local_fd = filter_local.value
-        else:
-            filter_local_fd = filter_local
-        if isinstance(filter_remote, ctypes.c_int):
-            filter_remote_fd = filter_remote.value
-        else:
-            filter_remote_fd = filter_remote
-
-        def localside():
-            tun_fwd(tun, filter_local_fd,
-                accept_local = accept_packet,
-                slowlocal = True)
-        
-        def remoteside():
-            tun_fwd(filter_remote_fd, remote,
-                reconnect = reconnect,
-                accept_remote = accept_packet,
-                bwlimit = options.bwlimit,
-                slowlocal = False)
-        
-        localthread = threading.Thread(target=localside)
-        remotethread = threading.Thread(target=remoteside)
-        localthread.start()
-        remotethread.start()
-        localthread.join()
-        remotethread.join()
-
-finally:
-    try:
-        print >>sys.stderr, "Shutting down..."
-    except:
-        # In case sys.stderr is broken
-        pass
-    
-    # tidy shutdown in every case - swallow exceptions
-    TERMINATE.append(None)
-    
-    if mcfwd_thread:
-        try:
-            mcfwd_thread.join()
-        except:
-            pass
-
-    if filter_thread:
-        try:
-            filter_thread.join()
-        except:
-            pass
-
-    try:
-        if tcpdump:
-            os.kill(tcpdump.pid, signal.SIGTERM)
-            tcpdump.wait()
-    except:
-        pass
-
-    try:
-        modeinfo['stop'](tun_path, tun_name)
-    except:
-        traceback.print_exc()
-
-    try:
-        modeinfo['tunclose'](tun_path, tun_name, tun)
-    except:
-        traceback.print_exc()
-        
-    try:
-        modeinfo['dealloc'](tun_path, tun_name)
-    except:
-        traceback.print_exc()
-    
-    print >>sys.stderr, "TERMINATED GRACEFULLY"
-
diff --git a/src/nepi/testbeds/planetlab/scripts/tunalloc.c b/src/nepi/testbeds/planetlab/scripts/tunalloc.c
deleted file mode 100644 (file)
index 9101392..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-/* Slice-side code to allocate tuntap interface in root slice
- * Based on bmsocket.c
- *  Thom Haddow - 08/10/09
- *
- * Call tun_alloc() with IFFTUN or IFFTAP as an argument to get back fd to
- * new tuntap interface. Interface name can be acquired via TUNGETIFF ioctl.
- */
-
-#include <sys/un.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <errno.h>
-#include <unistd.h>
-#include <sys/socket.h>
-#include <sys/fcntl.h>
-#include <sys/ioctl.h>
-#include <linux/ioctl.h>
-#include <linux/if.h>
-#include <linux/if_tun.h>
-
-#define VSYS_TUNTAP "/vsys/fd_tuntap.control"
-
-/* Reads vif FD from "fd", writes interface name to vif_name, and returns vif FD.
- * vif_name should be IFNAMSIZ chars long. */
-int receive_vif_fd(int fd, char *vif_name)
-{
-       struct msghdr msg;
-       struct iovec iov;
-       int rv;
-       size_t ccmsg[CMSG_SPACE(sizeof(int)) / sizeof(size_t)];
-       struct cmsghdr *cmsg;
-
-    /* Use IOV to read interface name */
-       iov.iov_base = vif_name;
-       iov.iov_len = IFNAMSIZ;
-
-       msg.msg_name = 0;
-       msg.msg_namelen = 0;
-       msg.msg_iov = &iov;
-       msg.msg_iovlen = 1;
-       /* old BSD implementations should use msg_accrights instead of
-        * msg_control; the interface is different. */
-       msg.msg_control = ccmsg;
-       msg.msg_controllen = sizeof(ccmsg);
-
-       while(((rv = recvmsg(fd, &msg, 0)) == -1) && errno == EINTR);
-       if (rv == -1) {
-               perror("recvmsg");
-               return -1;
-       }
-       if(!rv) {
-               /* EOF */
-               return -1;
-       }
-
-       cmsg = CMSG_FIRSTHDR(&msg);
-       if (!cmsg->cmsg_type == SCM_RIGHTS) {
-               fprintf(stderr, "got control message of unknown type %d\n",
-                       cmsg->cmsg_type);
-               return -1;
-       }
-       return *(int*)CMSG_DATA(cmsg);
-}
-
-
-int tun_alloc(int iftype, char *if_name)
-{
-    int control_fd;
-    struct sockaddr_un addr;
-    int remotefd;
-
-    control_fd = socket(AF_UNIX, SOCK_STREAM, 0);
-    if (control_fd == -1) {
-        perror("Could not create UNIX socket\n");
-        exit(-1);
-    }
-
-    memset(&addr, 0, sizeof(struct sockaddr_un));
-    /* Clear structure */
-    addr.sun_family = AF_UNIX;
-    strncpy(addr.sun_path, VSYS_TUNTAP,
-            sizeof(addr.sun_path) - 1);
-
-    if (connect(control_fd, (struct sockaddr *) &addr,
-                sizeof(struct sockaddr_un)) == -1) {
-        perror("Could not connect to Vsys control socket");
-        exit(-1);
-    }
-
-    /* passing type param */
-    if (send(control_fd, &iftype, sizeof(iftype), 0) != sizeof(iftype)) {
-        perror("Could not send paramater to Vsys control socket");
-        exit(-1);
-    }
-
-    remotefd = receive_vif_fd(control_fd, if_name);
-    
-    /* set "safe" (non-breaking) queueing mode IFF_ONE_QUEUE */
-    struct ifreq ifr;
-    if (0 == fcntl(remotefd, TUNGETIFF, &ifr)) {
-        ifr.ifr_flags |= IFF_ONE_QUEUE;
-        fcntl(remotefd, TUNSETIFF, &ifr);
-    }
-    
-    return remotefd;
-}
diff --git a/src/nepi/testbeds/planetlab/tunproto.py b/src/nepi/testbeds/planetlab/tunproto.py
deleted file mode 100644 (file)
index 3fd1a7f..0000000
+++ /dev/null
@@ -1,720 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import weakref
-import os
-import os.path
-import rspawn
-import subprocess
-import threading
-import base64
-import time
-import re
-import sys
-import logging
-
-from nepi.util import server
-
-class TunProtoBase(object):
-    def __init__(self, local, peer, home_path, key):
-        # Weak references, since ifaces do have a reference to the
-        # tunneling protocol implementation - we don't want strong
-        # circular references.
-        self.peer = weakref.ref(peer)
-        self.local = weakref.ref(local)
-        
-        self.port = 15000
-        self.mode = 'pl-tun'
-        self.key = key
-        self.cross_slice = False
-        
-        self.home_path = home_path
-       
-        self._started = False
-
-        self._pid = None
-        self._ppid = None
-        self._if_name = None
-
-        self._pointopoint = None
-        self._netprefix = None
-        self._address = None
-
-        # Logging
-        self._logger = logging.getLogger('nepi.testbeds.planetlab')
-    
-    def __str__(self):
-        local = self.local()
-        if local:
-            return '<%s for %s>' % (self.__class__.__name__, local)
-        else:
-            return super(TunProtoBase,self).__str__()
-
-    def _make_home(self):
-        local = self.local()
-        
-        if not local:
-            raise RuntimeError, "Lost reference to peering interfaces before launching"
-        if not local.node:
-            raise RuntimeError, "Unconnected TUN - missing node"
-        
-        # Make sure all the paths are created where 
-        # they have to be created for deployment
-        # Also remove pidfile, if there is one.
-        # Old pidfiles from previous runs can be troublesome.
-        cmd = "mkdir -p %(home)s ; rm -f %(home)s/pid %(home)s/*.so" % {
-            'home' : server.shell_escape(self.home_path)
-        }
-        (out,err),proc = server.eintr_retry(server.popen_ssh_command)(
-            cmd,
-            host = local.node.hostname,
-            port = None,
-            user = local.node.slicename,
-            agent = None,
-            ident_key = local.node.ident_path,
-            server_key = local.node.server_key,
-            timeout = 60,
-            retry = 3
-            )
-        
-        if proc.wait():
-            raise RuntimeError, "Failed to set up TUN forwarder: %s %s" % (out,err,)
-    
-    def _install_scripts(self):
-        local = self.local()
-        
-        if not local:
-            raise RuntimeError, "Lost reference to peering interfaces before launching"
-        if not local.node:
-            raise RuntimeError, "Unconnected TUN - missing node"
-        
-        # Install the tun_connect script and tunalloc utility
-        from nepi.util import tunchannel
-        from nepi.util import ipaddr2
-        sources = [
-            os.path.join(os.path.dirname(__file__), 'scripts', 'tun_connect.py'),
-            os.path.join(os.path.dirname(__file__), 'scripts', 'tunalloc.c'),
-            re.sub(r"([.]py)[co]$", r'\1', tunchannel.__file__, 1), # pyc/o files are version-specific
-            re.sub(r"([.]py)[co]$", r'\1', ipaddr2.__file__, 1), # pyc/o files are version-specific
-        ]
-        if local.filter_module:
-            filter_sources = filter(bool,map(str.strip,local.filter_module.module.split()))
-            filter_module = filter_sources[0]
-            
-            # Translate paths to builtin sources
-            for i,source in enumerate(filter_sources):
-                if not os.path.exists(source):
-                    # Um... try the builtin folder
-                    source = os.path.join(os.path.dirname(__file__), "scripts", source)
-                    if os.path.exists(source):
-                        # Yep... replace
-                        filter_sources[i] = source
-
-            sources.extend(set(filter_sources))
-                
-        else:
-            filter_module = None
-            filter_sources = None
-        dest = "%s@%s:%s" % (
-            local.node.slicename, local.node.hostname, 
-            os.path.join(self.home_path,'.'),)
-        (out,err),proc = server.eintr_retry(server.popen_scp)(
-            sources,
-            dest,
-            ident_key = local.node.ident_path,
-            server_key = local.node.server_key
-            )
-    
-        if proc.wait():
-            raise RuntimeError, "Failed upload TUN connect script %r: %s %s" % (sources, out,err,)
-        
-        # Make sure all dependencies are satisfied
-        local.node.wait_dependencies()
-
-        cmd = ( (
-            "cd %(home)s && "
-            "gcc -fPIC -shared tunalloc.c -o tunalloc.so && "
-            
-            "wget -q -c -O python-iovec-src.tar.gz %(iovec_url)s && "
-            "mkdir -p python-iovec && "
-            "cd python-iovec && "
-            "tar xzf ../python-iovec-src.tar.gz --strip-components=1 && "
-            "python setup.py build && "
-            "python setup.py install --install-lib .. && "
-            "cd .. "
-            
-            + ( " && "
-                "gcc -fPIC -shared %(sources)s -o %(module)s.so " % {
-                   'module' : os.path.basename(filter_module).rsplit('.',1)[0],
-                   'sources' : ' '.join(map(os.path.basename,filter_sources))
-                }
-                
-                if filter_module is not None and filter_module.endswith('.c')
-                else ""
-            )
-            
-            + ( " && "
-                "wget -q -c -O python-passfd-src.tar.gz %(passfd_url)s && "
-                "mkdir -p python-passfd && "
-                "cd python-passfd && "
-                "tar xzf ../python-passfd-src.tar.gz --strip-components=1 && "
-                "python setup.py build && "
-                "python setup.py install --install-lib .. "
-                
-                if local.tun_proto == "fd" 
-                else ""
-            ) 
-          )
-        % {
-            'home' : server.shell_escape(self.home_path),
-            'passfd_url' : "http://nepi.pl.sophia.inria.fr/code/python-passfd/archive/tip.tar.gz",
-            'iovec_url' : "http://nepi.pl.sophia.inria.fr/code/python-iovec/archive/tip.tar.gz",
-        } )
-        (out,err),proc = server.popen_ssh_command(
-            cmd,
-            host = local.node.hostname,
-            port = None,
-            user = local.node.slicename,
-            agent = None,
-            ident_key = local.node.ident_path,
-            server_key = local.node.server_key,
-            timeout = 300
-            )
-        
-        if proc.wait():
-            raise RuntimeError, "Failed to set up TUN forwarder: %s %s" % (out,err,)
-        
-    def launch(self, check_proto):
-        peer = self.peer()
-        local = self.local()
-        
-        if not peer or not local:
-            raise RuntimeError, "Lost reference to peering interfaces before launching"
-        
-        peer_port = peer.tun_port
-        peer_addr = peer.tun_addr
-        peer_proto = peer.tun_proto
-        peer_cipher = peer.tun_cipher
-        
-        local_port = self.port
-        local_cap  = local.capture
-        self._address = local_addr = local.address
-        self._netprefix = local_mask = local.netprefix
-        local_snat = local.snat
-        local_txq  = local.txqueuelen
-        self._pointopoint = local_p2p  = local.pointopoint
-        local_cipher=local.tun_cipher
-        local_mcast= local.multicast
-        local_bwlim= local.bwlimit
-        local_mcastfwd = local.multicast_forwarder
-        
-        if not local_p2p and hasattr(peer, 'address'):
-            self._pointopoint = local_p2p = peer.address
-
-        if check_proto != peer_proto:
-            raise RuntimeError, "Peering protocol mismatch: %s != %s" % (check_proto, peer_proto)
-        
-        if local_cipher != peer_cipher:
-            raise RuntimeError, "Peering cipher mismatch: %s != %s" % (local_cipher, peer_cipher)
-        
-        if check_proto == 'gre' and local_cipher.lower() != 'plain':
-            raise RuntimeError, "Misconfigured TUN: %s - GRE tunnels do not support encryption. Got %s, you MUST use PLAIN" % (local, local_cipher,)
-
-        if local.filter_module:
-            if check_proto not in ('udp', 'tcp'):
-                raise RuntimeError, "Miscofnigured TUN: %s - filtered tunnels only work with udp or tcp links" % (local,)
-            filter_module = filter(bool,map(str.strip,local.filter_module.module.split()))
-            filter_module = os.path.join('.',os.path.basename(filter_module[0]))
-            if filter_module.endswith('.c'):
-                filter_module = filter_module.rsplit('.',1)[0] + '.so'
-            filter_args = local.filter_module.args
-        else:
-            filter_module = None
-            filter_args = None
-        
-        args = ["python", "tun_connect.py", 
-            "-m", str(self.mode),
-            "-t", str(check_proto),
-            "-A", str(local_addr),
-            "-M", str(local_mask),
-            "-C", str(local_cipher),
-            ]
-        
-        if check_proto == 'fd':
-            passfd_arg = str(peer_addr)
-            if passfd_arg.startswith('\x00'):
-                # cannot shell_encode null characters :(
-                passfd_arg = "base64:"+base64.b64encode(passfd_arg)
-            else:
-                passfd_arg = '$HOME/'+server.shell_escape(passfd_arg)
-            args.extend([
-                "--pass-fd", passfd_arg
-            ])
-        elif check_proto == 'gre':
-            if self.cross_slice:
-                args.extend([
-                    "-K", str(self.key.strip('='))
-                ])
-
-            args.extend([
-                "-a", str(peer_addr),
-            ])
-        # both udp and tcp
-        else:
-            args.extend([
-                "-P", str(local_port),
-                "-p", str(peer_port),
-                "-a", str(peer_addr),
-                "-k", str(self.key)
-            ])
-        
-        if local_snat:
-            args.append("-S")
-        if local_p2p:
-            args.extend(("-Z",str(local_p2p)))
-        if local_txq:
-            args.extend(("-Q",str(local_txq)))
-        if not local_cap:
-            args.append("-N")
-        elif local_cap == 'pcap':
-            args.extend(('-c','pcap'))
-        if local_bwlim:
-            args.extend(("-b",str(local_bwlim*1024)))
-        if filter_module:
-            args.extend(("--filter", filter_module))
-        if filter_args:
-            args.extend(("--filter-args", filter_args))
-        if local_mcast and local_mcastfwd:
-            args.extend(("--multicast-forwarder", local_mcastfwd))
-
-        self._logger.info("Starting %s", self)
-        
-        self._make_home()
-        self._install_scripts()
-
-        # Start process in a "daemonized" way, using nohup and heavy
-        # stdin/out redirection to avoid connection issues
-        (out,err),proc = rspawn.remote_spawn(
-            " ".join(args),
-            
-            pidfile = './pid',
-            home = self.home_path,
-            stdin = '/dev/null',
-            stdout = 'capture',
-            stderr = rspawn.STDOUT,
-            sudo = True,
-            
-            host = local.node.hostname,
-            port = None,
-            user = local.node.slicename,
-            agent = None,
-            ident_key = local.node.ident_path,
-            server_key = local.node.server_key
-            )
-        
-        if proc.wait():
-            raise RuntimeError, "Failed to set up TUN: %s %s" % (out,err,)
-       
-        self._started = True
-    
-    def recover(self):
-        # Tunnel should be still running in its node
-        # Just check its pidfile and we're done
-        self._started = True
-        self.checkpid()
-    
-    def wait(self):
-        local = self.local()
-        
-        # Wait for the connection to be established
-        retrytime = 2.0
-        for spin in xrange(30):
-            if self.status() != rspawn.RUNNING:
-                self._logger.warn("FAILED TO CONNECT! %s", self)
-                break
-            
-            # Connected?
-            (out,err),proc = server.eintr_retry(server.popen_ssh_command)(
-                "cd %(home)s ; grep -a -c Connected capture" % dict(
-                    home = server.shell_escape(self.home_path)),
-                host = local.node.hostname,
-                port = None,
-                user = local.node.slicename,
-                agent = None,
-                ident_key = local.node.ident_path,
-                server_key = local.node.server_key,
-                timeout = 60,
-                err_on_timeout = False
-                )
-            proc.wait()
-
-            if out.strip() == '1':
-                break
-
-            # At least listening?
-            (out,err),proc = server.eintr_retry(server.popen_ssh_command)(
-                "cd %(home)s ; grep -a -c Listening capture" % dict(
-                    home = server.shell_escape(self.home_path)),
-                host = local.node.hostname,
-                port = None,
-                user = local.node.slicename,
-                agent = None,
-                ident_key = local.node.ident_path,
-                server_key = local.node.server_key,
-                timeout = 60,
-                err_on_timeout = False
-                )
-            proc.wait()
-
-            time.sleep(min(30.0, retrytime))
-            retrytime *= 1.1
-        else:
-            (out,err),proc = server.eintr_retry(server.popen_ssh_command)(
-                "cat %(home)s/capture" % dict(
-                    home = server.shell_escape(self.home_path)),
-                host = local.node.hostname,
-                port = None,
-                user = local.node.slicename,
-                agent = None,
-                ident_key = local.node.ident_path,
-                server_key = local.node.server_key,
-                timeout = 60,
-                retry = 3,
-                err_on_timeout = False
-                )
-            proc.wait()
-
-            raise RuntimeError, "FAILED TO CONNECT %s: %s%s" % (self,out,err)
-    
-    @property
-    def if_name(self):
-        if not self._if_name:
-            # Inspect the trace to check the assigned iface
-            local = self.local()
-            if local:
-                cmd = "cd %(home)s ; grep -a 'Using tun:' capture | head -1" % dict(
-                            home = server.shell_escape(self.home_path))
-                for spin in xrange(30):
-                    (out,err),proc = server.eintr_retry(server.popen_ssh_command)(
-                        cmd,
-                        host = local.node.hostname,
-                        port = None,
-                        user = local.node.slicename,
-                        agent = None,
-                        ident_key = local.node.ident_path,
-                        server_key = local.node.server_key,
-                        timeout = 60,
-                        err_on_timeout = False
-                        )
-                    
-                    if proc.wait():
-                        self._logger.debug("if_name: failed cmd %s", cmd)
-                        time.sleep(1)
-                        continue
-                    
-                    out = out.strip()
-                    
-                    match = re.match(r"Using +tun: +([-a-zA-Z0-9]*).*",out)
-                    if match:
-                        self._if_name = match.group(1)
-                        break
-                    elif out:
-                        self._logger.debug("if_name: %r does not match expected pattern from cmd %s", out, cmd)
-                    else:
-                        self._logger.debug("if_name: empty output from cmd %s", cmd)
-                    time.sleep(3)
-                else:
-                    self._logger.warn("if_name: Could not get interface name")
-        return self._if_name
-    
-    def if_alive(self):
-        name = self.if_name
-        if name:
-            local = self.local()
-            for i in xrange(30):
-                (out,err),proc = server.eintr_retry(server.popen_ssh_command)(
-                    "ip link show %s >/dev/null 2>&1 && echo ALIVE || echo DEAD" % (name,),
-                    host = local.node.hostname,
-                    port = None,
-                    user = local.node.slicename,
-                    agent = None,
-                    ident_key = local.node.ident_path,
-                    server_key = local.node.server_key,
-                    timeout = 60,
-                    err_on_timeout = False
-                    )
-                
-                if proc.wait():
-                    time.sleep(1)
-                    continue
-                
-                if out.strip() == 'DEAD':
-                    return False
-                elif out.strip() == 'ALIVE':
-                    return True
-        return False
-    
-    def checkpid(self):            
-        local = self.local()
-        
-        if not local:
-            raise RuntimeError, "Lost reference to local interface"
-        
-        # Get PID/PPID
-        # NOTE: wait a bit for the pidfile to be created
-        if self._started and not self._pid or not self._ppid:
-            pidtuple = rspawn.remote_check_pid(
-                os.path.join(self.home_path,'pid'),
-                host = local.node.hostname,
-                port = None,
-                user = local.node.slicename,
-                agent = None,
-                ident_key = local.node.ident_path,
-                server_key = local.node.server_key
-                )
-            
-            if pidtuple:
-                self._pid, self._ppid = pidtuple
-    
-    def status(self):
-        local = self.local()
-        
-        if not local:
-            raise RuntimeError, "Lost reference to local interface"
-        
-        self.checkpid()
-        if not self._started:
-            return rspawn.NOT_STARTED
-        elif not self._pid or not self._ppid:
-            return rspawn.NOT_STARTED
-        else:
-            status = rspawn.remote_status(
-                self._pid, self._ppid,
-                host = local.node.hostname,
-                port = None,
-                user = local.node.slicename,
-                agent = None,
-                ident_key = local.node.ident_path,
-                server_key = local.node.server_key
-                )
-            return status
-    
-    def kill(self, nowait = True):
-        local = self.local()
-        
-        if not local:
-            raise RuntimeError, "Lost reference to local interface"
-        
-        status = self.status()
-        if status == rspawn.RUNNING:
-            self._logger.info("Stopping %s", self)
-            
-            # kill by ppid+pid - SIGTERM first, then try SIGKILL
-            rspawn.remote_kill(
-                self._pid, self._ppid,
-                host = local.node.hostname,
-                port = None,
-                user = local.node.slicename,
-                agent = None,
-                ident_key = local.node.ident_path,
-                server_key = local.node.server_key,
-                sudo = True,
-                nowait = nowait
-                )
-    
-    def waitkill(self):
-        interval = 1.0
-        for i in xrange(30):
-            status = self.status()
-            if status != rspawn.RUNNING:
-                self._logger.info("Stopped %s", self)
-                break
-            time.sleep(interval)
-            interval = min(30.0, interval * 1.1)
-        else:
-            self.kill(nowait=False)
-
-        if self.if_name:
-            for i in xrange(30):
-                if not self.if_alive():
-                    self._logger.info("Device down %s", self)
-                    break
-                time.sleep(interval)
-                interval = min(30.0, interval * 1.1)
-            else:
-                local = self.local()
-                
-                if local:
-                    # Forcibly shut down interface
-                    (out,err),proc = server.eintr_retry(server.popen_ssh_command)(
-                        "sudo -S bash -c 'echo %s > /vsys/vif_down.in'" % (self.if_name,),
-                        host = local.node.hostname,
-                        port = None,
-                        user = local.node.slicename,
-                        agent = None,
-                        ident_key = local.node.ident_path,
-                        server_key = local.node.server_key,
-                        timeout = 60,
-                        err_on_timeout = False
-                        )
-                    proc.wait()
-
-    def if_down(self):
-        # TODO!!! need to set the vif down with vsys/vif_down.in ... which 
-        # doesn't currently work.
-        local = self.local()
-        
-        if local:
-            (out,err),proc = server.eintr_retry(server.popen_ssh_command)(
-                "sudo -S bash -c 'kill -s USR1 %d'" % (self._pid,),
-                host = local.node.hostname,
-                port = None,
-                user = local.node.slicename,
-                agent = None,
-                ident_key = local.node.ident_path,
-                server_key = local.node.server_key,
-                timeout = 60,
-                err_on_timeout = False
-                )
-            proc.wait()
-
-    def if_up(self):
-        # TODO!!! need to set the vif up with vsys/vif_up.in ... which 
-        # doesn't currently work.
-        local = self.local()
-        
-        if local:
-            (out,err),proc = server.eintr_retry(server.popen_ssh_command)(
-                "sudo -S bash -c 'kill -s USR2 %d'" % (self._pid,),
-                host = local.node.hostname,
-                port = None,
-                user = local.node.slicename,
-                agent = None,
-                ident_key = local.node.ident_path,
-                server_key = local.node.server_key,
-                timeout = 60,
-                err_on_timeout = False
-                )
-            proc.wait()    
-
-    _TRACEMAP = {
-        # tracename : (remotename, localname)
-        'packets' : ('capture','capture'),
-        'pcap' : ('pcap','capture.pcap'),
-    }
-    
-    def remote_trace_path(self, whichtrace, tracemap = None):
-        tracemap = self._TRACEMAP if not tracemap else tracemap
-        
-        if whichtrace not in tracemap:
-            return None
-        
-        return os.path.join(self.home_path, tracemap[whichtrace][1])
-        
-    def sync_trace(self, local_dir, whichtrace, tracemap = None):
-        tracemap = self._TRACEMAP if not tracemap else tracemap
-        
-        if whichtrace not in tracemap:
-            return None
-        
-        local = self.local()
-        
-        if not local:
-            return None
-        
-        local_path = os.path.join(local_dir, tracemap[whichtrace][1])
-        
-        # create parent local folders
-        if os.path.dirname(local_path):
-            proc = subprocess.Popen(
-                ["mkdir", "-p", os.path.dirname(local_path)],
-                stdout = open("/dev/null","w"),
-                stdin = open("/dev/null","r"))
-
-            if proc.wait():
-                raise RuntimeError, "Failed to synchronize trace"
-        
-        # sync files
-        (out,err),proc = server.popen_scp(
-            '%s@%s:%s' % (local.node.slicename, local.node.hostname, 
-                os.path.join(self.home_path, tracemap[whichtrace][0])),
-            local_path,
-            port = None,
-            agent = None,
-            ident_key = local.node.ident_path,
-            server_key = local.node.server_key
-            )
-        
-        if proc.wait():
-            raise RuntimeError, "Failed to synchronize trace: %s %s" % (out,err,)
-        
-        return local_path
-        
-    def shutdown(self):
-        self.kill()
-    
-    def destroy(self):
-        self.waitkill()
-
-class TunProtoUDP(TunProtoBase):
-    def __init__(self, local, peer, home_path, key):
-        super(TunProtoUDP, self).__init__(local, peer, home_path, key)
-    
-    def launch(self):
-        super(TunProtoUDP, self).launch('udp')
-
-class TunProtoFD(TunProtoBase):
-    def __init__(self, local, peer, home_path, key):
-        super(TunProtoFD, self).__init__(local, peer, home_path, key)
-    
-    def launch(self):
-        super(TunProtoFD, self).launch('fd')
-
-class TunProtoGRE(TunProtoBase):
-    def __init__(self, local, peer, home_path, key):
-        super(TunProtoGRE, self).__init__(local, peer, home_path, key)
-        self.mode = 'pl-gre-ip'
-
-    def launch(self):
-        super(TunProtoGRE, self).launch('gre')
-
-class TunProtoTCP(TunProtoBase):
-    def __init__(self, local, peer, home_path, key):
-        super(TunProtoTCP, self).__init__(local, peer, home_path, key)
-    
-    def launch(self):
-        super(TunProtoTCP, self).launch('tcp')
-
-class TapProtoUDP(TunProtoUDP):
-    def __init__(self, local, peer, home_path, key):
-        super(TapProtoUDP, self).__init__(local, peer, home_path, key)
-        self.mode = 'pl-tap'
-
-class TapProtoTCP(TunProtoTCP):
-    def __init__(self, local, peer, home_path, key):
-        super(TapProtoTCP, self).__init__(local, peer, home_path, key)
-        self.mode = 'pl-tap'
-
-class TapProtoFD(TunProtoFD):
-    def __init__(self, local, peer, home_path, key):
-        super(TapProtoFD, self).__init__(local, peer, home_path, key)
-        self.mode = 'pl-tap'
-
-class TapProtoGRE(TunProtoGRE):
-    def __init__(self, local, peer, home_path, key):
-        super(TapProtoGRE, self).__init__(local, peer, home_path, key)
-        self.mode = 'pl-gre-eth'
-
-TUN_PROTO_MAP = {
-    'tcp' : TunProtoTCP,
-    'udp' : TunProtoUDP,
-    'fd'  : TunProtoFD,
-    'gre' : TunProtoGRE,
-}
-
-TAP_PROTO_MAP = {
-    'tcp' : TapProtoTCP,
-    'udp' : TapProtoUDP,
-    'fd'  : TapProtoFD,
-    'gre' : TapProtoGRE,
-}
-
diff --git a/src/nepi/testbeds/planetlab/util.py b/src/nepi/testbeds/planetlab/util.py
deleted file mode 100644 (file)
index e320c4c..0000000
+++ /dev/null
@@ -1,124 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID, TESTBED_VERSION
-from nepi.core import testbed_impl
-from nepi.core.metadata import Parallel
-from nepi.util.constants import TIME_NOW
-from nepi.util.graphtools import mst
-from nepi.util import ipaddr2
-from nepi.util import environ
-from nepi.util import server
-from nepi.util.parallel import ParallelRun
-import sys
-import os
-import os.path
-import time
-import resourcealloc
-import collections
-import operator
-import functools
-import socket
-import struct
-import tempfile
-import subprocess
-import random
-import shutil
-import logging
-import metadata
-import weakref
-
-def getAPI(user, pass_, **kw):
-    import plcapi
-    return plcapi.PLCAPI(username=user, password=pass_, **kw)
-
-def filterBlacklist(candidates):
-    blpath = environ.homepath('plblacklist')
-    
-    try:
-        bl = open(blpath, "r")
-    except:
-        return candidates
-        
-    try:
-        blacklist = set(
-            map(int,
-                map(str.strip, bl.readlines())
-            )
-        )
-        return [ x for x in candidates if x not in blacklist ]
-    finally:
-        bl.close()
-
-def appendBlacklist(node_ids):
-    if not isinstance(node_ids, list):
-        node_ids = [ node_ids ]
-    
-    blpath = environ.homepath('plblacklist')
-    bl = open(blpath, "a")
-    
-    try:
-        for node_id in node_ids:
-            bl.write("%s\n" % (node_id,))
-    finally:
-        bl.close()
-
-def getVnet(api, slicename):
-    return api.GetSliceVsysNetTag(slicename)
-
-def getNodes(api, num, **constraints):
-    # Now do the backtracking search for a suitable solution
-    # First with existing slice nodes
-    reqs = []
-    nodes = []
-    
-    import node as Node
-        
-    for i in xrange(num):
-        node = Node.Node(api)
-        node.min_num_external_interface = 1
-        nodes.append(node)
-    
-    node = nodes[0]
-    candidates = filterBlacklist(node.find_candidates())
-    reqs = [candidates] * num
-
-    def pickbest(fullset, nreq, node=nodes[0]):
-        if len(fullset) > nreq:
-            fullset = zip(node.rate_nodes(fullset),fullset)
-            fullset.sort(reverse=True)
-            del fullset[nreq:]
-            return set(map(operator.itemgetter(1),fullset))
-        else:
-            return fullset
-    
-    solution = resourcealloc.alloc(reqs, sample=pickbest)
-    
-    # Do assign nodes
-    runner = ParallelRun(maxthreads=4)
-    for node, node_id in zip(nodes, solution):
-        runner.put(node.assign_node_id, node_id)
-    runner.join()
-    
-    return nodes
-
-def getSpanningTree(nodes, root = None, maxbranching = 2, hostgetter = operator.attrgetter('hostname')):
-    if not root:
-        # Pick root (deterministically)
-        root = min(nodes, key=hostgetter)
-    
-    # Obtain all IPs in numeric format
-    # (which means faster distance computations)
-    for node in nodes:
-        node._ip = server.gethostbyname(hostgetter(node))
-        node._ip_n = struct.unpack('!L', socket.inet_aton(node._ip))[0]
-    
-    # Compute plan
-    # NOTE: the plan is an iterator
-    plan = mst.mst(
-        nodes,
-        lambda a,b : ipaddr2.ipdistn(a._ip_n, b._ip_n),
-        root = root,
-        maxbranching = maxbranching)
-
-    return plan
-
diff --git a/src/nepi/util/constants.py b/src/nepi/util/constants.py
deleted file mode 100644 (file)
index 08c5f39..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-# -*- coding: utf-8 -*-
-
-AF_INET = 0
-AF_INET6 = 1
-
-TIME_NOW = "0s"
-
-CONNECTION_DELAY = 0
-
-ATTR_NEPI_TESTBED_ENVIRONMENT_SETUP = "_nepi_testbed_environment_setup"
-
-class AttributeCategories:
-    CATEGORY_DEPLOYMENT = "Deployment"
-   
-class FactoryCategories:
-    CATEGORY_APPLICATIONS = "Applications"
-    CATEGORY_CHANNELS = "Channels"
-    CATEGORY_DEVICES = "Devices"
-    CATEGORY_DELAY_MODELS = "Delay models"
-    CATEGORY_ENERGY_MODELS = "Energy models"
-    CATEGORY_ERROR_MODELS = "Error models"
-    CATEGORY_MAC_MODELS = "Mac models"
-    CATEGORY_MANAGERS = "Managers"
-    CATEGORY_MOBILITY_MODELS = "Mobility models"
-    CATEGORY_NODES = "Nodes"
-    CATEGORY_LOSS_MODELS = "Loss models"
-    CATEGORY_PHY_MODELS = "Phy models"
-    CATEGORY_PROTOCOLS = "Protocols"
-    CATEGORY_ROUTING = "Routing"
-    CATEGORY_QUEUES = "Queues"
-    CATEGORY_SERVICE_FLOWS = "Service Flows"
-    CATEGORY_TUNNELS = "Tunnels"
-
-class ApplicationStatus:
-    STATUS_NOT_STARTED = 0
-    STATUS_RUNNING = 1
-    STATUS_FINISHED = 2
-    STATUS_UNDETERMINED = 3
-
-class TestbedStatus:
-    STATUS_ZERO = 0
-    STATUS_SETUP = 1
-    STATUS_CREATED = 2
-    STATUS_CONNECTED = 3
-    STATUS_CROSS_CONNECTED = 4
-    STATUS_CONFIGURED = 5
-    STATUS_STARTED = 6
-    STATUS_STOPPED = 7
-    STATUS_FAILED = 8
-    STATUS_UNRESPONSIVE = 9
-
-class DeploymentConfiguration:
-    MODE_SINGLE_PROCESS = "SINGLE"
-    MODE_DAEMON = "DAEMON"
-    ACCESS_SSH = "SSH"
-    ACCESS_LOCAL = "LOCAL"
-    ERROR_LEVEL = "Error"
-    DEBUG_LEVEL = "Debug"
-    POLICY_FAIL = "Fail"
-    POLICY_RECOVER = "Recover"
-    POLICY_RESTART = "Restart"
-    
-    DEPLOYMENT_MODE = "deployment_mode"
-    DEPLOYMENT_COMMUNICATION = "deployment_communication"
-
-    DEPLOYMENT_HOST = "deployment_host"
-    DEPLOYMENT_USER = "deployment_user"
-    DEPLOYMENT_PORT = "deployment_port"
-    DEPLOYMENT_KEY  = "deployment_key"
-    
-    DEPLOYMENT_ENVIRONMENT_SETUP = "deployment_environment_setup"
-    
-    ROOT_DIRECTORY = "rootDirectory"
-    USE_AGENT = "useAgent"
-    USE_SUDO = "useSudo"
-    LOG_LEVEL = "logLevel"
-    RECOVER = "recover"
-    RECOVERY_POLICY = "recoveryPolicy"
-    CLEAN_ROOT = "cleanRoot"
-
-
diff --git a/src/nepi/util/defer.py b/src/nepi/util/defer.py
deleted file mode 100644 (file)
index a1937b9..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-class Defer:
-    class NONE:
-        pass
-    
-    def __init__(self, ojetwait):
-        self.__ojet = Defer.NONE
-        self.__ojetwait = ojetwait
-    def __getattr__(self, attr):
-        if attr in ('_Defer__ojet', '_Defer__ojetwait', '_get'):
-            try:
-                return self.__dict__[attr]
-            except KeyError:
-                raise AttributeError, attr
-        else:
-            if self.__ojet is Defer.NONE:
-                self.__ojet = self.__ojetwait()
-            return getattr(self.__ojet, attr)
-    def __setattr__(self, attr, value):
-        if attr in ('_Defer__ojet', '_Defer__ojetwait'):
-            self.__dict__[attr] = value
-        else:
-            if self.__ojet is Defer.NONE:
-                self.__ojet = self.__ojetwait()
-                self.__ojetwait = None
-            return setattr(self.__ojet, attr, value)
-    def _get(self):
-        if self.__ojet is Defer.NONE:
-            self.__ojet = self.__ojetwait()
-        return self.__ojet
-
-    def __nonzero__(self):
-        return bool(self._get())
-    
index 0f2e455..599140a 100644 (file)
@@ -1,10 +1,32 @@
+import ctypes
+import imp
+import sys
 
-import os, subprocess, os.path
+import os, os.path, re, signal, shutil, socket, subprocess, tempfile
 
 __all__ =  ["python", "ssh_path"]
 __all__ += ["rsh", "tcpdump_path", "sshd_path"]
 __all__ += ["execute", "backticks"]
 
+
+# Unittest from Python 2.6 doesn't have these decorators
+def _bannerwrap(f, text):
+    name = f.__name__
+    def banner(*args, **kwargs):
+        sys.stderr.write("*** WARNING: Skipping test %s: `%s'\n" %
+                (name, text))
+        return None
+    return banner
+
+def skip(text):
+    return lambda f: _bannerwrap(f, text)
+
+def skipUnless(cond, text):
+    return (lambda f: _bannerwrap(f, text)) if not cond else lambda f: f
+
+def skipIf(cond, text):
+    return (lambda f: _bannerwrap(f, text)) if cond else lambda f: f
+
 def find_bin(name, extra_path = None):
     search = []
     if "PATH" in os.environ:
@@ -31,6 +53,25 @@ def find_bin_or_die(name, extra_path = None):
                 "continue.") % name)
     return r
 
+def find_bin(name, extra_path = None):
+    search = []
+    if "PATH" in os.environ:
+        search += os.environ["PATH"].split(":")
+    for pref in ("/", "/usr/", "/usr/local/"):
+        for d in ("bin", "sbin"):
+            search.append(pref + d)
+    if extra_path:
+        search += extra_path
+
+    for d in search:
+            try:
+                os.stat(d + "/" + name)
+                return d + "/" + name
+            except OSError, e:
+                if e.errno != os.errno.ENOENT:
+                    raise
+    return None
+
 ssh_path = find_bin_or_die("ssh")
 python_path = find_bin_or_die("python")
 
@@ -56,44 +97,85 @@ def backticks(cmd):
         raise RuntimeError("Error executing `%s': %s" % (" ".join(cmd), err))
     return out
 
-def homepath(path, app='.nepi', mode = 0500, directory = False):
-    home = os.environ.get('HOME')
-    if home is None:
-        home = os.path.join(os.sep, 'home', os.getlogin())
-    
-    path = os.path.join(home, app, path)
-    if directory:
-        dirname = path
-    else:
-        dirname = os.path.dirname(path)
-    if not os.path.exists(dirname):
-        os.makedirs(dirname)
-    
-    return path
-
-def find_testbed(testbed_id):
-    mod_name = None
-    
-    # look for environment-specified testbeds
-    if 'NEPI_TESTBEDS' in os.environ:
-        try:
-            # parse testbed map
-            #   split space-separated items, filter empty items
-            testbed_map = filter(bool,os.environ['NEPI_TESTBEDS'].strip().split(' '))
-            #   split items, keep pairs only, build map
-            testbed_map = dict([map(str.strip,i.split(':',1)) for i in testbed_map if ':' in i])
-        except:
-            import traceback, sys
-            traceback.print_exc(file=sys.stderr)
-            
-            # ignore malformed environment
-            testbed_map = {}
-        
-        mod_name = testbed_map.get(testbed_id)
-    
-    if mod_name is None:
-        # no explicit map, load built-in testbeds
-        mod_name = "nepi.testbeds.%s" % (testbed_id.lower())
-
-    return mod_name
+
+# SSH stuff
+
+def gen_ssh_keypair(filename):
+    ssh_keygen = nepi.util.environ.find_bin_or_die("ssh-keygen")
+    args = [ssh_keygen, '-q', '-N', '', '-f', filename]
+    assert subprocess.Popen(args).wait() == 0
+    return filename, "%s.pub" % filename
+
+def add_key_to_agent(filename):
+    ssh_add = nepi.util.environ.find_bin_or_die("ssh-add")
+    args = [ssh_add, filename]
+    null = file("/dev/null", "w")
+    assert subprocess.Popen(args, stderr = null).wait() == 0
+    null.close()
+
+def get_free_port():
+    s = socket.socket()
+    s.bind(("127.0.0.1", 0))
+    port = s.getsockname()[1]
+    return port
+
+_SSH_CONF = """ListenAddress 127.0.0.1:%d
+Protocol 2
+HostKey %s
+UsePrivilegeSeparation no
+PubkeyAuthentication yes
+PasswordAuthentication no
+AuthorizedKeysFile %s
+UsePAM no
+AllowAgentForwarding yes
+PermitRootLogin yes
+StrictModes no
+PermitUserEnvironment yes
+"""
+
+def gen_sshd_config(filename, port, server_key, auth_keys):
+    conf = open(filename, "w")
+    text = _SSH_CONF % (port, server_key, auth_keys)
+    conf.write(text)
+    conf.close()
+    return filename
+
+def gen_auth_keys(pubkey, output, environ):
+    #opts = ['from="127.0.0.1/32"'] # fails in stupid yans setup
+    opts = []
+    for k, v in environ.items():
+        opts.append('environment="%s=%s"' % (k, v))
+
+    lines = file(pubkey).readlines()
+    pubkey = lines[0].split()[0:2]
+    out = file(output, "w")
+    out.write("%s %s %s\n" % (",".join(opts), pubkey[0], pubkey[1]))
+    out.close()
+    return output
+
+def start_ssh_agent():
+    ssh_agent = nepi.util.environ.find_bin_or_die("ssh-agent")
+    proc = subprocess.Popen([ssh_agent], stdout = subprocess.PIPE)
+    (out, foo) = proc.communicate()
+    assert proc.returncode == 0
+    d = {}
+    for l in out.split("\n"):
+        match = re.search("^(\w+)=([^ ;]+);.*", l)
+        if not match:
+            continue
+        k, v = match.groups()
+        os.environ[k] = v
+        d[k] = v
+    return d
+
+def stop_ssh_agent(data):
+    # No need to gather the pid, ssh-agent knows how to kill itself; after we
+    # had set up the environment
+    ssh_agent = nepi.util.environ.find_bin_or_die("ssh-agent")
+    null = file("/dev/null", "w")
+    proc = subprocess.Popen([ssh_agent, "-k"], stdout = null)
+    null.close()
+    assert proc.wait() == 0
+    for k in data:
+        del os.environ[k]
 
similarity index 56%
rename from src/nepi/testbeds/planetlab/rspawn.py
rename to src/nepi/util/execfuncs.py
index 20c36a5..7e2d506 100644 (file)
@@ -1,35 +1,66 @@
-# Utility library for spawning remote asynchronous tasks
-from nepi.util import server
-import getpass
-import logging
+from nepi.util.sshfuncs import RUNNING, FINISHED, NOT_STARTED, STDOUT 
 
-class STDOUT: 
-    """
-    Special value that when given to remote_spawn in stderr causes stderr to 
-    redirect to whatever stdout was redirected to.
-    """
+import subprocess
 
-class RUNNING:
+def lexec(command, 
+        user = None, 
+        sudo = False,
+        stdin = None,
+        env = None):
     """
-    Process is still running
+    Executes a local command, returns ((stdout,stderr),process)
     """
+    if env:
+        export = ''
+        for envkey, envval in env.iteritems():
+            export += '%s=%s ' % (envkey, envval)
+        command = "%s %s" % (export, command)
 
-class FINISHED:
-    """
-    Process is finished
-    """
+    if sudo:
+        command = "sudo %s" % command
+    elif user:
+        command = "su %s ; %s " % (user, command)
+
+    p = subprocess.Popen(command, 
+            stdout = subprocess.PIPE, 
+            stderr = subprocess.PIPE,
+            stdin  = stdin)
 
-class NOT_STARTED:
+    out, err = p.communicate()
+    return ((out, err), proc)
+
+def lcopy(source, dest, recursive = False):
     """
-    Process hasn't started running yet (this should be very rare)
+    Copies from/to localy.
     """
+    
+    if TRACE:
+        print "scp", source, dest
+    
+    command = ["cp"]
+    if recursive:
+        command.append("-R")
+    
+    command.append(src)
+    command.append(dst)
+    
+    p = subprocess.Popen(command, 
+        stdout=subprocess.PIPE, 
+        stderr=subprocess.PIPE)
 
-def remote_spawn(command, pidfile, stdout='/dev/null', stderr=STDOUT, stdin='/dev/null', home=None, create_home=False, sudo=False,
-        host = None, port = None, user = None, agent = None, 
-        ident_key = None, server_key = None,
-        tty = False, hostip = None):
+    out, err = p.communicate()
+    return ((out, err), proc)
+   
+def lspawn(command, pidfile, 
+        stdout = '/dev/null', 
+        stderr = STDOUT, 
+        stdin = '/dev/null', 
+        home = None, 
+        create_home = False, 
+        sudo = False,
+        user = None): 
     """
-    Spawn a remote command such that it will continue working asynchronously.
+    Spawn a local command such that it will continue working asynchronously.
     
     Parameters:
         command: the command to run - it should be a single line.
@@ -48,8 +79,6 @@ def remote_spawn(command, pidfile, stdout='/dev/null', stderr=STDOUT, stdin='/de
         
         sudo: whether the command needs to be executed as root
         
-        host/port/user/agent/ident_key: see nepi.util.server.popen_ssh_command
-    
     Returns:
         (stdout, stderr), process
         
@@ -65,70 +94,41 @@ def remote_spawn(command, pidfile, stdout='/dev/null', stderr=STDOUT, stdin='/de
     
     daemon_command = '{ { %(command)s  > %(stdout)s 2>%(stderr)s < %(stdin)s & } ; echo $! 1 > %(pidfile)s ; }' % {
         'command' : command,
-        'pidfile' : server.shell_escape(pidfile),
-        
+        'pidfile' : shell_escape(pidfile),
         'stdout' : stdout,
         'stderr' : stderr,
         'stdin' : stdin,
     }
     
     cmd = "%(create)s%(gohome)s rm -f %(pidfile)s ; %(sudo)s nohup bash -c %(command)s " % {
-            'command' : server.shell_escape(daemon_command),
-            
+            'command' : shell_escape(daemon_command),
             'sudo' : 'sudo -S' if sudo else '',
-            
-            'pidfile' : server.shell_escape(pidfile),
-            'gohome' : 'cd %s ; ' % (server.shell_escape(home),) if home else '',
-            'create' : 'mkdir -p %s ; ' % (server.shell_escape,) if create_home else '',
+            'pidfile' : shell_escape(pidfile),
+            'gohome' : 'cd %s ; ' % (shell_escape(home),) if home else '',
+            'create' : 'mkdir -p %s ; ' % (shell_escape(home),) if create_home else '',
         }
 
-    (out,err),proc = server.popen_ssh_command(
-        cmd,
-        host = host,
-        port = port,
-        user = user,
-        agent = agent,
-        ident_key = ident_key,
-        server_key = server_key,
-        tty = tty ,
-        hostip = hostip
-        )
+    (out,err),proc = lexec(cmd)
     
     if proc.wait():
         raise RuntimeError, "Failed to set up application on host %s: %s %s" % (host, out,err,)
 
     return (out,err),proc
 
-@server.eintr_retry
-def remote_check_pid(pidfile,
-        host = None, port = None, user = None, agent = None, 
-        ident_key = None, server_key = None, hostip = None):
+def lcheckpid(pidfile):
     """
     Check the pidfile of a process spawned with remote_spawn.
     
     Parameters:
         pidfile: the pidfile passed to remote_span
         
-        host/port/user/agent/ident_key: see nepi.util.server.popen_ssh_command
-    
     Returns:
         
         A (pid, ppid) tuple useful for calling remote_status and remote_kill,
         or None if the pidfile isn't valid yet (maybe the process is still starting).
     """
 
-    (out,err),proc = server.popen_ssh_command(
-        "cat %(pidfile)s" % {
-            'pidfile' : pidfile,
-        },
-        host = host,
-        port = port,
-        user = user,
-        agent = agent,
-        ident_key = ident_key,
-        server_key = server_key,
-        hostip = hostip
-        )
+    (out,err),proc = lexec("cat %s" % pidfile )
         
     if proc.wait():
         return None
@@ -140,60 +140,39 @@ def remote_check_pid(pidfile,
             # Ignore, many ways to fail that don't matter that much
             return None
 
-
-@server.eintr_retry
-def remote_status(pid, ppid, 
-        host = None, port = None, user = None, agent = None, 
-        ident_key = None, server_key = None, hostip = None):
+def lstatus(pid, ppid): 
     """
     Check the status of a process spawned with remote_spawn.
     
     Parameters:
         pid/ppid: pid and parent-pid of the spawned process. See remote_check_pid
         
-        host/port/user/agent/ident_key: see nepi.util.server.popen_ssh_command
-    
     Returns:
         
         One of NOT_STARTED, RUNNING, FINISHED
     """
 
-    (out,err),proc = server.popen_ssh_command(
+    (out,err),proc = lexec(
         # Check only by pid. pid+ppid does not always work (especially with sudo) 
         " (( ps --pid %(pid)d -o pid | grep -c %(pid)d && echo 'wait')  || echo 'done' ) | tail -n 1" % {
             'ppid' : ppid,
             'pid' : pid,
-        },
-        host = host,
-        port = port,
-        user = user,
-        agent = agent,
-        ident_key = ident_key,
-        server_key = server_key,
-        hostip = hostip
-        )
+        })
     
     if proc.wait():
         return NOT_STARTED
     
     status = False
-    if err:
-        if err.strip().find("Error, do this: mount -t proc none /proc") >= 0:
-            status = True
-    elif out:
+    if out:
         status = (out.strip() == 'wait')
     else:
         return NOT_STARTED
     return RUNNING if status else FINISHED
-    
 
-@server.eintr_retry
-def remote_kill(pid, ppid, sudo = False,
-        host = None, port = None, user = None, agent = None, 
-        ident_key = None, server_key = None, hostip = None,
-        nowait = False):
+def lkill(pid, ppid, sudo = False):
     """
-    Kill a process spawned with remote_spawn.
+    Kill a process spawned with lspawn.
     
     First tries a SIGTERM, and if the process does not end in 10 seconds,
     it sends a SIGKILL.
@@ -202,18 +181,13 @@ def remote_kill(pid, ppid, sudo = False,
         pid/ppid: pid and parent-pid of the spawned process. See remote_check_pid
         
         sudo: whether the command was run with sudo - careful killing like this.
-        
-        host/port/user/agent/ident_key: see nepi.util.server.popen_ssh_command
     
     Returns:
         
         Nothing, should have killed the process
     """
     
-    if sudo:
-        subkill = "$(ps --ppid %(pid)d -o pid h)" % { 'pid' : pid }
-    else:
-        subkill = ""
+    subkill = "$(ps --ppid %(pid)d -o pid h)" % { 'pid' : pid }
     cmd = """
 SUBKILL="%(subkill)s" ;
 %(sudo)s kill -- -%(pid)d $SUBKILL || /bin/true
@@ -236,23 +210,12 @@ fi
     if nowait:
         cmd = "( %s ) >/dev/null 2>/dev/null </dev/null &" % (cmd,)
 
-    (out,err),proc = server.popen_ssh_command(
+    (out,err),proc = lexec(
         cmd % {
             'ppid' : ppid,
             'pid' : pid,
             'sudo' : 'sudo -S' if sudo else '',
             'subkill' : subkill,
-        },
-        host = host,
-        port = port,
-        user = user,
-        agent = agent,
-        ident_key = ident_key,
-        server_key = server_key,
-        hostip = hostip
-        )
+        })
     
-    # wait, don't leave zombies around
-    proc.wait()
-
 
diff --git a/src/nepi/util/graphical_info.py b/src/nepi/util/graphical_info.py
deleted file mode 100644 (file)
index 04f93fe..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-# -*- coding: utf-8 -*-
-
-class GraphicalInfo(object):
-    """ This class allows to describe the position and dimensions of a 
-    2D object in a GUI canvas"""
-    def __init__(self):
-        self.x = 0.0
-        self.y = 0.0
-        self.width = 0.0
-        self.height = 0.0
-
diff --git a/src/nepi/util/graphtools/__init__.py b/src/nepi/util/graphtools/__init__.py
deleted file mode 100644 (file)
index 8b13789..0000000
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/src/nepi/util/graphtools/mst.py b/src/nepi/util/graphtools/mst.py
deleted file mode 100644 (file)
index 1c59b2f..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-import random
-import bisect
-
-def mst(nodes, connected, 
-        maxsoftbranching = None,
-        maxbranching = None, 
-        root = None,
-        untie = lambda l : iter(l).next()):
-    """
-    Returns an iterator over pairs (Node, Parent)
-    which form the spanning tree.
-    
-    Params:
-    
-        nodes: a list of nodes (can be anything)
-        
-        connected: a callable that takes two nodes
-            and returns either an edge weight (one
-            that can be compared with '<' with other
-            edge weights) or None if they're not
-            connected.
-        
-        maxbranching: the maximum number of branches
-            (children) allowed for a node. None for
-            no limit.
-            When maxbranching is used, the algorithm
-            implemented here gives no guarantee
-            of optimality (the spanning tree may not
-            be the minimum), as that problem becomes
-            NP-hard and we want a quick answer.
-        
-        maxsoftbranching: soft branching limit.
-            The algorithm is allowed to break it
-            if it has no other choice. Trees build with
-            soft branching limits are usually less
-            balanced than when using hard limits,
-            but the computation takes a lot less time.
-        
-        root: the desired root of the spanning tree,
-            or None to pick a random one.
-        
-        untie: a callable that, given an iterable
-            of candidate entries of equal weight for
-            the selection to be made, picks one to
-            be added to the spanning tree. The default
-            picks arbitrarily.
-            Entries are of the form (<weight>,<from>,<to>)
-            with <from> and <to> being indices in the
-            nodes array
-    """
-    
-    if not nodes:
-        return
-        
-    if root is None:
-        root = random.sample(nodes, 1)[0]
-    
-    # We want the root's index
-    root = nodes.index(root)
-    
-    # Unpicked nodes, nodes we still have to add.
-    unpicked = set(xrange(len(nodes)))
-    
-    # Distance maps
-    #   We need:
-    #       min distance to picked node
-    #       which picked node
-    #   Or None if it was a picked or unconnected node
-    
-    N = len(nodes)
-    distance = [None] * N
-    which    = [None] * N
-    
-    # Count branches
-    branching = [0] * N
-    
-    # Initialize with distances to root
-    def update_distance_map(fornode):
-        ref = nodes[fornode]
-        for other, prevdistance in enumerate(distance):
-            other_node = nodes[other]
-            d = connected(ref, other_node)
-            if d is not None:
-                if prevdistance is None or prevdistance > d:
-                    distance[other] = d
-                    which[other] = fornode
-        distance[fornode] = None
-        which[fornode] = None
-    
-    update_distance_map(root)
-    unpicked.remove(root)
-    
-    # Add remaining nodes, yield edges
-    def minrange(dsorted):
-        return dsorted[:bisect.bisect(dsorted, (dsorted[0][0], N, N))]
-        
-    needsrebuild = False
-    while unpicked:
-        # Rebuild the distance map if needed
-        # (ie, when a node in the partial MST is no longer
-        # a candidate for adjoining because of saturation)
-        if needsrebuild:
-            print "Rebuilding distance map..."
-            distance = [None] * N
-            which    = [None] * N
-            for n in xrange(N):
-                if n not in unpicked and branching[n] < maxbranching:
-                    update_distance_map(n)
-        
-        # Pick the closest unpicked node
-        dsorted = [(d,i,w) for i,(d,w) in enumerate(zip(distance, which)) 
-                   if d is not None 
-                      and i in unpicked
-                      and (maxbranching is None or branching[w] < maxbranching)
-                      and (maxsoftbranching is None or branching[w] < maxsoftbranching)]
-        if not dsorted and maxsoftbranching is not None:
-            dsorted = [(d,i,w) for i,(d,w) in enumerate(zip(distance, which)) 
-                       if d is not None 
-                          and i in unpicked
-                          and (maxbranching is None or branching[w] < maxbranching)]
-        if not dsorted:
-            raise AssertionError, "Unconnected graph"
-        
-        dsorted.sort()
-        dsorted = minrange(dsorted)
-        
-        if len(dsorted) > 1:
-            winner = untie(dsorted)
-        elif dsorted:
-            winner = dsorted[0]
-        else:
-            raise AssertionError, "Unconnected graph"
-        
-        weight, edgefrom, edgeto = winner
-        
-        branching[edgeto] += 1
-        
-        if maxbranching is not None and branching[edgeto] == maxbranching:
-            needsrebuild = True
-        
-        # Yield edge, update distance map to account
-        # for the picked node
-        yield (nodes[edgefrom], nodes[edgeto])
-        
-        update_distance_map(edgefrom)
-        unpicked.remove(edgefrom)
-
-
index 7f5ea3c..913e6ad 100644 (file)
@@ -1,13 +1,12 @@
-# -*- coding: utf-8 -*-
-
+# FIXME: This class is not thread-safe. 
+# Should it be made thread-safe?
 class GuidGenerator(object):
     def __init__(self):
         self._guids = list()
 
     def next(self, guid = None):
         if guid != None:
-            if guid in self._guids:
-                raise RuntimeError("guid %d is already assigned" % guid)
+            return guid
         else:
             last_guid = 0 if len(self._guids) == 0 else self._guids[-1]
             guid = last_guid + 1 
diff --git a/src/nepi/util/ipaddr2.py b/src/nepi/util/ipaddr2.py
deleted file mode 100644 (file)
index 2bfa0c9..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import struct
-import random
-import socket
-import array
-
-def ipv4_dot2mask(mask):
-    mask = mask.split('.',4) # a.b.c.d -> [a,b,c,d]
-    mask = map(int,mask) # to ints
-    
-    n = 0
-    while mask and mask[0] == 0xff:
-        n += 8
-        del mask[0]
-    
-    if mask:
-        mask = mask[0]
-        while mask:
-            n += 1
-            mask = (mask << 1) & 0xff
-    
-    return n
-
-def ipv4_mask2dot(mask):
-    mask = ((1L << mask)-1) << (32 - mask)
-    mask = struct.pack(">I",mask)
-    mask = '.'.join(map(str,map(ord,mask)))
-    return mask
-
-def ipdist(a,b):
-    a = struct.unpack('!L',socket.inet_aton(a))[0]
-    b = struct.unpack('!L',socket.inet_aton(b))[0]
-    d = 32
-    while d and (b&0x80000000)==(a&0x80000000):
-        a <<= 1
-        b <<= 1
-        d -= 1
-    return d
-
-def ipdistn(a,b):
-    d = 32
-    while d and (b&0x80000000)==(a&0x80000000):
-        a <<= 1
-        b <<= 1
-        d -= 1
-    return d
-
-def inet_cksum(packet):
-    words = array.array('H')
-    words.fromstring(packet[:len(packet)&~0x1])
-    htons = socket.htons
-    cksum = 0
-    for word in words:
-        cksum += htons(word)
-    if len(packet)&0x1:
-        cksum += ord(packet[-1])
-    cksum &= 0xffffffff
-    cksum = (cksum >> 16) + (cksum & 0xffff)
-    cksum += (cksum >> 16)
-    return ~cksum
-
-def iphdr(src, dst, datalen, ttl, proto, tos=0, nocksum=False, ipid=0):
-    cksum = 0
-    src = socket.inet_aton(src)
-    dst = socket.inet_aton(dst)
-    hdr = struct.pack('!BBHHHBBH4s4s', 
-        0x45, tos, datalen + 5*4, ipid, 0, 
-        ttl, proto, cksum & 0xffff, src, dst)
-    if not nocksum:
-        cksum = inet_cksum(hdr)
-        hdr = struct.pack('!BBHHHBBH4s4s', 
-            0x45, tos, datalen + 5*4, ipid, 0, 
-            ttl, proto, cksum & 0xffff, src, dst)
-    return hdr
-
-def igmp(type, mxrt, grp, nocksum=False):
-    cksum = 0
-    grp = socket.inet_aton(grp)
-    ighdr = struct.pack('!BBH4s', type, mxrt, cksum & 0xffff, grp)
-    if not nocksum:
-        cksum = inet_cksum(ighdr)
-        ighdr = struct.pack('!BBH4s', type, mxrt, cksum & 0xffff, grp)
-    return ighdr
-
-def ipigmp(src, dst, ttl, type, mxrt, grp, noipcksum=False, noigmpcksum=False):
-    igmpp = igmp(type, mxrt, grp, nocksum=noigmpcksum)
-    iph = iphdr(src, dst, len(igmpp), ttl, 2, tos=0xc0, nocksum=noipcksum)
-    return iph+igmpp
-
-
index 95baad7..8dc39a7 100644 (file)
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
 import threading
 import Queue
 import traceback
diff --git a/src/nepi/util/parser.py b/src/nepi/util/parser.py
new file mode 100644 (file)
index 0000000..b06cb7e
--- /dev/null
@@ -0,0 +1,147 @@
+from nepi.design.box import Box
+
+from xml.dom import minidom
+import sys
+
+STRING = "string"
+BOOL = "bool"
+INTEGER = "integer"
+DOUBLE = "float"
+
+def xmlencode(s):
+    if isinstance(s, str):
+        rv = s.decode("latin1")
+    elif not isinstance(s, unicode):
+        rv = unicode(s)
+    else:
+        rv = s
+    return rv.replace(u'\x00',u'&#0000;')
+
+def xmldecode(s):
+    return s.replace(u'&#0000',u'\x00').encode("utf8")
+
+def from_type(value):
+    if isinstance(value, str):
+        return STRING
+    if isinstance(value, bool):
+        return BOOL
+    if isinstance(value, int):
+        return INTEGER
+    if isinstance(value, float):
+        return DOUBLE
+
+def to_type(type, value):
+    if type == STRING:
+        return str(value)
+    if type == BOOL:
+        return value == "True"
+    if type == INTEGER:
+        return int(value)
+    if type == DOUBLE:
+        return float(value)
+
+class XMLParser(object):
+    def to_xml(self, box):
+        doc = minidom.Document()
+
+        root = doc.createElement("boxes")
+        doc.appendChild(root)
+
+        traversed = dict()
+        self._traverse_boxes(doc, traversed, box)
+
+        # Keep the order
+        for guid in sorted(traversed.keys()):
+            bnode = traversed[guid]
+            root.appendChild(bnode)
+       
+        try:
+            xml = doc.toprettyxml(indent="    ", encoding="UTF-8")
+        except:
+            print >>sys.stderr, "Oops: generating XML from %s" % (data,)
+            raise
+        
+        return xml
+
+    def _traverse_boxes(self, doc, traversed, box):
+        bnode = doc.createElement("box")
+        bnode.setAttribute("guid", xmlencode(box.guid))
+        bnode.setAttribute("label", xmlencode(box.label))
+        bnode.setAttribute("x", xmlencode(box.x))
+        bnode.setAttribute("y", xmlencode(box.y))
+        bnode.setAttribute("width", xmlencode(box.width))
+        bnode.setAttribute("height", xmlencode(box.height))
+
+        traversed[box.guid] = bnode
+
+        anode = doc.createElement("attributes")
+        bnode.appendChild(anode)
+        for name in sorted(box.attributes):
+            value = getattr(box.a, name)
+            aanode = doc.createElement("attribute")
+            anode.appendChild(aanode)
+            aanode.setAttribute("name", xmlencode(name))
+            aanode.setAttribute("value", xmlencode(value))
+            aanode.setAttribute("type", from_type(value))
+
+        tnode = doc.createElement("tags")
+        bnode.appendChild(tnode)
+        for tag in sorted(box.tags):
+            ttnode = doc.createElement("tag")
+            tnode.appendChild(ttnode)
+            ttnode.setAttribute("name", xmlencode(tag))
+
+        cnode = doc.createElement("connections")
+        bnode.appendChild(cnode)
+        for b in sorted(box.connections):
+            ccnode = doc.createElement("connection")
+            cnode.appendChild(ccnode)
+            ccnode.setAttribute("guid", xmlencode(b.guid))
+            if b.guid not in traversed:
+                self._traverse_boxes(doc, traversed, b)
+
+    def from_xml(self, xml):
+        doc = minidom.parseString(xml)
+        bnode_list = doc.getElementsByTagName("box")
+
+        boxes = dict()
+        connections = dict()
+
+        for bnode in bnode_list:
+            if bnode.nodeType == doc.ELEMENT_NODE:
+                guid = int(bnode.getAttribute("guid"))
+                label = xmldecode(bnode.getAttribute("label"))
+                x = float(bnode.getAttribute("x"))
+                y = float(bnode.getAttribute("y"))
+                height = float(bnode.getAttribute("height"))
+                width = float(bnode.getAttribute("width"))
+                box = Box(label=label, guid=guid)
+                boxes[guid] = box
+
+                anode_list = bnode.getElementsByTagName("attribute") 
+                for anode in anode_list:
+                    name = xmldecode(anode.getAttribute("name"))
+                    value = xmldecode(anode.getAttribute("value"))
+                    type = xmldecode(anode.getAttribute("type"))
+                    value = to_type(type, value)
+                    setattr(box.a, name, value)
+                    
+                tnode_list = bnode.getElementsByTagName("tag") 
+                for tnode in tnode_list:
+                    value = xmldecode(tnode.getAttribute("name"))
+                    box.tadd(value)
+
+                connections[box] = set()
+                cnode_list = bnode.getElementsByTagName("connection")
+                for cnode in cnode_list:
+                    guid = int(cnode.getAttribute("guid"))
+                    connections[box].add(guid)
+
+        for box, conns in connections.iteritems():
+            for guid in conns:
+                b = boxes[guid]
+                box.connect(b)
+
+        return box
+
+
diff --git a/src/nepi/util/parser/_xml.py b/src/nepi/util/parser/_xml.py
deleted file mode 100644 (file)
index 683e4f5..0000000
+++ /dev/null
@@ -1,327 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from nepi.core.attributes import Attribute
-from nepi.util.parser.base import ExperimentData, ExperimentParser
-from xml.dom import minidom
-
-import sys
-
-def xmlencode(s):
-    if isinstance(s, str):
-        rv = s.decode("latin1")
-    elif not isinstance(s, unicode):
-        rv = unicode(s)
-    else:
-        rv = s
-    return rv.replace(u'\x00',u'&#0000;')
-
-def xmldecode(s):
-    return s.replace(u'&#0000',u'\x00').encode("utf8")
-
-class XmlExperimentParser(ExperimentParser):
-    def to_xml(self, experiment_description=None, data=None):
-        if experiment_description is not None:
-            data = self.to_data(experiment_description)
-        elif data is None:
-            raise TypeError, "XmlExperimentParser.to_xml needs either 'experiment_description' or 'data' arguments"
-        doc = minidom.Document()        
-        exp_tag = doc.createElement("experiment")
-        testbeds_tag = doc.createElement("testbeds")
-        exp_tag.appendChild(testbeds_tag)
-
-        elements_tags = dict()
-        for guid in sorted(data.guids):
-            if data.is_testbed_data(guid):
-                elements_tag = self.testbed_data_to_xml(doc, testbeds_tag, guid, data)
-                elements_tags[guid] = elements_tag
-            else:
-                self.box_data_to_xml(doc, elements_tags, guid, data)
-        doc.appendChild(exp_tag)
-        
-        try:
-            xml = doc.toprettyxml(indent="    ", encoding="UTF-8")
-        except:
-            print >>sys.stderr, "Oops: generating XML from %s" % (data,)
-            raise
-        
-        return xml
-
-    def testbed_data_to_xml(self, doc, parent_tag, guid, data):
-        testbed_tag = doc.createElement("testbed") 
-        testbed_tag.setAttribute("guid", xmlencode(guid))
-        (testbed_id, testbed_version) = data.get_testbed_data(guid)
-        testbed_tag.setAttribute("testbed_id", xmlencode(testbed_id))
-        testbed_tag.setAttribute("testbed_version", xmlencode(testbed_version))
-        parent_tag.appendChild(testbed_tag)
-        self.graphical_info_data_to_xml(doc, testbed_tag, guid, data)
-        self.attributes_data_to_xml(doc, testbed_tag, guid, data)
-        elements_tag = doc.createElement("elements")
-        testbed_tag.appendChild(elements_tag)
-        return elements_tag
-
-    def box_data_to_xml(self, doc, elements_tags, guid, data):
-        (testbed_guid, factory_id) = data.get_box_data(guid)
-        element_tag = doc.createElement("element")
-        parent_tag = elements_tags[testbed_guid]
-        parent_tag.appendChild(element_tag)
-        element_tag.setAttribute("factory_id", factory_id)
-        element_tag.setAttribute("guid", xmlencode(guid))
-        self.graphical_info_data_to_xml(doc, element_tag, guid, data)
-        self.factory_attributes_data_to_xml(doc, element_tag, guid, data)
-        self.attributes_data_to_xml(doc, element_tag, guid, data)
-        self.traces_data_to_xml(doc, element_tag, guid, data)
-        self.addresses_data_to_xml(doc, element_tag, guid, data)
-        self.routes_data_to_xml(doc, element_tag, guid, data)
-        self.connections_data_to_xml(doc, element_tag, guid, data)
-
-    def graphical_info_data_to_xml(self, doc, parent_tag, guid, data):
-        graphical_info_tag = doc.createElement("graphical_info") 
-        parent_tag.appendChild(graphical_info_tag)
-        (x, y, width, height) = data.get_graphical_info_data(guid)
-        graphical_info_tag.setAttribute("x", xmlencode(x))
-        graphical_info_tag.setAttribute("y", xmlencode(y))
-        graphical_info_tag.setAttribute("width", xmlencode(width))
-        graphical_info_tag.setAttribute("height", xmlencode(height))
-
-    def factory_attributes_data_to_xml(self, doc, parent_tag, guid, data):
-        factory_attributes_tag = doc.createElement("factory_attributes")
-        for (name, value) in data.get_factory_attribute_data(guid):
-            if value is not None:
-                factory_attribute_tag = doc.createElement("factory_attribute") 
-                factory_attributes_tag.appendChild(factory_attribute_tag)
-                factory_attribute_tag.setAttribute("name", name)
-                factory_attribute_tag.setAttribute("value", xmlencode(value))
-                factory_attribute_tag.setAttribute("type", self.type_to_standard(value))
-        if factory_attributes_tag.hasChildNodes():
-            parent_tag.appendChild(factory_attributes_tag)
-
-    def attributes_data_to_xml(self, doc, parent_tag, guid, data):
-        attributes_tag = doc.createElement("attributes") 
-        for name, value in data.get_attribute_data(guid):
-            if value is not None:
-                attribute_tag = doc.createElement("attribute") 
-                attributes_tag.appendChild(attribute_tag)
-                attribute_tag.setAttribute("name", name)
-                attribute_tag.setAttribute("value", xmlencode(value))
-                attribute_tag.setAttribute("type", self.type_to_standard(value))
-        if attributes_tag.hasChildNodes():
-            parent_tag.appendChild(attributes_tag)
-
-    def traces_data_to_xml(self, doc, parent_tag, guid, data):
-        traces_tag = doc.createElement("traces") 
-        for name in data.get_trace_data(guid):
-            trace_tag = doc.createElement("trace") 
-            traces_tag.appendChild(trace_tag)
-            trace_tag.setAttribute("name", name)
-        if traces_tag.hasChildNodes():
-            parent_tag.appendChild(traces_tag)
-
-    def addresses_data_to_xml(self, doc, parent_tag, guid, data):
-        addresses_tag = doc.createElement("addresses") 
-        for (address, netprefix, broadcast) in data.get_address_data(guid):
-            address_tag = doc.createElement("address") 
-            addresses_tag.appendChild(address_tag)
-            if address:
-                address_tag.setAttribute("Address", xmlencode(address))
-            address_tag.setAttribute("NetPrefix", xmlencode(netprefix))
-            if broadcast:
-                address_tag.setAttribute("Broadcast", xmlencode(broadcast))
-        if addresses_tag.hasChildNodes():
-            parent_tag.appendChild(addresses_tag)
-
-    def routes_data_to_xml(self, doc, parent_tag, guid, data):
-        routes_tag = doc.createElement("routes") 
-        for (destination, netprefix, nexthop, metric, device) \
-                in data.get_route_data(guid):
-            route_tag = doc.createElement("route") 
-            routes_tag.appendChild(route_tag)
-            route_tag.setAttribute("Destination", xmlencode(destination))
-            route_tag.setAttribute("NetPrefix", xmlencode(netprefix))
-            route_tag.setAttribute("NextHop", xmlencode(nexthop))
-            route_tag.setAttribute("Metric", xmlencode(metric))
-            route_tag.setAttribute("Device", xmlencode(device))
-        if routes_tag.hasChildNodes():
-            parent_tag.appendChild(routes_tag)
-
-    def connections_data_to_xml(self, doc, parent_tag, guid, data):
-        connections_tag = doc.createElement("connections") 
-        for (connector_type_name, other_guid, other_connector_type_name) \
-                in data.get_connection_data(guid):
-                connection_tag = doc.createElement("connection") 
-                connections_tag.appendChild(connection_tag)
-                connection_tag.setAttribute("connector", connector_type_name)
-                connection_tag.setAttribute("other_guid", xmlencode(other_guid))
-                connection_tag.setAttribute("other_connector",
-                        other_connector_type_name)
-        if connections_tag.hasChildNodes():
-            parent_tag.appendChild(connections_tag)
-
-    def from_xml_to_data(self, xml):
-        data = ExperimentData()
-        doc = minidom.parseString(xml)
-        testbeds_tag = doc.getElementsByTagName("testbeds")[0] 
-        testbed_tag_list = testbeds_tag.getElementsByTagName("testbed")
-        for testbed_tag in testbed_tag_list:
-            if testbed_tag.nodeType == doc.ELEMENT_NODE:
-                testbed_guid = int(testbed_tag.getAttribute("guid"))
-                elements_tag = testbed_tag.getElementsByTagName("elements")[0] 
-                elements_tag = testbed_tag.removeChild(elements_tag)
-                self.testbed_data_from_xml(testbed_tag, data)
-                element_tag_list = elements_tag.getElementsByTagName("element")
-                for element_tag in element_tag_list:
-                    if element_tag.nodeType == doc.ELEMENT_NODE:
-                        self.box_data_from_xml(element_tag, testbed_guid, data)
-        return data
-
-    def from_xml(self, experiment_description, xml):
-        data = self.from_xml_to_data(xml)
-        self.from_data(experiment_description, data)
-
-    def testbed_data_from_xml(self, tag, data):
-        testbed_guid = int(tag.getAttribute("guid"))
-        testbed_id = xmldecode(tag.getAttribute("testbed_id"))
-        testbed_version = xmldecode(tag.getAttribute("testbed_version"))
-        data.add_testbed_data(testbed_guid, testbed_id, testbed_version)
-        self.graphical_info_data_from_xml(tag, testbed_guid, data)
-        self.attributes_data_from_xml(tag, testbed_guid, data)
-
-    def box_data_from_xml(self, tag, testbed_guid, data):
-        guid = int(tag.getAttribute("guid"))
-        factory_id = xmldecode(tag.getAttribute("factory_id"))
-        data.add_box_data(guid, testbed_guid, factory_id)
-        self.graphical_info_data_from_xml(tag, guid, data)
-        self.factory_attributes_data_from_xml(tag, guid, data)
-        self.attributes_data_from_xml(tag, guid, data)
-        self.traces_data_from_xml(tag, guid, data)
-        self.addresses_data_from_xml(tag, guid, data)
-        self.routes_data_from_xml(tag, guid, data)
-        self.connections_data_from_xml(tag, guid, data)
-
-    def graphical_info_data_from_xml(self, tag, guid, data):
-        graphical_info_tag_list = tag.getElementsByTagName(
-                "graphical_info")
-        if len(graphical_info_tag_list) == 0:
-            return
-
-        graphical_info_tag = graphical_info_tag_list[0]
-        if graphical_info_tag.nodeType == tag.ELEMENT_NODE:
-            x = float(graphical_info_tag.getAttribute("x"))
-            y = float(graphical_info_tag.getAttribute("y"))
-            width = float(graphical_info_tag.getAttribute("width"))
-            height = float(graphical_info_tag.getAttribute("height"))
-            data.add_graphical_info_data(guid, x, y, width, height)
-
-    def factory_attributes_data_from_xml(self, tag, guid, data):
-        factory_attributes_tag_list = tag.getElementsByTagName(
-                "factory_attributes")
-        if len(factory_attributes_tag_list) == 0:
-            return
-
-        factory_attribute_tag_list = factory_attributes_tag_list[0].\
-                getElementsByTagName("factory_attribute")
-        for factory_attribute_tag in factory_attribute_tag_list:
-             if factory_attribute_tag.nodeType == tag.ELEMENT_NODE:
-                name = xmldecode(factory_attribute_tag.getAttribute("name"))
-                value = xmldecode(factory_attribute_tag.getAttribute("value"))
-                std_type = xmldecode(factory_attribute_tag.getAttribute("type"))
-                value = self.type_from_standard(std_type, value)
-                data.add_factory_attribute_data(guid, name, value)
-
-    def attributes_data_from_xml(self, tag, guid, data):
-        attributes_tag_list= tag.getElementsByTagName("attributes")
-        if len(attributes_tag_list) == 0:
-            return
-
-        attribute_tag_list = attributes_tag_list[0].\
-                getElementsByTagName("attribute")
-        for attribute_tag in attribute_tag_list:
-             if attribute_tag.nodeType == tag.ELEMENT_NODE:
-                name = xmldecode(attribute_tag.getAttribute("name"))
-                value = xmldecode(attribute_tag.getAttribute("value"))
-                std_type = xmldecode(attribute_tag.getAttribute("type"))
-                value = self.type_from_standard(std_type, value)
-                data.add_attribute_data(guid, name, value)
-
-    def traces_data_from_xml(self, tag, guid, data):
-        traces_tag_list = tag.getElementsByTagName("traces")
-        if len(traces_tag_list) == 0:
-            return
-
-        trace_tag_list = traces_tag_list[0].getElementsByTagName(
-                "trace")
-        for trace_tag in trace_tag_list:
-             if trace_tag.nodeType == tag.ELEMENT_NODE:
-                name = xmldecode(trace_tag.getAttribute("name"))
-                data.add_trace_data(guid, name)
-
-    def addresses_data_from_xml(self, tag, guid, data):
-        addresses_tag_list = tag.getElementsByTagName("addresses")
-        if len(addresses_tag_list) == 0:
-            return
-
-        address_tag_list = addresses_tag_list[0].\
-                getElementsByTagName("address")
-        for address_tag in address_tag_list:
-            if address_tag.nodeType == tag.ELEMENT_NODE:
-                address = xmldecode(address_tag.getAttribute("Address")) \
-                       if address_tag.hasAttribute("Address") else None
-                netprefix = int(address_tag.getAttribute("NetPrefix")) \
-                       if address_tag.hasAttribute("NetPrefix") else None
-                broadcast = xmldecode(address_tag.getAttribute("Broadcast")) \
-                       if address_tag.hasAttribute("Broadcast") else None
-                data.add_address_data(guid, address, netprefix, broadcast)
-
-    def routes_data_from_xml(self, tag, guid, data):
-        routes_tag_list = tag.getElementsByTagName("routes")
-        if len(routes_tag_list) == 0:
-            return
-
-        route_tag_list = routes_tag_list[0].getElementsByTagName("route")
-        for route_tag in route_tag_list:
-            if route_tag.nodeType == tag.ELEMENT_NODE:
-                destination = xmldecode(route_tag.getAttribute("Destination"))
-                netprefix = int(route_tag.getAttribute("NetPrefix"))
-                nexthop = xmldecode(route_tag.getAttribute("NextHop"))
-                metric = int(route_tag.getAttribute("Metric")) \
-                        if route_tag.hasAttribute("Metric") else 0
-                device = xmldecode(route_tag.getAttribute("Device"))
-                data.add_route_data(guid, destination, netprefix, 
-                        nexthop, metric, device)
-
-    def connections_data_from_xml(self, tag, guid, data):
-        connections_tag_list = tag.getElementsByTagName("connections")
-        if len(connections_tag_list) == 0:
-            return
-
-        connection_tag_list = connections_tag_list[0].getElementsByTagName(
-                "connection")
-        for connection_tag in connection_tag_list:
-             if connection_tag.nodeType == tag.ELEMENT_NODE:
-                 connector_type_name = xmldecode(connection_tag.getAttribute(
-                     "connector"))
-                 other_connector_type_name = xmldecode(connection_tag.getAttribute(
-                         "other_connector"))
-                 other_guid = int(connection_tag.getAttribute("other_guid"))
-                 data.add_connection_data(guid, connector_type_name, 
-                         other_guid, other_connector_type_name)
-
-    def type_to_standard(self, value):
-        if isinstance(value, str):
-            return Attribute.STRING
-        if isinstance(value, bool):
-            return Attribute.BOOL
-        if isinstance(value, int):
-            return Attribute.INTEGER
-        if isinstance(value, float):
-            return Attribute.DOUBLE
-    
-    def type_from_standard(self, type, value):
-        if type == Attribute.STRING:
-            return str(value)
-        if type == Attribute.BOOL:
-            return value == "True"
-        if type == Attribute.INTEGER:
-            return int(value)
-        if type == Attribute.DOUBLE:
-            return float(value)
-
diff --git a/src/nepi/util/parser/base.py b/src/nepi/util/parser/base.py
deleted file mode 100644 (file)
index eeaf594..0000000
+++ /dev/null
@@ -1,352 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import sys
-
-class ExperimentData(object):
-    def __init__(self):
-        self.data = dict()
-    
-    def __repr__(self):
-        return "%s(%r)" % (self.__class__, self.data)
-    
-    def __str__(self):
-        from pprint import pformat
-        return "%s:%s" % (self.__class__, pformat(self.data))
-
-    @property
-    def guids(self):
-        return self.data.keys()
-
-    def add_testbed_data(self, guid, testbed_id, testbed_version):
-        testbed_data = dict()
-        testbed_data["testbed_id"] = testbed_id
-        testbed_data["testbed_version"] = testbed_version
-        self.data[guid] = testbed_data
-
-    def add_box_data(self, guid, testbed_guid, factory_id):
-        box_data = dict()
-        box_data["testbed_guid"] = testbed_guid
-        box_data["factory_id"] = factory_id
-        self.data[guid] = box_data
-
-    def add_graphical_info_data(self, guid, x, y, width, height):
-        data = self.data[guid]
-        if not "graphical_info" in data:
-            data["graphical_info"] = dict()
-        graphical_info_data = data["graphical_info"]
-        graphical_info_data["x"] = x
-        graphical_info_data["y"] = y
-        graphical_info_data["width"] = width
-        graphical_info_data["height"] = height
-
-    def add_factory_attribute_data(self, guid, name, value):
-        data = self.data[guid]
-        if not "factory_attributes" in data:
-            data["factory_attributes"] = dict()
-        factory_attributes_data = data["factory_attributes"]
-        factory_attributes_data[name] = value
-
-    def add_attribute_data(self, guid, name, value):
-        data = self.data[guid]
-        if not "attributes" in data:
-            data["attributes"] = dict()
-        attributes_data = data["attributes"]
-        attributes_data[name] = value
-
-    def add_trace_data(self, guid, trace_name):
-        data = self.data[guid]
-        if not "traces" in data:
-            data["traces"] = list()
-        traces_data = data["traces"]
-        traces_data.append(trace_name)
-
-    def add_connection_data(self, guid, connector_type_name, other_guid,
-            other_connector_type_name):
-        data = self.data[guid]
-        if not "connections" in data:
-            data["connections"] = dict()
-        connections_data = data["connections"]
-        if not connector_type_name in connections_data:
-            connections_data[connector_type_name] = dict()
-        connection_data = connections_data[connector_type_name]
-        connection_data[other_guid] = other_connector_type_name
-
-    def add_address_data(self, guid, address, netprefix, 
-            broadcast):
-        data = self.data[guid]
-        if not "addresses" in data:
-            data["addresses"] = list()
-        addresses_data = data["addresses"]
-        address_data = dict()
-        if address:
-            address_data["Address"] = address
-        address_data["NetPrefix"] = netprefix
-        if broadcast:
-            address_data["Broadcast"] = broadcast
-        addresses_data.append(address_data)
-
-    def add_route_data(self, guid, destination, netprefix, nexthop, metric, device):
-        data = self.data[guid]
-        if not "routes" in data:
-            data["routes"] = list()
-        routes_data = data["routes"]
-        route_data = dict({
-            "Destination": destination,
-            "NetPrefix": netprefix, 
-            "NextHop": nexthop, 
-            "Metric": metric,
-            "Device": device
-            })
-        routes_data.append(route_data)
-
-    def is_testbed_data(self, guid):
-        return True if "testbed_id" in self.data[guid] else None
-
-    def get_testbed_data(self, guid):
-        testbed_data = self.data[guid]
-        return (testbed_data["testbed_id"], testbed_data["testbed_version"])
-
-    def get_box_data(self, guid):
-        box_data = self.data[guid]
-        return (box_data["testbed_guid"], box_data["factory_id"])
-
-    def get_graphical_info_data(self, guid):
-        data = self.data[guid]
-        if not "graphical_info" in data:
-            return (0, 0, 0, 0, "") 
-        graphical_info_data = data["graphical_info"]
-        return (graphical_info_data["x"],
-                graphical_info_data["y"],
-                graphical_info_data["width"],
-                graphical_info_data["height"])
-
-    def get_factory_attribute_data(self, guid):
-        data = self.data[guid]
-        if not "factory_attributes" in data:
-            return []
-        factory_attributes_data = data["factory_attributes"]
-        return [(name, value) for name, value \
-                in factory_attributes_data.iteritems()]
-
-    def get_attribute_data(self, guid, attribute=None, default=None):
-        data = self.data[guid]
-        if not "attributes" in data:
-            if attribute is None:
-                return []
-            else:
-                return None
-        attributes_data = data["attributes"]
-        if attribute is None:
-            return [(name, value) for name, value \
-                    in attributes_data.iteritems()]
-        else:
-            return attributes_data.get(attribute, default)
-
-    def set_attribute_data(self, guid, attribute, value):
-        data = self.data[guid]
-        if not "attributes" in data:
-            raise KeyError, "No attributes in reference OBJECT %r" % (guid,)
-        attributes_data = data["attributes"]
-        attributes_data[attribute] = value
-
-    def get_trace_data(self, guid):
-        data = self.data[guid]
-        if not "traces" in data:
-            return []
-        return data["traces"]
-
-    def get_connection_data(self, guid):
-        data = self.data[guid]
-        if not "connections" in data:
-            return []
-        connections_data = data["connections"]
-        return [(connector_type_name, other_guid, other_connector_type_name) \
-                    for connector_type_name, connection_data \
-                        in connections_data.iteritems() \
-                            for other_guid, other_connector_type_name \
-                                in connection_data.iteritems()]
-
-    def get_address_data(self, guid):
-        data = self.data[guid]
-        if not "addresses" in data:
-            return []
-        addresses_data = data["addresses"]
-        return [(data["Address"] if "Address" in data else None,
-                 data["NetPrefix"] if "NetPrefix" in data else None,
-                 data["Broadcast"] if "Broadcast" in data else None) \
-                 for data in addresses_data]
-
-    def get_route_data(self, guid):
-        data = self.data[guid]
-        if not "routes" in data:
-            return []
-        routes_data = data["routes"]
-        return [(data["Destination"],
-                 data["NetPrefix"],
-                 data["NextHop"],
-                 data["Metric"],
-                 data["Device"]) \
-                         for data in routes_data]
-
-class ExperimentParser(object):
-    def to_data(self, experiment_description):
-        data = ExperimentData()
-        for testbed_description in experiment_description.testbed_descriptions:
-            guid = testbed_description.guid
-            testbed_id = testbed_description.provider.testbed_id
-            testbed_version = testbed_description.provider.testbed_version
-            data.add_testbed_data(guid, testbed_id, testbed_version)
-            self.graphical_info_to_data(data, guid, 
-                    testbed_description.graphical_info)
-            self.attributes_to_data(data, guid, testbed_description.attributes)
-            for box in testbed_description.boxes:
-                data.add_box_data(box.guid, guid, box.factory_id)
-                self.graphical_info_to_data(data, box.guid, box.graphical_info)
-                self.factory_attributes_to_data(data, box.guid, 
-                        box.factory_attributes)
-                self.attributes_to_data(data, box.guid, box.attributes)
-                self.traces_to_data(data, box.guid, box.traces)
-                self.connections_to_data(data, box.guid, box.connectors)
-                if hasattr(box, "addresses"):
-                    self.addresses_to_data(data, box.guid, box.addresses)
-                if hasattr(box, "routes"):
-                    self.routes_to_data(data, box.guid, box.routes)
-        return data
-
-    def graphical_info_to_data(self, data, guid, g_info):
-        data.add_graphical_info_data(guid, g_info.x, g_info.y, g_info.width, 
-                g_info.height)
-
-    def factory_attributes_to_data(self, data, guid, factory_attributes):
-        factory_attributes = factory_attributes or dict()
-        for name, value in factory_attributes.iteritems():
-            data.add_factory_attribute_data(guid, name, value)
-
-    def attributes_to_data(self, data, guid, attributes):
-        for attribute in attributes:
-            if attribute.modified or attribute.has_no_default_value:
-                data.add_attribute_data(guid, attribute.name, attribute.value)
-
-    def traces_to_data(self, data, guid, traces):
-        for trace in traces:
-            if trace.enabled:
-                data.add_trace_data(guid, trace.name)
-
-    def connections_to_data(self, data, guid, connectors):
-        for connector in connectors:
-            connector_type_name = connector.connector_type.name
-            for other_connector in connector.connections:
-                other_guid = other_connector.box.guid
-                other_connector_type_name = other_connector.connector_type.name
-                data.add_connection_data(guid, connector_type_name, other_guid,
-                        other_connector_type_name)
-
-    def addresses_to_data(self, data, guid, addresses):
-        for addr in addresses:
-             address = addr.get_attribute_value("Address")
-             netprefix = addr.get_attribute_value("NetPrefix")
-             broadcast = addr.get_attribute_value("Broadcast") \
-                    if addr.has_attribute("Broadcast") and \
-                    addr.is_attribute_modified("Broadcast") else None
-             data.add_address_data(guid, address, netprefix, broadcast)
-
-    def routes_to_data(self, data, guid, routes):
-        for route in routes:
-             destination = route.get_attribute_value("Destination")
-             netprefix = route.get_attribute_value("NetPrefix")
-             nexthop = route.get_attribute_value("NextHop")
-             metric = route.get_attribute_value("Metric")
-             device = route.get_attribute_value("Device")
-             data.add_route_data(guid, destination, netprefix, nexthop, 
-                     metric, device)
-
-    def from_data(self, experiment_description, data):
-        box_guids = list()
-        for guid in sorted(data.guids):
-            if data.is_testbed_data(guid):
-                self.testbed_from_data(experiment_description, guid, data)
-            else:
-                self.box_from_data(experiment_description, guid, data)
-                box_guids.append(guid)
-        self.connections_from_data(experiment_description, box_guids, data)
-
-    def testbed_from_data(self, experiment_description, guid, data):
-        from nepi.core.design import FactoriesProvider
-        (testbed_id, testbed_version) = data.get_testbed_data(guid)
-        provider = FactoriesProvider(testbed_id)
-        if provider.testbed_version != testbed_version:
-            raise RuntimeError("Bad testbed version on testbed %s. Asked for %s, got %s" % \
-                    (testbed_id, testbed_version, provider.testbed_version))
-        experiment_description.add_testbed_description(provider, guid)
-        testbed_description = experiment_description.testbed_description(guid)
-        self.graphical_info_from_data(testbed_description, data)
-        self.attributes_from_data(testbed_description, data)
-
-    def box_from_data(self, experiment_description, guid, data):
-        (testbed_guid, factory_id) = data.get_box_data(guid)
-        testbed_description = experiment_description.testbed_description(
-                testbed_guid)
-        self.factory_attributes_from_data(testbed_description, factory_id,
-                guid, data)
-        box = testbed_description.create(factory_id, guid)
-
-        self.graphical_info_from_data(box, data)
-        self.attributes_from_data(box, data)
-        self.traces_from_data(box, data)
-        self.addresses_from_data(box, data)
-        self.routes_from_data(box, data)
-
-    def graphical_info_from_data(self, element, data):
-        (x, y, width, height) =  data.get_graphical_info_data(
-                element.guid)
-        element.graphical_info.x = x
-        element.graphical_info.y = y
-        element.graphical_info.width = width
-        element.graphical_info.height = height
-
-    def factory_attributes_from_data(self, testbed_description, factory_id, 
-            guid, data):
-        factory = testbed_description.provider.factory(factory_id)
-        for (name, value) in data.get_factory_attribute_data(guid):
-            factory.set_attribute_value(name, value)
-
-    def attributes_from_data(self, element, data):
-        for name, value in data.get_attribute_data(element.guid):
-            element.set_attribute_value(name, value)
-
-    def traces_from_data(self, box, data):
-        for name in data.get_trace_data(box.guid):
-            box.enable_trace(name)
-
-    def addresses_from_data(self, box, data):
-        for (address, netprefix, broadcast) in data.get_address_data(box.guid):
-            addr = box.add_address()
-            if address:
-                addr.set_attribute_value("Address", address)
-            if netprefix != None:
-                addr.set_attribute_value("NetPrefix", netprefix)
-            if broadcast:
-                addr.set_attribute_value("Broadcast", broadcast)
-
-    def routes_from_data(self, box, data):
-         for (destination, netprefix, nexthop, metric, device) \
-                 in data.get_route_data(box.guid):
-            addr = box.add_route()
-            addr.set_attribute_value("Destination", destination)
-            addr.set_attribute_value("NetPrefix", netprefix)
-            addr.set_attribute_value("NextHop", nexthop)
-            addr.set_attribute_value("Metric", metric)
-            addr.set_attribute_value("Device", device)
-
-    def connections_from_data(self, experiment_description, guids, data):
-        for guid in guids:
-            box = experiment_description.box(guid)
-            for (connector_type_name, other_guid, other_connector_type_name) \
-                    in data.get_connection_data(guid):
-                    other_box = experiment_description.box(other_guid)
-                    connector = box.connector(connector_type_name)
-                    other_connector = other_box.connector(
-                            other_connector_type_name)
-                    if not connector.is_connected(other_connector):
-                        connector.connect(other_connector)
-
diff --git a/src/nepi/util/parser/sfa.py b/src/nepi/util/parser/sfa.py
deleted file mode 100644 (file)
index bb83508..0000000
+++ /dev/null
@@ -1,173 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from xml.dom import minidom
-
-import sys
-
-def xmlencode(s):
-    if isinstance(s, str):
-        rv = s.decode("latin1")
-    elif not isinstance(s, unicode):
-        rv = unicode(s)
-    else:
-        rv = s
-    return rv.replace(u'\x00',u'&#0000;')
-
-def xmldecode(s):
-    return s.replace(u'&#0000',u'\x00').encode("utf8")
-
-def get_child_text(tag, name):
-    tags = tag.getElementsByTagName(name)
-    if not tags:
-        return ""
-    return get_text(tags[0])
-
-def get_name(tag):
-    return xmldecode(tag.tagName)
-
-def get_text(tag):
-    text = ''.join(t.nodeValue for t in tag.childNodes if t.nodeType == t.TEXT_NODE)
-    return xmldecode(text)
-
-def set_text(doc, tag, text):
-    ttag = doc.createTextNode(text)
-    tag.appendChild(ttag)
-
-def get_attribute(p_tag, name):
-    return xmldecode(p_tag.getAttribute(name))
-
-def has_sliver(node_tag):
-    sliver_tag = node_tag.getElementsByTagName("sliver")
-    return len(sliver_tag) > 0 
-
-class SFAResourcesParser(object):
-    def resources_from_xml(self, xml):
-        data = dict()
-        doc = minidom.parseString(xml)
-        rspec_tag = doc.getElementsByTagName("RSpec")[0]
-        network_tags = rspec_tag.getElementsByTagName("network")
-        for network_tag in network_tags:
-            if network_tag.nodeType == doc.ELEMENT_NODE:
-                node_data = self.nodes_from_xml(doc, network_tag)
-                data.update(node_data)
-        return data
-
-    def slice_info_from_xml(self, xml):
-        nodes_data = dict()
-        doc = minidom.parseString(xml)
-        rspec_tag = doc.getElementsByTagName("RSpec")[0]
-        network_tags = rspec_tag.getElementsByTagName("network")
-        for network_tag in network_tags:
-            if network_tag.nodeType == doc.ELEMENT_NODE:
-                node_data = self.nodes_from_xml(doc, network_tag, in_sliver = True)
-                nodes_data.update(node_data)
-        nodes_data = set(nodes_data.keys())
-        tags_data = self.slice_tags_from_xml(doc, rspec_tag)
-        return tags_data, nodes_data
-
-    def nodes_from_xml(self, doc, network_tag, in_sliver = False):
-        nodes_data = dict()
-        network_name = get_attribute(network_tag, 'name')
-        node_tags = network_tag.getElementsByTagName('node')
-        for node_tag in node_tags:
-            if node_tag.nodeType == doc.ELEMENT_NODE:
-                if in_sliver and not has_sliver(node_tag):
-                    continue
-                node_data = dict()
-                node_data['network_name'] = network_name
-                node_name = get_attribute(node_tag, 'component_name')
-                nodes_data[node_name] = node_data
-                for name in ['component_id', 'component_manager_id',
-                        'boot_state', 'component_name', 'site_id']:
-                    node_data[name] = get_attribute(node_tag, name)
-                location_tag = node_tag.getElementsByTagName('location')
-                if location_tag:
-                    for name in ['longitud' , 'latitude']:
-                        node_data[name] = get_attribute(location_tag[0], name)
-                for name in ['hostname', 'pldistro', 'arch', 'fcdistro',
-                        'stype', 'reliabilityw', 'loadm', 'cpuy', 'cpum', 
-                        'slicesm', 'slicesw', 'cpuw', 'loady', 'memy',
-                        'memw', 'reliabilityy', 'reliability', 'reliabilitym', 
-                        'responsey', 'bww', 'memem', 'bwm', 'slicey', 'responsem', 
-                        'response', 'loadw', 'country', 'load', 'mem', 'slices',
-                        'region', 'asnumber', 'bw', 'hrn', 'city', 'responsew', 
-                        'bwy', 'cpu']:
-                    node_data[name] = get_child_text(node_tag, name)
-                iface_tags =  node_tag.getElementsByTagName('interface')
-                ifaces_data = dict()
-                iface_ids = list()
-                for iface_tag in iface_tags: 
-                    if iface_tag.nodeType == doc.ELEMENT_NODE:
-                        for name in ['component_id', 'ipv4']:
-                            value = get_attribute(iface_tag, name)
-                            ifaces_data[name] = value
-                            if name == 'component_id':
-                                iface_ids.append(value)
-                node_data['interfaces'] = ifaces_data
-                node_data['interface_ids'] = iface_ids
-        return nodes_data
-
-    def slice_tags_from_xml(self, doc, rspec_tag):
-        tags_data = dict()
-        sliver_tag = rspec_tag.getElementsByTagName('sliver_defaults')
-        if len(sliver_tag) == 0:
-            return tags_data
-        for child_tag in sliver_tag[0].childNodes:
-            if child_tag.nodeType == doc.ELEMENT_NODE:
-                name = get_name(child_tag)
-                value = get_text(child_tag)
-                tags_data[name] = value
-        return tags_data
-
-    def create_slice_xml(self, node_data, slice_tags):
-        doc = minidom.Document()
-        rspec_tag = doc.createElement("RSpec")
-        doc.appendChild(rspec_tag)
-        rspec_tag.setAttribute("type", "SFA")
-        slice_defaults_tag = self.slice_defaults_xml(doc, slice_tags)
-        
-        networks = dict()
-        for k, data in node_data.iteritems():
-            network_name = data["network_name"]
-            if network_name not in networks:
-                networks[network_name] = dict()
-            networks[network_name][k] = data
-
-        for n, netdata in networks.iteritems():
-            network_tag = doc.createElement("testbeds")
-            network_tag.setAttribute("name", n)
-            rspec_tag.appendChild(network_tag)
-            for k, data in netdata.iteritems():
-                node_tag = doc.createElement("node")
-                node_tag.setAttribute("component_manager_id", data["component_manager_id"])
-                node_tag.setAttribute("component_id", data["component_id"])
-                node_tag.setAttribute("component_name", data["component_name"])
-                node_tag.setAttribute("boot_state", data["boot_state"])
-                node_tag.setAttribute("site_id", data["site_id"])
-                hostname_tag = doc.createElement("hostname")
-                set_text(doc, hostname_tag, data["hostname"])
-                node_tag.appendChild(hostname_tag)
-                sliver_tag = doc.createElement("sliver")
-                node_tag.appendChild(sliver_tag)
-                network_tag.appendChild(node_tag)
-            network_tag.appendChild(slice_defaults_tag)
-        return doc.toxml()
-
-    def slice_defaults_xml(self, doc, slice_tags):
-        slice_defaults_tag = doc.createElement("sliver_defaults")
-        for name, value in slice_tags.iteritems():
-            tag = doc.createElement(name)
-            set_text(doc, tag, value)
-            slice_defaults_tag.appendChild(tag)
-        return slice_defaults_tag
-
-"""
-if __name__ == "__main__":
-    path = sys.argv[1]
-    fd = open(path, 'r')
-    xml = fd.read()
-    fd.close()
-    p = SFAResourcesParser()
-    tags, nodes = p.slice_info_from_xml(xml)
-    print tags, nodes
-"""
diff --git a/src/nepi/util/plot.py b/src/nepi/util/plot.py
new file mode 100644 (file)
index 0000000..2959639
--- /dev/null
@@ -0,0 +1,30 @@
+import networkx
+import tempfile
+
+class Plotter(object):
+    """Renders a box topology (boxes linked via .connections) as a
+    graphviz-laid-out undirected graph, one node per box label."""
+
+    def __init__(self, box):
+        # "overlap = false" is passed through to graphviz so laid-out
+        # nodes do not overlap each other.
+        self._graph = networkx.Graph(graph = dict(overlap = "false"))
+
+        # Walk every box reachable from the starting box; 'traversed'
+        # records visited guids so cycles in the topology terminate.
+        traversed = set()
+        self._traverse_boxes(traversed, box)
+
+    def _traverse_boxes(self, traversed, box):
+        # Depth-first traversal. Adds 'box' as a node, one edge per
+        # connection, and recurses into not-yet-visited neighbours.
+        traversed.add(box.guid)
+
+        self._graph.add_node(box.label, 
+                width = 50/72.0, # 1 inch = 72 points
+                height = 50/72.0, 
+                shape = "circle")
+
+        for b in box.connections:
+            # Edges are added unconditionally; networkx.Graph silently
+            # de-duplicates parallel edges between the same two labels.
+            self._graph.add_edge(box.label, b.label)
+            if b.guid not in traversed:
+                self._traverse_boxes(traversed, b)
+
+    def plot(self):
+        # Writes the graph to a temporary .dot file and returns its path.
+        # The caller owns (and must eventually remove) the file, hence
+        # delete=False: the name must stay valid after we close the handle.
+        # NOTE(review): draw_graphviz/write_dot require pygraphviz (or
+        # pydot) and were dropped from networkx's top-level API in 2.x --
+        # confirm the networkx version this is pinned against.
+        # NOTE(review): draw_graphviz here computes a layout/draws to the
+        # current matplotlib figure as a side effect; only write_dot
+        # contributes to the returned file -- confirm it is intentional.
+        f = tempfile.NamedTemporaryFile(delete=False)
+        networkx.draw_graphviz(self._graph)
+        networkx.write_dot(self._graph, f.name)
+        f.close()
+        return f.name
+
diff --git a/src/nepi/util/proxy.py b/src/nepi/util/proxy.py
deleted file mode 100644 (file)
index 4226bac..0000000
+++ /dev/null
@@ -1,1385 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import base64
-import nepi.core.execute
-import nepi.util.environ
-from nepi.core.attributes import AttributesMap, Attribute
-from nepi.util import server, validation
-from nepi.util.constants import TIME_NOW, ATTR_NEPI_TESTBED_ENVIRONMENT_SETUP, DeploymentConfiguration as DC
-import getpass
-import cPickle
-import sys
-import time
-import tempfile
-import shutil
-import functools
-import os
-
-# PROTOCOL REPLIES
-OK = 0
-ERROR = 1
-
-# PROTOCOL INSTRUCTION MESSAGES
-XML = 2 
-TRACE   = 4
-FINISHED    = 5
-START   = 6
-STOP    = 7
-SHUTDOWN    = 8
-CONFIGURE   = 9
-CREATE      = 10
-CREATE_SET  = 11
-FACTORY_SET = 12
-CONNECT     = 13
-CROSS_CONNECT   = 14
-ADD_TRACE   = 15
-ADD_ADDRESS = 16
-ADD_ROUTE   = 17
-DO_SETUP    = 18
-DO_CREATE   = 19
-DO_CONNECT_INIT = 20
-DO_CONFIGURE    = 21
-DO_CROSS_CONNECT_INIT   = 22
-GET = 23
-SET = 24
-ACTION  = 25
-STATUS  = 26
-GUIDS  = 27
-GET_ROUTE = 28
-GET_ADDRESS = 29
-RECOVER = 30
-DO_PRECONFIGURE     = 31
-GET_ATTRIBUTE_LIST  = 32
-DO_CONNECT_COMPL    = 33
-DO_CROSS_CONNECT_COMPL  = 34
-TESTBED_ID  = 35
-TESTBED_VERSION  = 36
-DO_PRESTART = 37
-GET_FACTORY_ID = 38
-GET_TESTBED_ID = 39
-GET_TESTBED_VERSION = 40
-TRACES_INFO = 41
-EXEC_XML = 42
-TESTBED_STATUS  = 43
-STARTED_TIME  = 44
-STOPPED_TIME  = 45
-CURRENT = 46
-ACCESS_CONFIGURATIONS = 47
-CURRENT_ACCESS_CONFIG = 48
-
-
-instruction_text = dict({
-    OK:     "OK",
-    ERROR:  "ERROR",
-    XML:    "XML",
-    EXEC_XML:    "EXEC_XML",
-    TRACE:  "TRACE",
-    FINISHED:   "FINISHED",
-    START:  "START",
-    STOP:   "STOP",
-    RECOVER: "RECOVER",
-    SHUTDOWN:   "SHUTDOWN",
-    CONFIGURE:  "CONFIGURE",
-    CREATE: "CREATE",
-    CREATE_SET: "CREATE_SET",
-    FACTORY_SET:    "FACTORY_SET",
-    CONNECT:    "CONNECT",
-    CROSS_CONNECT: "CROSS_CONNECT",
-    ADD_TRACE:  "ADD_TRACE",
-    ADD_ADDRESS:    "ADD_ADDRESS",
-    ADD_ROUTE:  "ADD_ROUTE",
-    DO_SETUP:   "DO_SETUP",
-    DO_CREATE:  "DO_CREATE",
-    DO_CONNECT_INIT: "DO_CONNECT_INIT",
-    DO_CONNECT_COMPL: "DO_CONNECT_COMPL",
-    DO_CONFIGURE:   "DO_CONFIGURE",
-    DO_PRECONFIGURE:   "DO_PRECONFIGURE",
-    DO_CROSS_CONNECT_INIT:  "DO_CROSS_CONNECT_INIT",
-    DO_CROSS_CONNECT_COMPL: "DO_CROSS_CONNECT_COMPL",
-    GET:    "GET",
-    SET:    "SET",
-    GET_ROUTE: "GET_ROUTE",
-    GET_ADDRESS: "GET_ADDRESS",
-    GET_ATTRIBUTE_LIST: "GET_ATTRIBUTE_LIST",
-    GET_FACTORY_ID: "GET_FACTORY_ID",
-    GET_TESTBED_ID: "GET_TESTBED_ID",
-    GET_TESTBED_VERSION: "GET_TESTBED_VERSION",
-    ACTION: "ACTION",
-    STATUS: "STATUS",
-    GUIDS:  "GUIDS",
-    TESTBED_ID: "TESTBED_ID",
-    TESTBED_VERSION: "TESTBED_VERSION",
-    TRACES_INFO: "TRACES_INFO",
-    STARTED_TIME: "STARTED_TIME",
-    STOPPED_TIME: "STOPPED_TIME",
-    CURRENT: "CURRENT",
-    ACCESS_CONFIGURATIONS: "ACCESS_CONFIGURATIONS",
-    CURRENT_ACCESS_CONFIG: "CURRENT_ACCESS_CONFIG"
-
-    })
-
-def log_msg(server, params):
-    try:
-        instr = int(params[0])
-        instr_txt = instruction_text[instr]
-        server.log_debug("%s - msg: %s [%s]" % (server.__class__.__name__, 
-            instr_txt, ", ".join(map(str, params[1:]))))
-    except:
-        # don't die for logging
-        pass
-
-def log_reply(server, reply):
-    try:
-        res = reply.split("|")
-        code = int(res[0])
-        code_txt = instruction_text[code]
-        try:
-            txt = base64.b64decode(res[1])
-        except:
-            txt = res[1]
-        server.log_debug("%s - reply: %s %s" % (server.__class__.__name__, 
-                code_txt, txt))
-    except:
-        # don't die for logging
-        server.log_debug("%s - reply: %s" % (server.__class__.__name__, 
-                reply))
-        pass
-
-def to_server_log_level(log_level):
-    return (
-        DC.DEBUG_LEVEL
-            if log_level == DC.DEBUG_LEVEL 
-        else DC.ERROR_LEVEL
-    )
-
-def get_access_config_params(access_config):
-    mode = access_config.get_attribute_value(DC.DEPLOYMENT_MODE)
-    launch = not access_config.get_attribute_value(DC.RECOVER)
-    root_dir = access_config.get_attribute_value(DC.ROOT_DIRECTORY)
-    log_level = access_config.get_attribute_value(DC.LOG_LEVEL)
-    log_level = to_server_log_level(log_level)
-    communication = access_config.get_attribute_value(DC.DEPLOYMENT_COMMUNICATION)
-    environment_setup = (
-        access_config.get_attribute_value(DC.DEPLOYMENT_ENVIRONMENT_SETUP)
-        if access_config.has_attribute(DC.DEPLOYMENT_ENVIRONMENT_SETUP)
-        else ""
-    )
-    user = access_config.get_attribute_value(DC.DEPLOYMENT_USER)
-    host = access_config.get_attribute_value(DC.DEPLOYMENT_HOST)
-    port = access_config.get_attribute_value(DC.DEPLOYMENT_PORT)
-    agent = access_config.get_attribute_value(DC.USE_AGENT)
-    sudo = access_config.get_attribute_value(DC.USE_SUDO)
-    key = access_config.get_attribute_value(DC.DEPLOYMENT_KEY)
-    communication = access_config.get_attribute_value(DC.DEPLOYMENT_COMMUNICATION)
-    clean_root = access_config.get_attribute_value(DC.CLEAN_ROOT)
-    return (mode, launch, root_dir, log_level, communication, user, host, port,
-            key, agent, sudo, environment_setup, clean_root)
-
-class AccessConfiguration(AttributesMap):
-    def __init__(self, params = None):
-        super(AccessConfiguration, self).__init__()
-        
-        from nepi.core.metadata import Metadata
-        
-        for _,attr_info in Metadata.PROXY_ATTRIBUTES.iteritems():
-            self.add_attribute(**attr_info)
-
-        if params:
-            for attr_name, attr_value in params.iteritems():
-                parser = Attribute.type_parsers[self.get_attribute_type(attr_name)]
-                attr_value = parser(attr_value)
-                self.set_attribute_value(attr_name, attr_value)
-
-class TempDir(object):
-    def __init__(self):
-        self.path = tempfile.mkdtemp()
-    
-    def __del__(self):
-        shutil.rmtree(self.path)
-
-class PermDir(object):
-    def __init__(self, path):
-        self.path = path
-
-def create_experiment_suite(xml, access_config, repetitions = None,
-        duration = None, wait_guids = None):
-    mode = None
-    if access_config :
-        (mode, launch, root_dir, log_level, communication, user, host, port, 
-                key, agent, sudo, environment_setup, clean_root) \
-                        = get_access_config_params(access_config)
-
-    if not mode or mode == DC.MODE_SINGLE_PROCESS:
-        from nepi.core.execute import ExperimentSuite
-        if not root_dir:
-            root_dir = TempDir()
-        else:
-            root_dir = PermDir(access_config.get_attribute_value(DC.ROOT_DIRECTORY))
-
-        exp_suite = ExperimentSuite(xml, access_config, repetitions, duration,
-                wait_guids)
-        
-        # inject reference to temporary dir, so that it gets cleaned
-        # up at destruction time.
-        exp_suite._tempdir = root_dir
-        return exp_suite
-    elif mode == DC.MODE_DAEMON:
-        return ExperimentSuiteProxy(root_dir, log_level,
-                xml,
-                repetitions = repetitions, 
-                duration = duration,
-                wait_guids = wait_guids, 
-                communication = communication,
-                host = host, 
-                port = port, 
-                user = user, 
-                ident_key = key,
-                agent = agent, 
-                sudo = sudo, 
-                environment_setup = environment_setup, 
-                clean_root = clean_root)
-    raise RuntimeError("Unsupported access configuration '%s'" % mode)
-
-def create_experiment_controller(xml, access_config = None):
-    mode = None
-    launch = True
-    log_level = DC.ERROR_LEVEL
-    if access_config:
-        (mode, launch, root_dir, log_level, communication, user, host, port, 
-                key, agent, sudo, environment_setup, clean_root) \
-                        = get_access_config_params(access_config)
-
-    os.environ["NEPI_CONTROLLER_LOGLEVEL"] = log_level
-
-    if not mode or mode == DC.MODE_SINGLE_PROCESS:
-        from nepi.core.execute import ExperimentController
-        
-        if not access_config or not access_config.has_attribute(DC.ROOT_DIRECTORY):
-            root_dir = TempDir()
-        else:
-            root_dir = PermDir(access_config.get_attribute_value(DC.ROOT_DIRECTORY))
-        controller = ExperimentController(xml, root_dir.path)
-        
-        # inject reference to temporary dir, so that it gets cleaned
-        # up at destruction time.
-        controller._tempdir = root_dir
-        
-        if not launch:
-            # try to recover
-            controller.recover()
-        
-        return controller
-    elif mode == DC.MODE_DAEMON:
-        try:
-            return ExperimentControllerProxy(root_dir, log_level,
-                experiment_xml = xml,
-                communication = communication,
-                host = host, 
-                port = port, 
-                user = user, 
-                ident_key = key,
-                agent = agent, 
-                sudo = sudo, 
-                launch = launch,
-                environment_setup = environment_setup, 
-                clean_root = clean_root)
-        except:
-            if not launch:
-                # Maybe controller died, recover from persisted testbed information if possible
-                controller = ExperimentControllerProxy(root_dir, log_level,
-                    experiment_xml = xml,
-                    communication = communication,
-                    host = host, 
-                    port = port, 
-                    user = user, 
-                    ident_key = key,
-                    agent = agent, 
-                    sudo = sudo, 
-                    launch = True,
-                    environment_setup = environment_setup,
-                    clean_root = clean_root)
-                controller.recover()
-                return controller
-            else:
-                raise
-    raise RuntimeError("Unsupported access configuration '%s'" % mode)
-
-def create_testbed_controller(testbed_id, testbed_version, access_config):
-    mode = None
-    launch = True
-    log_level = DC.ERROR_LEVEL
-    if access_config:
-        (mode, launch, root_dir, log_level, communication, user, host, port, 
-                key, agent, sudo, environment_setup, clean_root) \
-                        = get_access_config_params(access_config)
-
-    os.environ["NEPI_CONTROLLER_LOGLEVEL"] = log_level
-    
-    if not mode or mode == DC.MODE_SINGLE_PROCESS:
-        if not launch:
-            raise ValueError, "Unsupported instantiation mode: %s with launch=False" % (mode,)
-        return  _build_testbed_controller(testbed_id, testbed_version)
-    elif mode == DC.MODE_DAEMON:
-        return TestbedControllerProxy(root_dir, log_level, 
-                testbed_id = testbed_id, 
-                testbed_version = testbed_version,
-                communication = communication,
-                host = host, 
-                port = port, 
-                ident_key = key,
-                user = user, 
-                agent = agent, 
-                sudo = sudo, 
-                launch = launch,
-                environment_setup = environment_setup, 
-                clean_root = clean_root)
-    raise RuntimeError("Unsupported access configuration '%s'" % mode)
-
-def _build_testbed_controller(testbed_id, testbed_version):
-    mod_name = nepi.util.environ.find_testbed(testbed_id)
-    
-    if not mod_name in sys.modules:
-        try:
-            __import__(mod_name)
-        except ImportError:
-            raise ImportError, "Cannot find module %s in %r" % (mod_name, sys.path)
-    
-    module = sys.modules[mod_name]
-    tc = module.TestbedController()
-    if tc.testbed_version != testbed_version:
-        raise RuntimeError("Bad testbed version on testbed %s. Asked for %s, got %s" % \
-                (testbed_id, testbed_version, tc.testbed_version))
-    return tc
-
-# Just a namespace class
-class Marshalling:
-    class Decoders:
-        @staticmethod
-        def pickled_data(sdata):
-            return cPickle.loads(base64.b64decode(sdata))
-        
-        @staticmethod
-        def base64_data(sdata):
-            return base64.b64decode(sdata)
-        
-        @staticmethod
-        def nullint(sdata):
-            return None if sdata == "None" else int(sdata)
-
-        @staticmethod
-        def bool(sdata):
-            return sdata == 'True'
-        
-    class Encoders:
-        @staticmethod
-        def pickled_data(data):
-            return base64.b64encode(cPickle.dumps(data))
-        
-        @staticmethod
-        def base64_data(data):
-            if not data:
-                return ""
-            return base64.b64encode(data)
-        
-        @staticmethod
-        def nullint(data):
-            return "None" if data is None else int(data)
-        
-        @staticmethod
-        def bool(data):
-            return str(bool(data))
-           
-    # import into Marshalling all the decoders
-    # they act as types
-    locals().update([
-        (typname, typ)
-        for typname, typ in vars(Decoders).iteritems()
-        if not typname.startswith('_')
-    ])
-
-    _TYPE_ENCODERS = dict([
-        # id(type) -> (<encoding_function>, <formatting_string>)
-        (typname, (getattr(Encoders,typname),"%s"))
-        for typname in vars(Decoders)
-        if not typname.startswith('_')
-           and hasattr(Encoders,typname)
-    ])
-
-    # Builtins
-    _TYPE_ENCODERS["float"] = (float, "%r")
-    _TYPE_ENCODERS["int"] = (int, "%d")
-    _TYPE_ENCODERS["long"] = (int, "%d")
-    _TYPE_ENCODERS["str"] = (str, "%s")
-    _TYPE_ENCODERS["unicode"] = (str, "%s")
-    
-    # Generic encoder
-    _TYPE_ENCODERS[None] = (str, "%s")
-    
-    @staticmethod
-    def args(*types):
-        """
-        Decorator that converts the given function into one that takes
-        a single "params" list, with each parameter marshalled according
-        to the given factory callable (type constructors are accepted).
-        
-        The first argument (self) is left untouched.
-        
-        eg:
-        
-        @Marshalling.args(int,int,str,base64_data)
-        def somefunc(self, someint, otherint, somestr, someb64):
-           return someretval
-        """
-        def decor(f):
-            @functools.wraps(f)
-            def rv(self, params):
-                return f(self, *[ ctor(val)
-                                  for ctor,val in zip(types, params[1:]) ])
-            
-            rv._argtypes = types
-            
-            # Derive type encoders by looking up types in _TYPE_ENCODERS
-            # make_proxy will use it to encode arguments in command strings
-            argencoders = []
-            TYPE_ENCODERS = Marshalling._TYPE_ENCODERS
-            for typ in types:
-                if typ.__name__ in TYPE_ENCODERS:
-                    argencoders.append(TYPE_ENCODERS[typ.__name__])
-                else:
-                    # generic encoder
-                    argencoders.append(TYPE_ENCODERS[None])
-            
-            rv._argencoders = tuple(argencoders)
-            
-            rv._retval = getattr(f, '_retval', None)
-            return rv
-        return decor
-
-    @staticmethod
-    def retval(typ=Decoders.base64_data):
-        """
-        Decorator that converts the given function into one that 
-        returns a properly encoded return string, given that the undecorated
-        function returns suitable input for the encoding function.
-        
-        The optional typ argument specifies a type.
-        For the default of base64_data, return values should be strings.
-        The return value of the encoding method should be a string always.
-        
-        eg:
-        
-        @Marshalling.args(int,int,str,base64_data)
-        @Marshalling.retval(str)
-        def somefunc(self, someint, otherint, somestr, someb64):
-           return someint
-        """
-        encode, fmt = Marshalling._TYPE_ENCODERS.get(
-            typ.__name__,
-            Marshalling._TYPE_ENCODERS[None])
-        fmt = "%d|"+fmt
-        
-        def decor(f):
-            @functools.wraps(f)
-            def rv(self, *p, **kw):
-                data = f(self, *p, **kw)
-                return fmt % (
-                    OK,
-                    encode(data)
-                )
-            rv._retval = typ
-            rv._argtypes = getattr(f, '_argtypes', None)
-            rv._argencoders = getattr(f, '_argencoders', None)
-            return rv
-        return decor
-    
-    @staticmethod
-    def retvoid(f):
-        """
-        Decorator that converts the given function into one that 
-        always return an encoded empty string.
-        
-        Useful for null-returning functions.
-        """
-        OKRV = "%d|" % (OK,)
-        
-        @functools.wraps(f)
-        def rv(self, *p, **kw):
-            f(self, *p, **kw)
-            return OKRV
-        
-        rv._retval = None
-        rv._argtypes = getattr(f, '_argtypes', None)
-        rv._argencoders = getattr(f, '_argencoders', None)
-        return rv
-    
-    @staticmethod
-    def handles(whichcommand):
-        """
-        Associates the method with a given command code for servers.
-        It should always be the topmost decorator.
-        """
-        def decor(f):
-            f._handles_command = whichcommand
-            return f
-        return decor
-
-class BaseServer(server.Server):
-    def reply_action(self, msg):
-        if not msg:
-            result = base64.b64encode("Invalid command line")
-            reply = "%d|%s" % (ERROR, result)
-        else:
-            params = msg.split("|")
-            instruction = int(params[0])
-            log_msg(self, params)
-            try:
-                for mname,meth in vars(self.__class__).iteritems():
-                    if not mname.startswith('_'):
-                        cmd = getattr(meth, '_handles_command', None)
-                        if cmd == instruction:
-                            meth = getattr(self, mname)
-                            reply = meth(params)
-                            break
-                else:
-                    error = "Invalid instruction %s" % instruction
-                    self.log_error(error)
-                    result = base64.b64encode(error)
-                    reply = "%d|%s" % (ERROR, result)
-            except:
-                error = self.log_error()
-                result = base64.b64encode(error)
-                reply = "%d|%s" % (ERROR, result)
-        log_reply(self, reply)
-        return reply
-
-class ExperimentSuiteServer(BaseServer):
-    def __init__(self, root_dir, log_level, 
-            xml, repetitions, duration, wait_guids, 
-            communication = DC.ACCESS_LOCAL,
-            host = None, 
-            port = None, 
-            user = None, 
-            ident_key = None, 
-            agent = None,
-            sudo = False, 
-            environment_setup = "", 
-            clean_root = False):
-        super(ExperimentSuiteServer, self).__init__(root_dir, log_level, 
-            environment_setup = environment_setup, clean_root = clean_root)
-        access_config = AccessConfiguration()
-        access_config.set_attribute_value(DC.ROOT_DIRECTORY, root_dir)
-        access_config.set_attribute_value(DC.LOG_LEVEL, log_level)
-        access_config.set_attribute_value(DC.DEPLOYMENT_ENVIRONMENT_SETUP, environment_setup)
-        if user:
-            access_config.set_attribute_value(DC.DEPLOYMENT_USER, user)
-        if host:
-            access_config.set_attribute_value(DC.DEPLOYMENT_HOST, host)
-        if port:
-            access_config.set_attribute_value(DC.DEPLOYMENT_PORT, port)
-        if agent:    
-            access_config.set_attribute_value(DC.USE_AGENT, agent)
-        if sudo:
-            acess_config.set_attribute_value(DC.USE_SUDO, sudo)
-        if ident_key:
-            access_config.set_attribute_value(DC.DEPLOYMENT_KEY, ident_key)
-        if communication:
-            access_config.set_attribute_value(DC.DEPLOYMENT_COMMUNICATION, communication)
-        if clean_root:
-            access_config.set_attribute_value(DC.CLEAN_ROOT, clean_root)
-        self._experiment_xml = xml
-        self._duration = duration
-        self._repetitions = repetitions
-        self._wait_guids = wait_guids
-        self._access_config = access_config
-        self._experiment_suite = None
-
-    def post_daemonize(self):
-        from nepi.core.execute import ExperimentSuite
-        self._experiment_suite = ExperimentSuite(
-                self._experiment_xml, self._access_config, 
-                self._repetitions, self._duration, self._wait_guids)
-
-    @Marshalling.handles(CURRENT)
-    @Marshalling.args()
-    @Marshalling.retval(int)
-    def current(self):
-        return self._experiment_suite.current()
-   
-    @Marshalling.handles(STATUS)
-    @Marshalling.args()
-    @Marshalling.retval(int)
-    def status(self):
-        return self._experiment_suite.status()
-    
-    @Marshalling.handles(FINISHED)
-    @Marshalling.args()
-    @Marshalling.retval(Marshalling.bool)
-    def is_finished(self):
-        return self._experiment_suite.is_finished()
-
-    @Marshalling.handles(ACCESS_CONFIGURATIONS)
-    @Marshalling.args()
-    @Marshalling.retval( Marshalling.pickled_data )
-    def get_access_configurations(self):
-        return self._experiment_suite.get_access_configurations()
-
-    @Marshalling.handles(START)
-    @Marshalling.args()
-    @Marshalling.retvoid
-    def start(self):
-        self._experiment_suite.start()
-
-    @Marshalling.handles(SHUTDOWN)
-    @Marshalling.args()
-    @Marshalling.retvoid
-    def shutdown(self):
-        self._experiment_suite.shutdown()
-
-    @Marshalling.handles(CURRENT_ACCESS_CONFIG)
-    @Marshalling.args()
-    @Marshalling.retval( Marshalling.pickled_data )
-    def get_current_access_config(self):
-        return self._experiment_suite.get_current_access_config()
-
-class TestbedControllerServer(BaseServer):
-    def __init__(self, root_dir, log_level, testbed_id, testbed_version, 
-            environment_setup, clean_root):
-        super(TestbedControllerServer, self).__init__(root_dir, log_level, 
-            environment_setup = environment_setup, clean_root = clean_root)
-        self._testbed_id = testbed_id
-        self._testbed_version = testbed_version
-        self._testbed = None
-
-    def post_daemonize(self):
-        self._testbed = _build_testbed_controller(self._testbed_id, 
-                self._testbed_version)
-
-    @Marshalling.handles(GUIDS)
-    @Marshalling.args()
-    @Marshalling.retval( Marshalling.pickled_data )
-    def guids(self):
-        return self._testbed.guids
-
-    @Marshalling.handles(TESTBED_ID)
-    @Marshalling.args()
-    @Marshalling.retval()
-    def testbed_id(self):
-        return str(self._testbed.testbed_id)
-
-    @Marshalling.handles(TESTBED_VERSION)
-    @Marshalling.args()
-    @Marshalling.retval()
-    def testbed_version(self):
-        return str(self._testbed.testbed_version)
-
-    @Marshalling.handles(CREATE)
-    @Marshalling.args(int, str)
-    @Marshalling.retvoid
-    def defer_create(self, guid, factory_id):
-        self._testbed.defer_create(guid, factory_id)
-
-    @Marshalling.handles(TRACE)
-    @Marshalling.args(int, str, Marshalling.base64_data)
-    @Marshalling.retval()
-    def trace(self, guid, trace_id, attribute):
-        return self._testbed.trace(guid, trace_id, attribute)
-
-    @Marshalling.handles(TRACES_INFO)
-    @Marshalling.args()
-    @Marshalling.retval( Marshalling.pickled_data )
-    def traces_info(self):
-        return self._testbed.traces_info()
-
-    @Marshalling.handles(START)
-    @Marshalling.args()
-    @Marshalling.retvoid
-    def start(self):
-        self._testbed.start()
-
-    @Marshalling.handles(STOP)
-    @Marshalling.args()
-    @Marshalling.retvoid
-    def stop(self):
-        self._testbed.stop()
-
-    @Marshalling.handles(SHUTDOWN)
-    @Marshalling.args()
-    @Marshalling.retvoid
-    def shutdown(self):
-        self._testbed.shutdown()
-
-    @Marshalling.handles(CONFIGURE)
-    @Marshalling.args(Marshalling.base64_data, Marshalling.pickled_data)
-    @Marshalling.retvoid
-    def defer_configure(self, name, value):
-        self._testbed.defer_configure(name, value)
-
-    @Marshalling.handles(CREATE_SET)
-    @Marshalling.args(int, Marshalling.base64_data, Marshalling.pickled_data)
-    @Marshalling.retvoid
-    def defer_create_set(self, guid, name, value):
-        self._testbed.defer_create_set(guid, name, value)
-
-    @Marshalling.handles(FACTORY_SET)
-    @Marshalling.args(Marshalling.base64_data, Marshalling.pickled_data)
-    @Marshalling.retvoid
-    def defer_factory_set(self, name, value):
-        self._testbed.defer_factory_set(name, value)
-
-    @Marshalling.handles(CONNECT)
-    @Marshalling.args(int, str, int, str)
-    @Marshalling.retvoid
-    def defer_connect(self, guid1, connector_type_name1, guid2, connector_type_name2):
-        self._testbed.defer_connect(guid1, connector_type_name1, guid2, 
-            connector_type_name2)
-
-    @Marshalling.handles(CROSS_CONNECT)
-    @Marshalling.args(int, str, int, int, str, str, str)
-    @Marshalling.retvoid
-    def defer_cross_connect(self, 
-            guid, connector_type_name,
-            cross_guid, cross_testbed_guid,
-            cross_testbed_id, cross_factory_id,
-            cross_connector_type_name):
-        self._testbed.defer_cross_connect(guid, connector_type_name, cross_guid, 
-            cross_testbed_guid, cross_testbed_id, cross_factory_id, 
-            cross_connector_type_name)
-
-    @Marshalling.handles(ADD_TRACE)
-    @Marshalling.args(int, str)
-    @Marshalling.retvoid
-    def defer_add_trace(self, guid, trace_id):
-        self._testbed.defer_add_trace(guid, trace_id)
-
-    @Marshalling.handles(ADD_ADDRESS)
-    @Marshalling.args(int, str, int, Marshalling.pickled_data)
-    @Marshalling.retvoid
-    def defer_add_address(self, guid, address, netprefix, broadcast):
-        self._testbed.defer_add_address(guid, address, netprefix,
-                broadcast)
-
-    @Marshalling.handles(ADD_ROUTE)
-    @Marshalling.args(int, str, int, str, int, str)
-    @Marshalling.retvoid
-    def defer_add_route(self, guid, destination, netprefix, nexthop, 
-            metric, device):
-        self._testbed.defer_add_route(guid, destination, netprefix, nexthop, 
-                metric, device)
-
-    @Marshalling.handles(DO_SETUP)
-    @Marshalling.args()
-    @Marshalling.retvoid
-    def do_setup(self):
-        self._testbed.do_setup()
-
-    @Marshalling.handles(DO_CREATE)
-    @Marshalling.args()
-    @Marshalling.retvoid
-    def do_create(self):
-        self._testbed.do_create()
-
-    @Marshalling.handles(DO_CONNECT_INIT)
-    @Marshalling.args()
-    @Marshalling.retvoid
-    def do_connect_init(self):
-        self._testbed.do_connect_init()
-
-    @Marshalling.handles(DO_CONNECT_COMPL)
-    @Marshalling.args()
-    @Marshalling.retvoid
-    def do_connect_compl(self):
-        self._testbed.do_connect_compl()
-
-    @Marshalling.handles(DO_CONFIGURE)
-    @Marshalling.args()
-    @Marshalling.retvoid
-    def do_configure(self):
-        self._testbed.do_configure()
-
-    @Marshalling.handles(DO_PRECONFIGURE)
-    @Marshalling.args()
-    @Marshalling.retvoid
-    def do_preconfigure(self):
-        self._testbed.do_preconfigure()
-
-    @Marshalling.handles(DO_PRESTART)
-    @Marshalling.args()
-    @Marshalling.retvoid
-    def do_prestart(self):
-        self._testbed.do_prestart()
-
-    @Marshalling.handles(DO_CROSS_CONNECT_INIT)
-    @Marshalling.args( Marshalling.Decoders.pickled_data )
-    @Marshalling.retvoid
-    def do_cross_connect_init(self, cross_data):
-        self._testbed.do_cross_connect_init(cross_data)
-
-    @Marshalling.handles(DO_CROSS_CONNECT_COMPL)
-    @Marshalling.args( Marshalling.Decoders.pickled_data )
-    @Marshalling.retvoid
-    def do_cross_connect_compl(self, cross_data):
-        self._testbed.do_cross_connect_compl(cross_data)
-
-    @Marshalling.handles(GET)
-    @Marshalling.args(int, Marshalling.base64_data, str)
-    @Marshalling.retval( Marshalling.pickled_data )
-    def get(self, guid, name, time):
-        return self._testbed.get(guid, name, time)
-
-    @Marshalling.handles(SET)
-    @Marshalling.args(int, Marshalling.base64_data, Marshalling.pickled_data, str)
-    @Marshalling.retvoid
-    def set(self, guid, name, value, time):
-        self._testbed.set(guid, name, value, time)
-
-    @Marshalling.handles(GET_ADDRESS)
-    @Marshalling.args(int, int, Marshalling.base64_data)
-    @Marshalling.retval()
-    def get_address(self, guid, index, attribute):
-        return str(self._testbed.get_address(guid, index, attribute))
-
-    @Marshalling.handles(GET_ROUTE)
-    @Marshalling.args(int, int, Marshalling.base64_data)
-    @Marshalling.retval()
-    def get_route(self, guid, index, attribute):
-        return str(self._testbed.get_route(guid, index, attribute))
-
-    @Marshalling.handles(ACTION)
-    @Marshalling.args(str, int, Marshalling.base64_data)
-    @Marshalling.retvoid
-    def action(self, time, guid, command):
-        self._testbed.action(time, guid, command)
-
-    @Marshalling.handles(STATUS)
-    @Marshalling.args(Marshalling.nullint)
-    @Marshalling.retval(int)
-    def status(self, guid):
-        return self._testbed.status(guid)
-
-    @Marshalling.handles(TESTBED_STATUS)
-    @Marshalling.args()
-    @Marshalling.retval(int)
-    def testbed_status(self):
-        return self._testbed.testbed_status()
-
-    @Marshalling.handles(GET_ATTRIBUTE_LIST)
-    @Marshalling.args(int, Marshalling.nullint, Marshalling.bool)
-    @Marshalling.retval( Marshalling.pickled_data )
-    def get_attribute_list(self, guid, filter_flags = None, exclude = False):
-        return self._testbed.get_attribute_list(guid, filter_flags, exclude)
-
-    @Marshalling.handles(GET_FACTORY_ID)
-    @Marshalling.args(int)
-    @Marshalling.retval()
-    def get_factory_id(self, guid):
-        return self._testbed.get_factory_id(guid)
-
-    @Marshalling.handles(RECOVER)
-    @Marshalling.args()
-    @Marshalling.retvoid
-    def recover(self):
-        self._testbed.recover()
-
-
-class ExperimentControllerServer(BaseServer):
-    def __init__(self, root_dir, log_level, experiment_xml, environment_setup,
-            clean_root):
-        super(ExperimentControllerServer, self).__init__(root_dir, log_level, 
-            environment_setup = environment_setup, clean_root = clean_root)
-        self._experiment_xml = experiment_xml
-        self._experiment = None
-
-    def post_daemonize(self):
-        from nepi.core.execute import ExperimentController
-        self._experiment = ExperimentController(self._experiment_xml, 
-            root_dir = self._root_dir)
-
-    @Marshalling.handles(GUIDS)
-    @Marshalling.args()
-    @Marshalling.retval( Marshalling.pickled_data )
-    def guids(self):
-        return self._experiment.guids
-
-    @Marshalling.handles(STARTED_TIME)
-    @Marshalling.args()
-    @Marshalling.retval( Marshalling.pickled_data )
-    def started_time(self):
-        return self._experiment.started_time
-
-    @Marshalling.handles(STOPPED_TIME)
-    @Marshalling.args()
-    @Marshalling.retval( Marshalling.pickled_data )
-    def stopped_time(self):
-        return self._experiment.stopped_time
-
-    @Marshalling.handles(XML)
-    @Marshalling.args()
-    @Marshalling.retval()
-    def experiment_design_xml(self):
-        return self._experiment.experiment_design_xml
-        
-    @Marshalling.handles(EXEC_XML)
-    @Marshalling.args()
-    @Marshalling.retval()
-    def experiment_execute_xml(self):
-        return self._experiment.experiment_execute_xml
-        
-    @Marshalling.handles(TRACE)
-    @Marshalling.args(int, str, Marshalling.base64_data)
-    @Marshalling.retval()
-    def trace(self, guid, trace_id, attribute):
-        return str(self._experiment.trace(guid, trace_id, attribute))
-
-    @Marshalling.handles(TRACES_INFO)
-    @Marshalling.args()
-    @Marshalling.retval( Marshalling.pickled_data )
-    def traces_info(self):
-        return self._experiment.traces_info()
-
-    @Marshalling.handles(FINISHED)
-    @Marshalling.args(int)
-    @Marshalling.retval(Marshalling.bool)
-    def is_finished(self, guid):
-        return self._experiment.is_finished(guid)
-
-    @Marshalling.handles(STATUS)
-    @Marshalling.args(int)
-    @Marshalling.retval(int)
-    def status(self, guid):
-        return self._experiment.status(guid)
-
-    @Marshalling.handles(GET)
-    @Marshalling.args(int, Marshalling.base64_data, str)
-    @Marshalling.retval( Marshalling.pickled_data )
-    def get(self, guid, name, time):
-        return self._experiment.get(guid, name, time)
-
-    @Marshalling.handles(SET)
-    @Marshalling.args(int, Marshalling.base64_data, Marshalling.pickled_data, str)
-    @Marshalling.retvoid
-    def set(self, guid, name, value, time):
-        self._experiment.set(guid, name, value, time)
-
-    @Marshalling.handles(START)
-    @Marshalling.args()
-    @Marshalling.retvoid
-    def start(self):
-        self._experiment.start()
-
-    @Marshalling.handles(STOP)
-    @Marshalling.args()
-    @Marshalling.retvoid
-    def stop(self):
-        self._experiment.stop()
-
-    @Marshalling.handles(RECOVER)
-    @Marshalling.args()
-    @Marshalling.retvoid
-    def recover(self):
-        self._experiment.recover()
-
-    @Marshalling.handles(SHUTDOWN)
-    @Marshalling.args()
-    @Marshalling.retvoid
-    def shutdown(self):
-        self._experiment.shutdown()
-
-    @Marshalling.handles(GET_TESTBED_ID)
-    @Marshalling.args(int)
-    @Marshalling.retval()
-    def get_testbed_id(self, guid):
-        return self._experiment.get_testbed_id(guid)
-
-    @Marshalling.handles(GET_FACTORY_ID)
-    @Marshalling.args(int)
-    @Marshalling.retval()
-    def get_factory_id(self, guid):
-        return self._experiment.get_factory_id(guid)
-
-    @Marshalling.handles(GET_TESTBED_VERSION)
-    @Marshalling.args(int)
-    @Marshalling.retval()
-    def get_testbed_version(self, guid):
-        return self._experiment.get_testbed_version(guid)
-
-class BaseProxy(object):
-    _ServerClass = None
-    _ServerClassModule = "nepi.util.proxy"
-    
-    def __init__(self, ctor_args, root_dir, 
-            launch = True, 
-            communication = DC.ACCESS_LOCAL,
-            host = None, 
-            port = None, 
-            user = None, 
-            ident_key = None, 
-            agent = None,
-            sudo = False, 
-            environment_setup = "",
-            clean_root = False):
-        if launch:
-            python_code = (
-                    "from %(classmodule)s import %(classname)s;"
-                    "s = %(classname)s%(ctor_args)r;"
-                    "s.run()" 
-                % dict(
-                    classname = self._ServerClass.__name__,
-                    classmodule = self._ServerClassModule,
-                    ctor_args = ctor_args
-                ) )
-            proc = server.popen_python(python_code,
-                        communication = communication,
-                        host = host,
-                        port = port, 
-                        user = user, 
-                        agent = agent,
-                        ident_key = ident_key, 
-                        sudo = sudo,
-                        environment_setup = environment_setup) 
-            # Wait for the server to be ready, otherwise nobody
-            # will be able to connect to it
-            err = []
-            helo = "nope"
-            while helo:
-                helo = proc.stderr.readline()
-                if helo == 'SERVER_READY.\n':
-                    break
-                err.append(helo)
-            else:
-                raise AssertionError, "Expected 'SERVER_READY.', got: %s" % (''.join(err),)
-        # connect client to server
-        self._client = server.Client(root_dir, 
-                communication = communication,
-                host = host, 
-                port = port, 
-                user = user, 
-                agent = agent, 
-                sudo = sudo,
-                environment_setup = environment_setup)
-    
-    @staticmethod
-    def _make_message(argtypes, argencoders, command, methname, classname, *args):
-        if len(argtypes) != len(argencoders):
-            raise ValueError, "Invalid arguments for _make_message: "\
-                "in stub method %s of class %s "\
-                "argtypes and argencoders must match in size" % (
-                    methname, classname )
-        if len(argtypes) != len(args):
-            raise ValueError, "Invalid arguments for _make_message: "\
-                "in stub method %s of class %s "\
-                "expected %d arguments, got %d" % (
-                    methname, classname,
-                    len(argtypes), len(args))
-        
-        buf = []
-        for argnum, (typ, (encode, fmt), val) in enumerate(zip(argtypes, argencoders, args)):
-            try:
-                buf.append(fmt % encode(val))
-            except:
-                import traceback
-                raise TypeError, "Argument %d of stub method %s of class %s "\
-                    "requires a value of type %s, but got %s - nested error: %s" % (
-                        argnum, methname, classname,
-                        getattr(typ, '__name__', typ), type(val),
-                        traceback.format_exc()
-                )
-        
-        return "%d|%s" % (command, '|'.join(buf))
-    
-    @staticmethod
-    def _parse_reply(rvtype, methname, classname, reply):
-        if not reply:
-            raise RuntimeError, "Invalid reply: %r "\
-                "for stub method %s of class %s" % (
-                    reply,
-                    methname,
-                    classname)
-        
-        try:
-            result = reply.split("|")
-            code = int(result[0])
-            text = result[1]
-        except:
-            import traceback
-            raise TypeError, "Return value of stub method %s of class %s "\
-                "cannot be parsed: must be of type %s, got %r - nested error: %s" % (
-                    methname, classname,
-                    getattr(rvtype, '__name__', rvtype), reply,
-                    traceback.format_exc()
-            )
-        if code == ERROR:
-            text = base64.b64decode(text)
-            raise RuntimeError(text)
-        elif code == OK:
-            try:
-                if rvtype is None:
-                    return
-                else:
-                    return rvtype(text)
-            except:
-                import traceback
-                raise TypeError, "Return value of stub method %s of class %s "\
-                    "cannot be parsed: must be of type %s - nested error: %s" % (
-                        methname, classname,
-                        getattr(rvtype, '__name__', rvtype),
-                        traceback.format_exc()
-                )
-        else:
-            raise RuntimeError, "Invalid reply: %r "\
-                "for stub method %s of class %s - unknown code" % (
-                    reply,
-                    methname,
-                    classname)
-    
-    @staticmethod
-    def _make_stubs(server_class, template_class):
-        """
-        Returns a dictionary method_name -> method
-        with stub methods.
-        
-        Usage:
-        
-            class SomeProxy(BaseProxy):
-               ...
-               
-               locals().update( BaseProxy._make_stubs(
-                    ServerClass,
-                    TemplateClass
-               ) )
-        
-        ServerClass is the corresponding Server class, as
-        specified in the _ServerClass class method (_make_stubs
-        is static and can't access the method), and TemplateClass
-        is the ultimate implementation class behind the server,
-        from which argument names and defaults are taken, to
-        maintain meaningful interfaces.
-        """
-        rv = {}
-        
-        class NONE: pass
-        
-        import os.path
-        func_template_path = os.path.join(
-            os.path.dirname(__file__),
-            'proxy_stub.tpl')
-        func_template_file = open(func_template_path, "r")
-        func_template = func_template_file.read()
-        func_template_file.close()
-        
-        for methname in vars(template_class).copy():
-            if methname.endswith('_deferred'):
-                # cannot wrap deferreds...
-                continue
-            dmethname = methname+'_deferred'
-            if hasattr(server_class, methname) and not methname.startswith('_'):
-                template_meth = getattr(template_class, methname)
-                server_meth = getattr(server_class, methname)
-                
-                command = getattr(server_meth, '_handles_command', None)
-                argtypes = getattr(server_meth, '_argtypes', None)
-                argencoders = getattr(server_meth, '_argencoders', None)
-                rvtype = getattr(server_meth, '_retval', None)
-                doprop = False
-                
-                if hasattr(template_meth, 'fget'):
-                    # property getter
-                    template_meth = template_meth.fget
-                    doprop = True
-                
-                if command is not None and argtypes is not None and argencoders is not None:
-                    # We have an interface method...
-                    code = template_meth.func_code
-                    argnames = code.co_varnames[:code.co_argcount]
-                    argdefaults = ( (NONE,) * (len(argnames) - len(template_meth.func_defaults or ()))
-                                  + (template_meth.func_defaults or ()) )
-                    
-                    func_globals = dict(
-                        BaseProxy = BaseProxy,
-                        argtypes = argtypes,
-                        argencoders = argencoders,
-                        rvtype = rvtype,
-                        functools = functools,
-                    )
-                    context = dict()
-                    
-                    func_text = func_template % dict(
-                        self = argnames[0],
-                        args = '%s' % (','.join(argnames[1:])),
-                        argdefs = ','.join([
-                            argname if argdef is NONE
-                            else "%s=%r" % (argname, argdef)
-                            for argname, argdef in zip(argnames[1:], argdefaults[1:])
-                        ]),
-                        command = command,
-                        methname = methname,
-                        classname = server_class.__name__
-                    )
-                    
-                    func_text = compile(
-                        func_text,
-                        func_template_path,
-                        'exec')
-                    
-                    exec func_text in func_globals, context
-                    
-                    if doprop:
-                        rv[methname] = property(context[methname])
-                        rv[dmethname] = property(context[dmethname])
-                    else:
-                        rv[methname] = context[methname]
-                        rv[dmethname] = context[dmethname]
-                    
-                    # inject _deferred into core classes
-                    if hasattr(template_class, methname) and not hasattr(template_class, dmethname):
-                        def freezename(methname, dmethname):
-                            def dmeth(self, *p, **kw): 
-                                return getattr(self, methname)(*p, **kw)
-                            dmeth.__name__ = dmethname
-                            return dmeth
-                        dmeth = freezename(methname, dmethname)
-                        setattr(template_class, dmethname, dmeth)
-        
-        return rv
-
-class ExperimentSuiteProxy(BaseProxy):
-    
-    _ServerClass = ExperimentSuiteServer
-    
-    def __init__(self, root_dir, log_level,
-            xml, repetitions, duration, wait_guids, 
-            communication = DC.ACCESS_LOCAL,
-            host = None, 
-            port = None, 
-            user = None, 
-            ident_key = None, 
-            agent = None,
-            sudo = False, 
-            environment_setup = "", 
-            clean_root = False):
-        super(ExperimentSuiteProxy,self).__init__(
-            ctor_args = (root_dir, log_level,
-                xml, 
-                repetitions, 
-                duration,
-                wait_guids, 
-                communication,
-                host, 
-                port, 
-                user, 
-                ident_key,
-                agent, 
-                sudo, 
-                environment_setup, 
-                clean_root),
-            root_dir = root_dir,
-            launch = True, #launch
-            communication = communication,
-            host = host, 
-            port = port, 
-            user = user,
-            ident_key = ident_key, 
-            agent = agent, 
-            sudo = sudo, 
-            environment_setup = environment_setup)
-
-    locals().update( BaseProxy._make_stubs(
-        server_class = ExperimentSuiteServer,
-        template_class = nepi.core.execute.ExperimentSuite,
-    ) )
-    
-    # Shutdown stops the serverside...
-    def shutdown(self, _stub = shutdown):
-        rv = _stub(self)
-        self._client.send_stop()
-        self._client.read_reply() # wait for it
-        return rv
-
-class TestbedControllerProxy(BaseProxy):
-    
-    _ServerClass = TestbedControllerServer
-    
-    def __init__(self, root_dir, log_level, 
-            testbed_id = None, 
-            testbed_version = None, 
-            launch = True, 
-            communication = DC.ACCESS_LOCAL,
-            host = None, 
-            port = None, 
-            user = None, 
-            ident_key = None, 
-            agent = None,
-            sudo = False, 
-            environment_setup = "", 
-            clean_root = False):
-        if launch and (testbed_id == None or testbed_version == None):
-            raise RuntimeError("To launch a TesbedControllerServer a "
-                    "testbed_id and testbed_version are required")
-        super(TestbedControllerProxy,self).__init__(
-            ctor_args = (root_dir, log_level, testbed_id, testbed_version,
-                environment_setup, clean_root),
-            root_dir = root_dir,
-            launch = launch,
-            communication = communication,
-            host = host, 
-            port = port, 
-            user = user,
-            ident_key = ident_key, 
-            agent = agent, 
-            sudo = sudo, 
-            environment_setup = environment_setup)
-
-    locals().update( BaseProxy._make_stubs(
-        server_class = TestbedControllerServer,
-        template_class = nepi.core.execute.TestbedController,
-    ) )
-    
-    # Shutdown stops the serverside...
-    def shutdown(self, _stub = shutdown):
-        rv = _stub(self)
-        self._client.send_stop()
-        self._client.read_reply() # wait for it
-        return rv
-    
-
-class ExperimentControllerProxy(BaseProxy):
-    _ServerClass = ExperimentControllerServer
-    
-    def __init__(self, root_dir, log_level, 
-            experiment_xml = None, 
-            launch = True, 
-            communication = DC.ACCESS_LOCAL,
-            host = None, 
-            port = None, 
-            user = None, 
-            ident_key = None, 
-            agent = None, 
-            sudo = False, 
-            environment_setup = "",
-            clean_root = False):
-        super(ExperimentControllerProxy,self).__init__(
-            ctor_args = (root_dir, log_level, experiment_xml, environment_setup, 
-                clean_root),
-            root_dir = root_dir,
-            launch = launch, 
-            communication = communication,
-            host = host, 
-            port = port, 
-            user = user,
-            ident_key = ident_key, 
-            agent = agent, 
-            sudo = sudo, 
-            environment_setup = environment_setup,
-            clean_root = clean_root)
-
-    locals().update( BaseProxy._make_stubs(
-        server_class = ExperimentControllerServer,
-        template_class = nepi.core.execute.ExperimentController,
-    ) )
-
-    # Shutdown stops the serverside...
-    def shutdown(self, _stub = shutdown):
-        rv = _stub(self)
-        self._client.send_stop()
-        self._client.read_reply() # wait for it
-        return rv
-
diff --git a/src/nepi/util/proxy_stub.tpl b/src/nepi/util/proxy_stub.tpl
deleted file mode 100644 (file)
index 7161c6e..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-def %(methname)s(%(self)s, %(argdefs)s):
-    msg = BaseProxy._make_message(
-        argtypes,
-        argencoders,
-        %(command)d,
-        %(methname)r,
-        %(classname)r,
-        %(args)s)
-    %(self)s._client.send_msg(msg)
-    reply = %(self)s._client.read_reply()
-    rv = BaseProxy._parse_reply(
-        rvtype,
-        %(methname)r,
-        %(classname)r,
-        reply)
-    return rv
-
-def %(methname)s_deferred(%(self)s, %(argdefs)s):
-    msg = BaseProxy._make_message(
-        argtypes,
-        argencoders,
-        %(command)d,
-        %(methname)r,
-        %(classname)r,
-        %(args)s)
-    %(self)s._client.send_msg(msg)
-    rv = %(self)s._client.defer_reply(
-        transform = functools.partial(
-            BaseProxy._parse_reply,
-            rvtype,
-            %(methname)r+'_deferred',
-            %(classname)r)
-        )
-    return rv
-
diff --git a/src/nepi/util/rmatcher.py b/src/nepi/util/rmatcher.py
new file mode 100644 (file)
index 0000000..08563f6
--- /dev/null
@@ -0,0 +1,46 @@
+
def match_tags(box, all_tags, exact_tags):
    """ Return True if box carries the required tags.

    exact_tags matches only when box.connections equals the tag set exactly;
    all_tags matches when every listed tag is contained in box.connections.
    Either argument may be None or empty, in which case that criterion is
    simply skipped.
    """
    # guard against None: find_boxes forwards its (None) defaults here,
    # and set(None) would raise TypeError
    tall = set(all_tags) if all_tags else set()
    texact = set(exact_tags) if exact_tags else set()

    if texact and box.connections == texact:
        return True

    if tall and tall.issubset(box.connections):
        return True

    return False

def find_boxes(box, all_tags = None, exact_tags = None, max_depth = 1):
    """ Look for the connected boxes with the required tags, doing breadth-first
    search, until max_depth ( max_depth = None will traverse the entire graph ).

    Raises RuntimeError when neither all_tags nor exact_tags is given.
    Returns the set of matching boxes (the starting box included when it
    matches).
    """
    if not all_tags and not exact_tags:
        msg = "No matching criteria for resources."
        raise RuntimeError(msg)

    # FIFO of (depth, box) pairs. A list with a read cursor gives true
    # breadth-first order; the previous set-based queue popped in arbitrary
    # order (and its loop condition referenced an undefined name 'q').
    queue = [(0, box)]
    head = 0

    # boxes are marked as traversed when enqueued, so each is visited once
    traversed = set()
    traversed.add(box)

    result = set()

    while head < len(queue):
        depth, a = queue[head]
        head += 1

        if match_tags(a, all_tags, exact_tags):
            result.add(a)

        # expand neighbours only while strictly below the depth limit;
        # '<=' here would pull in boxes at depth max_depth + 1
        if not max_depth or depth < max_depth:
            for b in sorted(a.connections):
                if b not in traversed:
                    traversed.add(b)
                    queue.append((depth + 1, b))

    return result
diff --git a/src/nepi/util/server.py b/src/nepi/util/server.py
deleted file mode 100644 (file)
index 1c09874..0000000
+++ /dev/null
@@ -1,1134 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from nepi.util.constants import DeploymentConfiguration as DC
-
-import base64
-import errno
-import os
-import os.path
-import resource
-import select
-import shutil
-import signal
-import socket
-import sys
-import subprocess
-import threading
-import time
-import traceback
-import re
-import tempfile
-import defer
-import functools
-import collections
-import hashlib
-
-CTRL_SOCK = "ctrl.sock"
-CTRL_PID = "ctrl.pid"
-STD_ERR = "stderr.log"
-MAX_FD = 1024
-
-STOP_MSG = "STOP"
-
-TRACE = os.environ.get("NEPI_TRACE", "false").lower() in ("true", "1", "on")
-
-OPENSSH_HAS_PERSIST = None
-
-if hasattr(os, "devnull"):
-    DEV_NULL = os.devnull
-else:
-    DEV_NULL = "/dev/null"
-
-SHELL_SAFE = re.compile('^[-a-zA-Z0-9_=+:.,/]*$')
-
-hostbyname_cache = dict()
-
-def gethostbyname(host):
-    hostbyname = hostbyname_cache.get(host)
-    if not hostbyname:
-        hostbyname = socket.gethostbyname(host)
-        hostbyname_cache[host] = hostbyname
-    return hostbyname
-
-def openssh_has_persist():
-    global OPENSSH_HAS_PERSIST
-    if OPENSSH_HAS_PERSIST is None:
-        proc = subprocess.Popen(["ssh","-v"],
-            stdout = subprocess.PIPE,
-            stderr = subprocess.STDOUT,
-            stdin = open("/dev/null","r") )
-        out,err = proc.communicate()
-        proc.wait()
-        
-        vre = re.compile(r'OpenSSH_(?:[6-9]|5[.][8-9]|5[.][1-9][0-9]|[1-9][0-9]).*', re.I)
-        OPENSSH_HAS_PERSIST = bool(vre.match(out))
-    return OPENSSH_HAS_PERSIST
-
-def shell_escape(s):
-    """ Escapes strings so that they are safe to use as command-line arguments """
-    if SHELL_SAFE.match(s):
-        # safe string - no escaping needed
-        return s
-    else:
-        # unsafe string - escape
-        def escp(c):
-            if (32 <= ord(c) < 127 or c in ('\r','\n','\t')) and c not in ("'",'"'):
-                return c
-            else:
-                return "'$'\\x%02x''" % (ord(c),)
-        s = ''.join(map(escp,s))
-        return "'%s'" % (s,)
-
-def eintr_retry(func):
-    import functools
-    @functools.wraps(func)
-    def rv(*p, **kw):
-        retry = kw.pop("_retry", False)
-        for i in xrange(0 if retry else 4):
-            try:
-                return func(*p, **kw)
-            except (select.error, socket.error), args:
-                if args[0] == errno.EINTR:
-                    continue
-                else:
-                    raise 
-            except OSError, e:
-                if e.errno == errno.EINTR:
-                    continue
-                else:
-                    raise
-        else:
-            return func(*p, **kw)
-    return rv
-
-class Server(object):
-    def __init__(self, root_dir = ".", log_level = DC.ERROR_LEVEL, 
-            environment_setup = "", clean_root = False):
-        self._root_dir = root_dir
-        self._clean_root = clean_root
-        self._stop = False
-        self._ctrl_sock = None
-        self._log_level = log_level
-        self._rdbuf = ""
-        self._environment_setup = environment_setup
-
-    def run(self):
-        try:
-            if self.daemonize():
-                self.post_daemonize()
-                self.loop()
-                self.cleanup()
-                # ref: "os._exit(0)"
-                # can not return normally after fork beacuse no exec was done.
-                # This means that if we don't do a os._exit(0) here the code that 
-                # follows the call to "Server.run()" in the "caller code" will be 
-                # executed... but by now it has already been executed after the 
-                # first process (the one that did the first fork) returned.
-                os._exit(0)
-        except:
-            print >>sys.stderr, "SERVER_ERROR."
-            self.log_error()
-            self.cleanup()
-            os._exit(0)
-        print >>sys.stderr, "SERVER_READY."
-
-    def daemonize(self):
-        # pipes for process synchronization
-        (r, w) = os.pipe()
-        
-        # build root folder
-        root = os.path.normpath(self._root_dir)
-        if self._root_dir not in [".", ""] and os.path.exists(root) \
-                and self._clean_root:
-            shutil.rmtree(root)
-        if not os.path.exists(root):
-            os.makedirs(root, 0755)
-
-        pid1 = os.fork()
-        if pid1 > 0:
-            os.close(w)
-            while True:
-                try:
-                    os.read(r, 1)
-                except OSError, e: # pragma: no cover
-                    if e.errno == errno.EINTR:
-                        continue
-                    else:
-                        raise
-                break
-            os.close(r)
-            # os.waitpid avoids leaving a <defunc> (zombie) process
-            st = os.waitpid(pid1, 0)[1]
-            if st:
-                raise RuntimeError("Daemonization failed")
-            # return 0 to inform the caller method that this is not the 
-            # daemonized process
-            return 0
-        os.close(r)
-
-        # Decouple from parent environment.
-        os.chdir(self._root_dir)
-        os.umask(0)
-        os.setsid()
-
-        # fork 2
-        pid2 = os.fork()
-        if pid2 > 0:
-            # see ref: "os._exit(0)"
-            os._exit(0)
-
-        # close all open file descriptors.
-        max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
-        if (max_fd == resource.RLIM_INFINITY):
-            max_fd = MAX_FD
-        for fd in range(3, max_fd):
-            if fd != w:
-                try:
-                    os.close(fd)
-                except OSError:
-                    pass
-
-        # Redirect standard file descriptors.
-        stdin = open(DEV_NULL, "r")
-        stderr = stdout = open(STD_ERR, "a", 0)
-        os.dup2(stdin.fileno(), sys.stdin.fileno())
-        # NOTE: sys.stdout.write will still be buffered, even if the file
-        # was opened with 0 buffer
-        os.dup2(stdout.fileno(), sys.stdout.fileno())
-        os.dup2(stderr.fileno(), sys.stderr.fileno())
-        
-        # setup environment
-        if self._environment_setup:
-            # parse environment variables and pass to child process
-            # do it by executing shell commands, in case there's some heavy setup involved
-            envproc = subprocess.Popen(
-                [ "bash", "-c", 
-                    "( %s python -c 'import os,sys ; print \"\\x01\".join(\"\\x02\".join(map(str,x)) for x in os.environ.iteritems())' ) | tail -1" %
-                        ( self._environment_setup, ) ],
-                stdin = subprocess.PIPE, 
-                stdout = subprocess.PIPE,
-                stderr = subprocess.PIPE
-            )
-            out,err = envproc.communicate()
-
-            # parse new environment
-            if out:
-                environment = dict(map(lambda x:x.split("\x02"), out.split("\x01")))
-            
-                # apply to current environment
-                for name, value in environment.iteritems():
-                    os.environ[name] = value
-                
-                # apply pythonpath
-                if 'PYTHONPATH' in environment:
-                    sys.path = environment['PYTHONPATH'].split(':') + sys.path
-
-        # create control socket
-        self._ctrl_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-        try:
-            self._ctrl_sock.bind(CTRL_SOCK)
-        except socket.error:
-            # Address in use, check pidfile
-            pid = None
-            try:
-                pidfile = open(CTRL_PID, "r")
-                pid = pidfile.read()
-                pidfile.close()
-                pid = int(pid)
-            except:
-                # no pidfile
-                pass
-            
-            if pid is not None:
-                # Check process liveliness
-                if not os.path.exists("/proc/%d" % (pid,)):
-                    # Ok, it's dead, clean the socket
-                    os.remove(CTRL_SOCK)
-            
-            # try again
-            self._ctrl_sock.bind(CTRL_SOCK)
-            
-        self._ctrl_sock.listen(0)
-        
-        # Save pidfile
-        pidfile = open(CTRL_PID, "w")
-        pidfile.write(str(os.getpid()))
-        pidfile.close()
-
-        # let the parent process know that the daemonization is finished
-        os.write(w, "\n")
-        os.close(w)
-        return 1
-
-    def post_daemonize(self):
-        os.environ["NEPI_CONTROLLER_LOGLEVEL"] = self._log_level
-        # QT, for some strange reason, redefines the SIGCHILD handler to write
-        # a \0 to a fd (lets say fileno 'x'), when ever a SIGCHILD is received.
-        # Server dameonization closes all file descriptors from fileno '3',
-        # but the overloaded handler (inherited by the forked process) will
-        # keep trying to write the \0 to fileno 'x', which might have been reused 
-        # after closing, for other operations. This is bad bad bad when fileno 'x'
-        # is in use for communication pouroses, because unexpected \0 start
-        # appearing in the communication messages... this is exactly what happens 
-        # when using netns in daemonized form. Thus, be have no other alternative than
-        # restoring the SIGCHLD handler to the default here.
-        import signal
-        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
-
-    def loop(self):
-        while not self._stop:
-            conn, addr = self._ctrl_sock.accept()
-            self.log_error("ACCEPTED CONNECTION: %s" % (addr,))
-            conn.settimeout(5)
-            while not self._stop:
-                try:
-                    msg = self.recv_msg(conn)
-                except socket.timeout, e:
-                    #self.log_error("SERVER recv_msg: connection timedout ")
-                    continue
-                
-                if not msg:
-                    self.log_error("CONNECTION LOST")
-                    break
-                    
-                if msg == STOP_MSG:
-                    self._stop = True
-                    reply = self.stop_action()
-                else:
-                    reply = self.reply_action(msg)
-                
-                try:
-                    self.send_reply(conn, reply)
-                except socket.error:
-                    self.log_error()
-                    self.log_error("NOTICE: Awaiting for reconnection")
-                    break
-            try:
-                conn.close()
-            except:
-                # Doesn't matter
-                self.log_error()
-
-    def recv_msg(self, conn):
-        data = [self._rdbuf]
-        chunk = data[0]
-        while '\n' not in chunk:
-            try:
-                chunk = conn.recv(1024)
-            except (OSError, socket.error), e:
-                if e[0] != errno.EINTR:
-                    raise
-                else:
-                    continue
-            if chunk:
-                data.append(chunk)
-            else:
-                # empty chunk = EOF
-                break
-        data = ''.join(data).split('\n',1)
-        while len(data) < 2:
-            data.append('')
-        data, self._rdbuf = data
-        
-        decoded = base64.b64decode(data)
-        return decoded.rstrip()
-
-    def send_reply(self, conn, reply):
-        encoded = base64.b64encode(reply)
-        conn.send("%s\n" % encoded)
-       
-    def cleanup(self):
-        try:
-            self._ctrl_sock.close()
-            os.remove(CTRL_SOCK)
-        except:
-            self.log_error()
-
-    def stop_action(self):
-        return "Stopping server"
-
-    def reply_action(self, msg):
-        return "Reply to: %s" % msg
-
-    def log_error(self, text = None, context = ''):
-        if text == None:
-            text = traceback.format_exc()
-        date = time.strftime("%Y-%m-%d %H:%M:%S")
-        if context:
-            context = " (%s)" % (context,)
-        sys.stderr.write("ERROR%s: %s\n%s\n" % (context, date, text))
-        return text
-
-    def log_debug(self, text):
-        if self._log_level == DC.DEBUG_LEVEL:
-            date = time.strftime("%Y-%m-%d %H:%M:%S")
-            sys.stderr.write("DEBUG: %s\n%s\n" % (date, text))
-
-class Forwarder(object):
-    def __init__(self, root_dir = "."):
-        self._ctrl_sock = None
-        self._root_dir = root_dir
-        self._stop = False
-        self._rdbuf = ""
-
-    def forward(self):
-        self.connect()
-        print >>sys.stderr, "FORWARDER_READY."
-        while not self._stop:
-            data = self.read_data()
-            if not data:
-                # Connection to client lost
-                break
-            self.send_to_server(data)
-            
-            data = self.recv_from_server()
-            if not data:
-                # Connection to server lost
-                raise IOError, "Connection to server lost while "\
-                    "expecting response"
-            self.write_data(data)
-        self.disconnect()
-
-    def read_data(self):
-        return sys.stdin.readline()
-
-    def write_data(self, data):
-        sys.stdout.write(data)
-        # sys.stdout.write is buffered, this is why we need to do a flush()
-        sys.stdout.flush()
-
-    def send_to_server(self, data):
-        try:
-            self._ctrl_sock.send(data)
-        except (IOError, socket.error), e:
-            if e[0] == errno.EPIPE:
-                self.connect()
-                self._ctrl_sock.send(data)
-            else:
-                raise e
-        encoded = data.rstrip() 
-        msg = base64.b64decode(encoded)
-        if msg == STOP_MSG:
-            self._stop = True
-
-    def recv_from_server(self):
-        data = [self._rdbuf]
-        chunk = data[0]
-        while '\n' not in chunk:
-            try:
-                chunk = self._ctrl_sock.recv(1024)
-            except (OSError, socket.error), e:
-                if e[0] != errno.EINTR:
-                    raise
-                continue
-            if chunk:
-                data.append(chunk)
-            else:
-                # empty chunk = EOF
-                break
-        data = ''.join(data).split('\n',1)
-        while len(data) < 2:
-            data.append('')
-        data, self._rdbuf = data
-        
-        return data+'\n'
-    def connect(self):
-        self.disconnect()
-        self._ctrl_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-        sock_addr = os.path.join(self._root_dir, CTRL_SOCK)
-        self._ctrl_sock.connect(sock_addr)
-
-    def disconnect(self):
-        try:
-            self._ctrl_sock.close()
-        except:
-            pass
-
-class Client(object):
-    def __init__(self, root_dir = ".", host = None, port = None, user = None, 
-            agent = None, sudo = False, communication = DC.ACCESS_LOCAL,
-            environment_setup = ""):
-        self.root_dir = root_dir
-        self.addr = (host, port)
-        self.user = user
-        self.agent = agent
-        self.sudo = sudo
-        self.communication = communication
-        self.environment_setup = environment_setup
-        self._stopped = False
-        self._deferreds = collections.deque()
-        self.connect()
-    
-    def __del__(self):
-        if self._process.poll() is None:
-            os.kill(self._process.pid, signal.SIGTERM)
-        self._process.wait()
-        
-    def connect(self):
-        root_dir = self.root_dir
-        (host, port) = self.addr
-        user = self.user
-        agent = self.agent
-        sudo = self.sudo
-        communication = self.communication
-        
-        python_code = "from nepi.util import server;c=server.Forwarder(%r);\
-                c.forward()" % (root_dir,)
-
-        self._process = popen_python(python_code, 
-                    communication = communication,
-                    host = host, 
-                    port = port, 
-                    user = user, 
-                    agent = agent, 
-                    sudo = sudo, 
-                    environment_setup = self.environment_setup)
-               
-        # Wait for the forwarder to be ready, otherwise nobody
-        # will be able to connect to it
-        err = []
-        helo = "nope"
-        while helo:
-            helo = self._process.stderr.readline()
-            if helo == 'FORWARDER_READY.\n':
-                break
-            err.append(helo)
-        else:
-            raise AssertionError, "Expected 'FORWARDER_READY.', got: %s" % (''.join(err),)
-        
-    def send_msg(self, msg):
-        encoded = base64.b64encode(msg)
-        data = "%s\n" % encoded
-        
-        try:
-            self._process.stdin.write(data)
-        except (IOError, ValueError):
-            # dead process, poll it to un-zombify
-            self._process.poll()
-            
-            # try again after reconnect
-            # If it fails again, though, give up
-            self.connect()
-            self._process.stdin.write(data)
-
-    def send_stop(self):
-        self.send_msg(STOP_MSG)
-        self._stopped = True
-
-    def defer_reply(self, transform=None):
-        defer_entry = []
-        self._deferreds.append(defer_entry)
-        return defer.Defer(
-            functools.partial(self.read_reply, defer_entry, transform)
-        )
-        
-    def _read_reply(self):
-        data = self._process.stdout.readline()
-        encoded = data.rstrip() 
-        if not encoded:
-            # empty == eof == dead process, poll it to un-zombify
-            self._process.poll()
-            
-            raise RuntimeError, "Forwarder died while awaiting reply: %s" % (self._process.stderr.read(),)
-        return base64.b64decode(encoded)
-    
-    def read_reply(self, which=None, transform=None):
-        # Test to see if someone did it already
-        if which is not None and len(which):
-            # Ok, they did it...
-            # ...just return the deferred value
-            if transform:
-                return transform(which[0])
-            else:
-                return which[0]
-        
-        # Process all deferreds until the one we're looking for
-        # or until the queue is empty
-        while self._deferreds:
-            try:
-                deferred = self._deferreds.popleft()
-            except IndexError:
-                # emptied
-                break
-            
-            deferred.append(self._read_reply())
-            if deferred is which:
-                # We reached the one we were looking for
-                if transform:
-                    return transform(deferred[0])
-                else:
-                    return deferred[0]
-        
-        if which is None:
-            # They've requested a synchronous read
-            if transform:
-                return transform(self._read_reply())
-            else:
-                return self._read_reply()
-
-def _make_server_key_args(server_key, host, port, args):
-    """ 
-    Returns a reference to the created temporary file, and adds the
-    corresponding arguments to the given argument list.
-    
-    Make sure to hold onto it until the process is done with the file
-    """
-    if port is not None:
-        host = '%s:%s' % (host,port)
-    # Create a temporary server key file
-    tmp_known_hosts = tempfile.NamedTemporaryFile()
-   
-    hostbyname = gethostbyname(host) 
-
-    # Add the intended host key
-    tmp_known_hosts.write('%s,%s %s\n' % (host, hostbyname, server_key))
-    
-    # If we're not in strict mode, add user-configured keys
-    if os.environ.get('NEPI_STRICT_AUTH_MODE',"").lower() not in ('1','true','on'):
-        user_hosts_path = '%s/.ssh/known_hosts' % (os.environ.get('HOME',""),)
-        if os.access(user_hosts_path, os.R_OK):
-            f = open(user_hosts_path, "r")
-            tmp_known_hosts.write(f.read())
-            f.close()
-        
-    tmp_known_hosts.flush()
-    
-    args.extend(['-o', 'UserKnownHostsFile=%s' % (tmp_known_hosts.name,)])
-    
-    return tmp_known_hosts
-
-def popen_ssh_command(command, host, port, user, agent, 
-        stdin="", 
-        ident_key = None,
-        server_key = None,
-        tty = False,
-        timeout = None,
-        retry = 0,
-        err_on_timeout = True,
-        connect_timeout = 60,
-        persistent = True,
-        hostip = None):
-    """
-    Executes a remote commands, returns ((stdout,stderr),process)
-    """
-   
-    tmp_known_hosts = None
-    args = ['ssh', '-C',
-            # Don't bother with localhost. Makes test easier
-            '-o', 'NoHostAuthenticationForLocalhost=yes',
-            '-o', 'ConnectTimeout=%d' % (int(connect_timeout),),
-            '-o', 'ConnectionAttempts=3',
-            '-o', 'ServerAliveInterval=30',
-            '-o', 'TCPKeepAlive=yes',
-            '-l', user, hostip or host]
-    if persistent and openssh_has_persist():
-        args.extend([
-            '-o', 'ControlMaster=auto',
-            '-o', 'ControlPath=/tmp/nepi_ssh-%r@%h:%p',
-            '-o', 'ControlPersist=60' ])
-    if agent:
-        args.append('-A')
-    if port:
-        args.append('-p%d' % port)
-    if ident_key:
-        args.extend(('-i', ident_key))
-    if tty:
-        args.append('-t')
-        args.append('-t')
-    if server_key:
-        # Create a temporary server key file
-        tmp_known_hosts = _make_server_key_args(
-            server_key, host, port, args)
-    args.append(command)
-
-    for x in xrange(retry or 3):
-        # connects to the remote host and starts a remote connection
-        proc = subprocess.Popen(args, 
-                stdout = subprocess.PIPE,
-                stdin = subprocess.PIPE, 
-                stderr = subprocess.PIPE)
-        
-        # attach tempfile object to the process, to make sure the file stays
-        # alive until the process is finished with it
-        proc._known_hosts = tmp_known_hosts
-    
-        try:
-            out, err = _communicate(proc, stdin, timeout, err_on_timeout)
-            if TRACE:
-                print "COMMAND host %s, command %s, out %s, error %s" % (host, " ".join(args), out, err)
-
-            if proc.poll():
-                if err.strip().startswith('ssh: ') or err.strip().startswith('mux_client_hello_exchange: '):
-                    # SSH error, can safely retry
-                    continue
-                elif retry:
-                    # Probably timed out or plain failed but can retry
-                    continue
-            break
-        except RuntimeError,e:
-            if TRACE:
-                print "EXCEPTION host %s, command %s, out %s, error %s, exception TIMEOUT ->  %s" % (
-                        host, " ".join(args), out, err, e.args)
-
-            if retry <= 0:
-                raise
-            retry -= 1
-        
-    return ((out, err), proc)
-
-def popen_scp(source, dest, 
-        port = None, 
-        agent = None, 
-        recursive = False,
-        ident_key = None,
-        server_key = None):
-    """
-    Copies from/to remote sites.
-    
-    Source and destination should have the user and host encoded
-    as per scp specs.
-    
-    If source is a file object, a special mode will be used to
-    create the remote file with the same contents.
-    
-    If dest is a file object, the remote file (source) will be
-    read and written into dest.
-    
-    In these modes, recursive cannot be True.
-    
-    Source can be a list of files to copy to a single destination,
-    in which case it is advised that the destination be a folder.
-    """
-    
-    if TRACE:
-        print "scp", source, dest
-    
-    if isinstance(source, file) and source.tell() == 0:
-        source = source.name
-    elif hasattr(source, 'read'):
-        tmp = tempfile.NamedTemporaryFile()
-        while True:
-            buf = source.read(65536)
-            if buf:
-                tmp.write(buf)
-            else:
-                break
-        tmp.seek(0)
-        source = tmp.name
-    
-    if isinstance(source, file) or isinstance(dest, file) \
-            or hasattr(source, 'read')  or hasattr(dest, 'write'):
-        assert not recursive
-        
-        # Parse source/destination as <user>@<server>:<path>
-        if isinstance(dest, basestring) and ':' in dest:
-            remspec, path = dest.split(':',1)
-        elif isinstance(source, basestring) and ':' in source:
-            remspec, path = source.split(':',1)
-        else:
-            raise ValueError, "Both endpoints cannot be local"
-        user,host = remspec.rsplit('@',1)
-        tmp_known_hosts = None
-        
-        args = ['ssh', '-l', user, '-C',
-                # Don't bother with localhost. Makes test easier
-                '-o', 'NoHostAuthenticationForLocalhost=yes',
-                '-o', 'ConnectTimeout=60',
-                '-o', 'ConnectionAttempts=3',
-                '-o', 'ServerAliveInterval=30',
-                '-o', 'TCPKeepAlive=yes',
-                host ]
-        if openssh_has_persist():
-            args.extend([
-                '-o', 'ControlMaster=auto',
-                '-o', 'ControlPath=/tmp/nepi_ssh-%r@%h:%p',
-                '-o', 'ControlPersist=60' ])
-        if port:
-            args.append('-P%d' % port)
-        if ident_key:
-            args.extend(('-i', ident_key))
-        if server_key:
-            # Create a temporary server key file
-            tmp_known_hosts = _make_server_key_args(
-                server_key, host, port, args)
-        
-        if isinstance(source, file) or hasattr(source, 'read'):
-            args.append('cat > %s' % (shell_escape(path),))
-        elif isinstance(dest, file) or hasattr(dest, 'write'):
-            args.append('cat %s' % (shell_escape(path),))
-        else:
-            raise AssertionError, "Unreachable code reached! :-Q"
-        
-        # connects to the remote host and starts a remote connection
-        if isinstance(source, file):
-            proc = subprocess.Popen(args, 
-                    stdout = open('/dev/null','w'),
-                    stderr = subprocess.PIPE,
-                    stdin = source)
-            err = proc.stderr.read()
-            proc._known_hosts = tmp_known_hosts
-            eintr_retry(proc.wait)()
-            return ((None,err), proc)
-        elif isinstance(dest, file):
-            proc = subprocess.Popen(args, 
-                    stdout = open('/dev/null','w'),
-                    stderr = subprocess.PIPE,
-                    stdin = source)
-            err = proc.stderr.read()
-            proc._known_hosts = tmp_known_hosts
-            eintr_retry(proc.wait)()
-            return ((None,err), proc)
-        elif hasattr(source, 'read'):
-            # file-like (but not file) source
-            proc = subprocess.Popen(args, 
-                    stdout = open('/dev/null','w'),
-                    stderr = subprocess.PIPE,
-                    stdin = subprocess.PIPE)
-            
-            buf = None
-            err = []
-            while True:
-                if not buf:
-                    buf = source.read(4096)
-                if not buf:
-                    #EOF
-                    break
-                
-                rdrdy, wrdy, broken = select.select(
-                    [proc.stderr],
-                    [proc.stdin],
-                    [proc.stderr,proc.stdin])
-                
-                if proc.stderr in rdrdy:
-                    # use os.read for fully unbuffered behavior
-                    err.append(os.read(proc.stderr.fileno(), 4096))
-                
-                if proc.stdin in wrdy:
-                    proc.stdin.write(buf)
-                    buf = None
-                
-                if broken:
-                    break
-            proc.stdin.close()
-            err.append(proc.stderr.read())
-                
-            proc._known_hosts = tmp_known_hosts
-            eintr_retry(proc.wait)()
-            return ((None,''.join(err)), proc)
-        elif hasattr(dest, 'write'):
-            # file-like (but not file) dest
-            proc = subprocess.Popen(args, 
-                    stdout = subprocess.PIPE,
-                    stderr = subprocess.PIPE,
-                    stdin = open('/dev/null','w'))
-            
-            buf = None
-            err = []
-            while True:
-                rdrdy, wrdy, broken = select.select(
-                    [proc.stderr, proc.stdout],
-                    [],
-                    [proc.stderr, proc.stdout])
-                
-                if proc.stderr in rdrdy:
-                    # use os.read for fully unbuffered behavior
-                    err.append(os.read(proc.stderr.fileno(), 4096))
-                
-                if proc.stdout in rdrdy:
-                    # use os.read for fully unbuffered behavior
-                    buf = os.read(proc.stdout.fileno(), 4096)
-                    dest.write(buf)
-                    
-                    if not buf:
-                        #EOF
-                        break
-                
-                if broken:
-                    break
-            err.append(proc.stderr.read())
-                
-            proc._known_hosts = tmp_known_hosts
-            eintr_retry(proc.wait)()
-            return ((None,''.join(err)), proc)
-        else:
-            raise AssertionError, "Unreachable code reached! :-Q"
-    else:
-        # Parse destination as <user>@<server>:<path>
-        if isinstance(dest, basestring) and ':' in dest:
-            remspec, path = dest.split(':',1)
-        elif isinstance(source, basestring) and ':' in source:
-            remspec, path = source.split(':',1)
-        else:
-            raise ValueError, "Both endpoints cannot be local"
-        user,host = remspec.rsplit('@',1)
-        
-        # plain scp
-        tmp_known_hosts = None
-        args = ['scp', '-q', '-p', '-C',
-                # Don't bother with localhost. Makes test easier
-                '-o', 'NoHostAuthenticationForLocalhost=yes',
-                '-o', 'ConnectTimeout=60',
-                '-o', 'ConnectionAttempts=3',
-                '-o', 'ServerAliveInterval=30',
-                '-o', 'TCPKeepAlive=yes' ]
-                
-        if port:
-            args.append('-P%d' % port)
-        if recursive:
-            args.append('-r')
-        if ident_key:
-            args.extend(('-i', ident_key))
-        if server_key:
-            # Create a temporary server key file
-            tmp_known_hosts = _make_server_key_args(
-                server_key, host, port, args)
-        if isinstance(source,list):
-            args.extend(source)
-        else:
-            if openssh_has_persist():
-                args.extend([
-                    '-o', 'ControlMaster=auto',
-                    '-o', 'ControlPath=/tmp/nepi_ssh-%r@%h:%p'])
-            args.append(source)
-        args.append(dest)
-
-        # connects to the remote host and starts a remote connection
-        proc = subprocess.Popen(args, 
-                stdout = subprocess.PIPE,
-                stdin = subprocess.PIPE, 
-                stderr = subprocess.PIPE)
-        proc._known_hosts = tmp_known_hosts
-        
-        comm = proc.communicate()
-        eintr_retry(proc.wait)()
-        return (comm, proc)
-
-def decode_and_execute():
-    # The python code we want to execute might have characters that 
-    # are not compatible with the 'inline' mode we are using. To avoid
-    # problems we receive the encoded python code in base64 as a input 
-    # stream and decode it for execution.
-    import base64, os
-    cmd = ""
-    while True:
-        try:
-            cmd += os.read(0, 1)# one byte from stdin
-        except OSError, e:            
-            if e.errno == errno.EINTR:
-                continue
-            else:
-                raise
-        if cmd[-1] == "\n": 
-            break
-    cmd = base64.b64decode(cmd)
-    # Uncomment for debug
-    #os.write(2, "Executing python code: %s\n" % cmd)
-    os.write(1, "OK\n") # send a sync message
-    exec(cmd)
-
-def popen_python(python_code, 
-        communication = DC.ACCESS_LOCAL,
-        host = None, 
-        port = None, 
-        user = None, 
-        agent = False, 
-        python_path = None,
-        ident_key = None,
-        server_key = None,
-        tty = False,
-        sudo = False, 
-        environment_setup = ""):
-
-    cmd = ""
-    if python_path:
-        python_path.replace("'", r"'\''")
-        cmd = """PYTHONPATH="$PYTHONPATH":'%s' """ % python_path
-        cmd += " ; "
-    if environment_setup:
-        cmd += environment_setup
-        cmd += " ; "
-    # Uncomment for debug (to run everything under strace)
-    # We had to verify if strace works (cannot nest them)
-    #cmd += "if strace echo >/dev/null 2>&1; then CMD='strace -ff -tt -s 200 -o strace.out'; else CMD=''; fi\n"
-    #cmd += "$CMD "
-    #cmd += "strace -f -tt -s 200 -o strace$$.out "
-    import nepi
-    cmd += "python -c 'import sys; sys.path.insert(0,%s); from nepi.util import server; server.decode_and_execute()'" % (
-        repr(os.path.dirname(os.path.dirname(nepi.__file__))).replace("'",'"'),
-    )
-
-    if sudo:
-        if ';' in cmd:
-            cmd = "sudo bash -c " + shell_escape(cmd)
-        else:
-            cmd = "sudo " + cmd
-
-    if communication == DC.ACCESS_SSH:
-        tmp_known_hosts = None
-        args = ['ssh', '-C',
-                # Don't bother with localhost. Makes test easier
-                '-o', 'NoHostAuthenticationForLocalhost=yes',
-                '-o', 'ConnectionAttempts=3',
-                '-o', 'ServerAliveInterval=30',
-                '-o', 'TCPKeepAlive=yes',
-                '-l', user, host]
-        if agent:
-            args.append('-A')
-        if port:
-            args.append('-p%d' % port)
-        if ident_key:
-            args.extend(('-i', ident_key))
-        if tty:
-            args.append('-t')
-        if server_key:
-            # Create a temporary server key file
-            tmp_known_hosts = _make_server_key_args(
-                server_key, host, port, args)
-        args.append(cmd)
-    else:
-        args = [ "/bin/bash", "-c", cmd ]
-
-    # connects to the remote host and starts a remote
-    proc = subprocess.Popen(args,
-            shell = False, 
-            stdout = subprocess.PIPE,
-            stdin = subprocess.PIPE, 
-            stderr = subprocess.PIPE)
-
-    if communication == DC.ACCESS_SSH:
-        proc._known_hosts = tmp_known_hosts
-
-    # send the command to execute
-    os.write(proc.stdin.fileno(),
-            base64.b64encode(python_code) + "\n")
-    while True: 
-        try:
-            msg = os.read(proc.stdout.fileno(), 3)
-            break
-        except OSError, e:            
-            if e.errno == errno.EINTR:
-                continue
-            else:
-                raise
-    
-    if msg != "OK\n":
-        raise RuntimeError, "Failed to start remote python interpreter: \nout:\n%s%s\nerr:\n%s" % (
-            msg, proc.stdout.read(), proc.stderr.read())
-
-    return proc
-
-# POSIX
-def _communicate(self, input, timeout=None, err_on_timeout=True):
-    read_set = []
-    write_set = []
-    stdout = None # Return
-    stderr = None # Return
-    
-    killed = False
-    
-    if timeout is not None:
-        timelimit = time.time() + timeout
-        killtime = timelimit + 4
-        bailtime = timelimit + 4
-
-    if self.stdin:
-        # Flush stdio buffer.  This might block, if the user has
-        # been writing to .stdin in an uncontrolled fashion.
-        self.stdin.flush()
-        if input:
-            write_set.append(self.stdin)
-        else:
-            self.stdin.close()
-    if self.stdout:
-        read_set.append(self.stdout)
-        stdout = []
-    if self.stderr:
-        read_set.append(self.stderr)
-        stderr = []
-
-    input_offset = 0
-    while read_set or write_set:
-        if timeout is not None:
-            curtime = time.time()
-            if timeout is None or curtime > timelimit:
-                if curtime > bailtime:
-                    break
-                elif curtime > killtime:
-                    signum = signal.SIGKILL
-                else:
-                    signum = signal.SIGTERM
-                # Lets kill it
-                os.kill(self.pid, signum)
-                select_timeout = 0.5
-            else:
-                select_timeout = timelimit - curtime + 0.1
-        else:
-            select_timeout = 1.0
-        
-        if select_timeout > 1.0:
-            select_timeout = 1.0
-            
-        try:
-            rlist, wlist, xlist = select.select(read_set, write_set, [], select_timeout)
-        except select.error,e:
-            if e[0] != 4:
-                raise
-            else:
-                continue
-        
-        if not rlist and not wlist and not xlist and self.poll() is not None:
-            # timeout and process exited, say bye
-            break
-
-        if self.stdin in wlist:
-            # When select has indicated that the file is writable,
-            # we can write up to PIPE_BUF bytes without risk
-            # blocking.  POSIX defines PIPE_BUF >= 512
-            bytes_written = os.write(self.stdin.fileno(), buffer(input, input_offset, 512))
-            input_offset += bytes_written
-            if input_offset >= len(input):
-                self.stdin.close()
-                write_set.remove(self.stdin)
-
-        if self.stdout in rlist:
-            data = os.read(self.stdout.fileno(), 1024)
-            if data == "":
-                self.stdout.close()
-                read_set.remove(self.stdout)
-            stdout.append(data)
-
-        if self.stderr in rlist:
-            data = os.read(self.stderr.fileno(), 1024)
-            if data == "":
-                self.stderr.close()
-                read_set.remove(self.stderr)
-            stderr.append(data)
-    
-    # All data exchanged.  Translate lists into strings.
-    if stdout is not None:
-        stdout = ''.join(stdout)
-    if stderr is not None:
-        stderr = ''.join(stderr)
-
-    # Translate newlines, if requested.  We cannot let the file
-    # object do the translation: It is based on stdio, which is
-    # impossible to combine with select (unless forcing no
-    # buffering).
-    if self.universal_newlines and hasattr(file, 'newlines'):
-        if stdout:
-            stdout = self._translate_newlines(stdout)
-        if stderr:
-            stderr = self._translate_newlines(stderr)
-
-    if killed and err_on_timeout:
-        errcode = self.poll()
-        raise RuntimeError, ("Operation timed out", errcode, stdout, stderr)
-    else:
-        if killed:
-            self.poll()
-        else:
-            self.wait()
-        return (stdout, stderr)
-
diff --git a/src/nepi/util/settools/__init__.py b/src/nepi/util/settools/__init__.py
deleted file mode 100644 (file)
index 571fd59..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-from setclusters import *
-
diff --git a/src/nepi/util/settools/classify.py b/src/nepi/util/settools/classify.py
deleted file mode 100644 (file)
index b1663e8..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-import setclusters
-import collections
-import itertools
-import operator
-
-def classify(requests, partition):
-    """
-    Takes an iterable over requests and a classification, and classifies the requests
-    returning a mapping from their classification (bitmap of applicable partitions) to
-    lists of requests.
-    
-    Params:
-    
-        requests: iterable over sets of viable candidates for a request
-        
-        partition: sequence of sets of candidates that forms a partition
-            over all available candidates.
-    
-    Returns:
-        
-        { str : [requests] }
-    """
-    rv = collections.defaultdict(list)
-    
-    for request in requests:
-        rv[getClass(request, partition)].append(request)
-    
-    return dict(rv)
-
-def getClass(set, partition):
-    return "".join(
-        map("01".__getitem__, [
-            bool(set & part)
-            for part in partition
-        ])
-    )
-    
-
-def isSubclass(superclass, subclass):
-    """
-    Returns True iff 'superclass' includes all elements of 'subclass'
-    
-    >>> isSubclass("1100","1000")
-    True
-    >>> isSubclass("1100","1100")
-    True
-    >>> isSubclass("0000","0001")
-    False
-    """
-    for superbit, subbit in itertools.izip(superclass, subclass):
-        if subbit and not superbit:
-            return False
-    else:
-        return True
-
-def classContains(clz, partIndex):
-    return clz[partIndex] == "1"
-
-def classCardinality(clz, partition = None):
-    if not partition:
-        return sum(itertools.imap("1".__eq__, clz))
-    else:
-        return sum(len(part) for bit,part in zip(clz,partition) 
-                   if bit == "1" )
-
-def classMembers(clz, partition):
-    return reduce(operator.or_, classComponents(clz, partition), set())
-
-def classComponents(clz, partition):
-    return [
-        partition[i]
-        for i,bit in enumerate(clz)
-        if bit == "1"
-    ]
-
-
diff --git a/src/nepi/util/settools/setclusters.py b/src/nepi/util/settools/setclusters.py
deleted file mode 100644 (file)
index 8a74d69..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-import itertools
-import collections
-
-def disjoint_sets(*sets):
-    """
-    Given a series of sets S1..SN, computes disjoint clusters C1..CM
-    such that C1=U Sc1..Sc1', C2=U Sc2..Sc2', ... CM=ScM..ScM'
-    and any component of Ci is disjoint against any component of Cj
-    for i!=j
-    
-    The result is given in terms of the component sets, so C1 is given
-    as the sequence Sc1..Sc1', etc.
-    
-    Example:
-    
-    >>> disjoint_sets( set([1,2,4]), set([2,3,4,5]), set([4,5]), set([6,7]), set([7,8]) )
-    [[set([1, 2, 4]), set([4, 5]), set([2, 3, 4, 5])], [set([6, 7]), set([8, 7])]]
-
-    >>> disjoint_sets( set([1]), set([2]), set([3]) )
-    [[set([1])], [set([2])], [set([3])]]
-
-    """
-    
-    # Pseudo:
-    #
-    # While progress is made:
-    #   - Join intersecting clusters
-    #   - Track their components
-    #   - Replace sets with the new clusters, restart
-    cluster_components = [ [s] for s in sets ]
-    clusters = [s.copy() for s in sets]
-    
-    changed = True
-    while changed:
-        changed = False
-        
-        for i,s in enumerate(clusters):
-            for j in xrange(len(clusters)-1,i,-1):
-                cluster = clusters[j]
-                if cluster & s:
-                    changed = True
-                    cluster.update(s)
-                    cluster_components[i].extend(cluster_components[j])
-                    del cluster_components[j]
-                    del clusters[j]
-        
-    return cluster_components
-
-def disjoint_partition(*sets):
-    """
-    Given a series of sets S1..SN, computes a disjoint partition of
-    the population maintaining set boundaries. 
-    
-    That is, it computes a disjoint partition P1..PM where 
-    Pn is the equivalence relation given by 
-    
-    R<a,b>  <==>  a in Sn <--> b in Sn  for all n
-    
-    NOTE: Given the current implementation, the contents of the
-    sets must be hashable.
-    
-    Examples:
-    
-        >>> disjoint_partition( set([1,2,4]), set([2,3,4,5]), set([4,5]), set([6,7]), set([7,8]) )
-        [set([2]), set([5]), set([1]), set([3]), set([4]), set([6]), set([8]), set([7])]
-        
-        >>> disjoint_partition( set([1,2,4]), set([2,3,4,5,10]), set([4,5]), set([6,7]), set([7,8]) )
-        [set([2]), set([5]), set([1]), set([10, 3]), set([4]), set([6]), set([8]), set([7])]
-    
-    """
-    reverse_items = collections.defaultdict(list)
-    
-    for i,s in enumerate(sets):
-        for item in s:
-            reverse_items[item].append(i)
-    
-    partitions = collections.defaultdict(set)
-    for item, cats in reverse_items.iteritems():
-        partitions[tuple(cats)].add(item)
-    
-    return partitions.values()
-
diff --git a/src/nepi/util/sfa_api.py b/src/nepi/util/sfa_api.py
new file mode 100644 (file)
index 0000000..580b528
--- /dev/null
@@ -0,0 +1,263 @@
+import logging
+import hashlib
+
+from parser import sfa_sfav1
+import subprocess
+import warnings
+
+import threading
+
+class SFAApi(object):
+
+    def __init__(self, aggregate = 'ple', slice_id = None, sfi_auth = None, sfi_user = None,
+            sfi_registry = None, sfi_sm = None, timeout = None, private_key = None):
+    
+        self._resources = dict()
+        self._reservable_resources = list()
+        self._leases = dict()
+        self._slice_tags = dict()
+        self._slice_resources = set()
+        self._slice_leases = set()
+        self._aggregate = aggregate
+        self._slice_hrn = slice_id
+        # TODO: take into account Rspec version, SFA V1, GENI V2, GENI V3
+        # For now is SFA V1 from PlanetLab and Nitos (wrong namespace)
+        self._parser = sfa_sfav1.SFAResourcesParser(['ple', 'omf'])
+        self._lock = threading.Lock()
+
+        # Parameters to contact the XMLRPC SFA service
+        self._sfi_parameters = {'-a': sfi_auth, '-u': sfi_user,
+                '-r': sfi_registry, '-s': sfi_sm, '-t': timeout,
+                '-k': private_key}
+
+        #self._logger = logging.getLogger('nepi.utils.sfiapi')
+        self._fetch_resources_info()
+        self._fetch_slice_info()
+
+    def _sfi_command_options(self):
+        command_options = " ".join("%s %s" % (k,v) for (k,v) in \
+                self._sfi_parameters.iteritems() if v is not None)
+        return command_options
+
+    def _sfi_command_exec(self, command):
+        args = command.split(" ")
+        s = subprocess.Popen(args, stdout = subprocess.PIPE,
+                stdin = subprocess.PIPE)
+        xml, err = s.communicate()
+        if err:
+            raise RuntimeError("Command execution problem, error: %s" % err)
+        return xml
+
+    def _fetch_resources_info(self, resources = True):
+        command_options = self._sfi_command_options()
+        command = "sfi.py " + command_options + " resources -l all"
+        try:
+            xml = self._sfi_command_exec(command)
+        except:
+            #self._logger.error("Error in SFA responds: %s", xml)
+            raise
+        if resources:
+            self._resources, self._leases = self._parser.resources_from_xml(xml, resources = True)
+        else:
+            self._leases = self._parser.resources_from_xml(xml)
+        #self._update_reservable()
+        return xml
+    
+    def _fetch_slice_info(self):
+        command_options = self._sfi_command_options()
+        command = "sfi.py " + command_options + " resources -l all"
+        command = command + " " + self._slice_hrn
+        try:
+            xml = self._sfi_command_exec(command)
+        except:
+            #self._logger.error("Error in SFA responds: %s", xml)
+            raise
+        self._slice_resources, self._slice_leases, self._slice_tags = \
+            self._parser.resources_from_xml(xml, sliver = True, resources = True)
+        return xml
+
+    def _update_reservable(self):
+        for rid, r in self._resources.iteritems():
+            if (r['resource_type'] == 'node' and r['exclusive'].upper() == 'TRUE') \
+                 or (r['resource_type'] == 'channel'):
+                self._reservable_resources.append(rid)
+
+
+    def discover_resources(self, resourceId=None, fields=[], **kwargs):
+        result = dict()
+        resources = self._resources
+
+        if resourceId is not None:
+            resource_ids = resourceId
+            if not isinstance(resource_ids, list):
+                resource_ids = [resource_ids]
+            resources = self._filter_by_resourceId(resources, resource_ids)
+        else:
+            for filter, value in kwargs.items():
+                resources = self._filter_by_filter(resources, filter, value)
+        if not fields:
+            return resources
+        else:
+            for k, info in resources.iteritems():
+                info = self._extract_fields(info, fields)
+                result[k] = info
+            return result
+                
+    def _filter_by_resourceId(self, resources, resource_ids):
+        return dict((k, resources[k]) for k in resource_ids if k in resources)
+
+    def _filter_by_filter(self, resources, filter, value):
+        d = dict()
+        for k in resources.keys():
+            if filter in resources[k]:
+                if resources[k][filter] == value:
+                    d[k] = resources[k]
+        return d
+               
+    def _extract_fields(self, info, fields):
+        return dict((k, info[k]) for k in fields if k in info)
+
+    def discover_fields(self):
+        resources = self._resources
+        fields = []
+        for k, data in resources.iteritems():
+            for field in data:
+                if field not in fields:
+                    fields.append(field)
+        return fields
+
+    def discover_leases(self, resourceId=None):
+        # Return all known leases, or only those for the given resource id(s).
+        leases = self._leases
+        if resourceId is not None:
+            resource_ids = resourceId
+            if not isinstance(resourceId, list):
+                resource_ids = [resource_ids]
+            leases = self._filter_by_resourceId(leases, resource_ids)
+        return leases
+
+    def find_resources(self, leases, resources, rtype, quantity, start_time, duration, slot):
+        result = dict()
+        if rtype not in ['node', 'channel']:
+            raise RuntimeError("Unknown type")
+
+        finish_time = start_time + duration * slot
+
+        leases_resources = dict()
+        reservable_resources = dict()
+        for lid, lease in leases.iteritems():
+            if lease[0]['type'] == rtype:
+                leases_resources.update({lid: lease})
+        #print leases_resources
+        for rid, resource in resources.iteritems():
+            if rtype == 'node' and (resource['type'] == 'node' and resource['exclusive'].upper() == 'TRUE'):
+                reservable_resources.update({rid: resource})
+            elif rtype == 'channel':
+                reservable_resources.update({rid: resource})
+            #if resource['type'] == 'rtype' and resources['exclusive'].upper() == 'TRUE':\
+            # (in case adding exclusive tag to channels)
+
+        free_resources = list(set(reservable_resources.keys()) - set(leases_resources.keys()))
+    
+        if len(free_resources) >= quantity:
+            free_resources = free_resources[:quantity]
+            for rid, resource in resources.iteritems():
+                if rid in free_resources:
+                    result[rid] = resource
+            return result
+        else:
+            maybe_free = []
+            new_quan = quantity - len(free_resources)
+            print new_quan
+
+            for lid, lease in leases_resources.iteritems():
+                for l in lease:
+                    st = int(l['start_time'])
+                    ft = st + int(l['duration']) * slot
+                    if (st <= finish_time <= ft) or (st <= start_time <= ft):
+                        if lid in maybe_free:
+                            maybe_free.remove(lid)
+                        break
+                    else:
+                        if lid not in maybe_free:
+                            maybe_free.append(lid)
+                if len(maybe_free) >= new_quan:
+                    free_resources = [free_resources, maybe_free]
+                    free_resources = sum(free_resources, [])
+                    for rid, resource in resources.iteritems():
+                        if rid in free_resources:
+                            result[rid] = resource
+                    return result
+                    #return free_resources
+            warnings.warn("There aren't enough nodes")
+
+                                 
+    def provision_resource(self, new_resource, start_time = None, duration = None):
+        import os, tempfile
+        with self._lock:
+            xml = self._fetch_slice_info()
+            new_xml = self._parser.create_reservation_xml(xml, self._slice_hrn,\
+            new_resource, start_time, duration, self._aggregate)
+            fh, fname = tempfile.mkstemp()
+            print fname
+            os.write(fh, new_xml)
+            os.close(fh)
+            try:
+                command_options = self._sfi_command_options()
+                command = "sfi.py " + command_options + " create %s %s" % (self._slice_hrn, fname)
+                out = self._sfi_command_exec(command)
+            except:
+                raise
+        xml = self._fetch_slice_info()
+        return self._parser.verify_reservation_xml(xml, self._slice_hrn, new_resource, start_time,\
+                duration, self._aggregate)
+
+    def release_resource(self, resource, start_time = None, duration = None):
+        import os, tempfile
+        with self._lock:
+            xml = self._fetch_slice_info()
+            new_xml = self._parser.release_reservation_xml(xml, self._slice_hrn, resource,\
+            start_time, duration, self._aggregate)
+            fh, fname = tempfile.mkstemp()
+            print fname
+            os.write(fh, new_xml)
+            os.close(fh)
+            try:
+                command_options = self._sfi_command_options()
+                command = "sfi.py " + command_options + " create %s %s" % (self._slice_hrn, fname)
+                out = self._sfi_command_exec(command)
+            except:
+                raise
+        xml = self._fetch_slice_info()
+        return not self._parser.verify_reservation_xml(xml, self._slice_hrn, resource, start_time,\
+            duration, self._aggregate)
+
+
+class SFAApiFactory(object):
+    lock = threading.Lock()
+    _apis = dict()
+
+    @classmethod
+    def get_api(cls, slice_id = None, sfi_auth = None, sfi_user = None,
+            sfi_registry = None, sfi_sm = None, timeout = None, private_key = None):
+        # Return the cached SFAApi for this parameter set (created on first use),
+        key = cls.make_key('ple', slice_id, sfi_auth, sfi_user, sfi_registry,
+            sfi_sm, timeout, private_key)
+        cls.lock.acquire()
+        try:
+            api = cls._apis.get(key)
+            if not api:
+                api = SFAApi('ple', slice_id, sfi_auth, sfi_user,
+                        sfi_registry, sfi_sm, timeout, private_key)
+                cls._apis[key] = api
+            api._fetch_resources_info(resources = False)
+            api._fetch_slice_info()
+        finally:
+            cls.lock.release()
+        return api
+
+    @classmethod
+    def make_key(cls, *args):
+        skey = "".join(map(str, args))
+        return hashlib.md5(skey).hexdigest()
+
diff --git a/src/nepi/util/sfa_sfav1.py b/src/nepi/util/sfa_sfav1.py
new file mode 100644 (file)
index 0000000..0f545b0
--- /dev/null
@@ -0,0 +1,226 @@
+# -*- coding: utf-8 -*-
+
+from lxml import etree
+#import collections
+import sys
+
+class SFAResourcesParser(object):
+    # Maybe this init method is not necessary, it was aim to check that the
+    # aggregate was supported by nepi
+
+    def __init__(self, aggr_pattern):
+        if not isinstance(aggr_pattern, list):
+            self._aggr_pattern = [aggr_pattern]
+        else:
+            self._aggr_pattern = aggr_pattern
+    
+    def resources_from_xml(self, xml, sliver = False, resources = False):
+        rdata = dict()
+        ldata = dict()
+        stags = dict()
+        RSpec = etree.fromstring(xml)
+        RSpec_attr = dict(RSpec.attrib)
+        network = RSpec.findall('.//network')
+        for net in network:
+            aggr = net.get('name') 
+            if aggr == 'ple' and resources:
+                node_tree = net.iterfind('node')
+                for node in list(node_tree): 
+                    if isinstance(node.tag, basestring):
+                        data_ple = dict(node.attrib)
+                        data_ple['aggregate'] = aggr
+                        data_ple['resource_type'] = 'node'
+                        data_ple = self._get_node_info(node, data_ple)
+                        hostname = node.find('hostname')
+                        rdata[hostname.text] = data_ple
+                if sliver:
+                    sliver_defaults = net.find('sliver_defaults')
+                    if len(sliver_defaults):
+                        stags = self._get_sliver_tags(sliver_defaults, stags)
+            elif aggr == 'omf' and resources:
+                node_tree = net.iterfind('node')
+                for node in node_tree:
+                    if isinstance(node.tag, basestring):
+                        data_omf = dict(node.attrib)
+                        data_omf['aggregate'] = aggr
+                        data_omf['resource_type'] = 'node'
+                        data_omf = self._get_node_info(node, data_omf)
+                        hostname = node.find('hostname')
+                        rdata[hostname.text] = data_omf
+                spectrum = net.find('spectrum')
+                for channel in list(spectrum):
+                    if isinstance(channel.tag, basestring):
+                        data_omf = dict(channel.attrib)
+                        data_omf['aggregate'] = aggr
+                        data_omf['resource_type'] = 'channel'
+                        channelnum = data_omf['channel_num']
+                        rdata[channelnum] = data_omf
+                leases = net.iterfind('lease')
+                for lease in list(leases):
+                    if isinstance(lease.tag, basestring):
+                        (st, duration) = lease.attrib['start_time'], lease.attrib['duration']
+                        data_lease = dict(lease.attrib)
+                        data_lease['aggregate'] = aggr
+                        data_lease['resource_type'] = 'lease'
+                        data_lease = self._get_leases_info(lease, data_lease)
+                        ldata[(st, duration)] = data_lease
+            elif aggr == 'omf' and not resources:
+                leases = net.iterfind('lease')
+                for lease in list(leases):
+                    if isinstance(lease.tag, basestring):
+                        (st, duration) = lease.attrib['start_time'], lease.attrib['duration']
+                        data_lease = dict(lease.attrib)
+                        data_lease['aggregate'] = aggr
+                        data_lease['resource_type'] = 'lease'
+                        data_lease = self._get_leases_info(lease, data_lease)
+                        ldata[(st, duration)] = data_lease
+            else:
+                pass
+        if sliver:
+            return rdata, ldata, stags
+        elif resources:
+            return rdata, ldata
+        elif not resources:
+            return ldata
+
+    def _get_node_info(self, node_tag, data_dict):
+        for n in list(node_tag):
+            if isinstance(n.tag, basestring):
+                if n.attrib:
+                    data_dict[n.tag] = dict(n.attrib)
+                else:
+                    data_dict[n.tag] = n.text
+        return data_dict
+
+    def _get_leases_info(self, lease_tag, data_dict):
+        nodes = list()
+        channels = list()
+        for l in list(lease_tag):
+            if l.tag == 'node':
+                node = l.attrib['component_id'].split('+').pop()
+                nodes.append(node)
+            if l.tag == 'channel':
+                #TODO: find out key when channel reservation
+                #channels.append(l.attrib['averiguar']) channel_num
+                pass
+            data_dict['nodes'] = nodes
+            data_dict['channels'] = channels
+        return data_dict
+
+    def _get_sliver_tags(self, sliverdefaults_tag, sliver_tag_dict):
+        vsys = list()
+        for info in list(sliverdefaults_tag):
+            if info.tag == 'vsys_vnet':
+                sliver_tag_dict['vsys_vnet'] = info.text
+            elif info.tag == 'vsys':
+                vsys.append(info.text)
+        sliver_tag_dict['vsys'] = vsys
+        return sliver_tag_dict
+            
+    def create_reservation_xml(self, xml, slice_hrn, new_resource, start_time, duration, aggregate):
+        aggrs = []
+        RSpec = etree.fromstring(xml)
+        network = RSpec.findall('.//network')
+        for net in network:
+            aggr = net.get('name')
+            aggrs.append(aggr)
+            if aggr == aggregate:
+                new_xml = self._create_tags(RSpec, net, slice_hrn, new_resource, start_time, duration)
+        if aggregate not in aggrs:
+            new_net = etree.SubElement(RSpec, 'network', name = aggregate)
+            new_xml = self._create_tags(RSpec, new_net, slice_hrn, new_resource, start_time, duration)
+        return new_xml
+
+    def _create_tags(self, RSpec, net, slice_hrn, new_resource, start_time, duration):
+        resource = new_resource.keys()[0]
+        res_type = new_resource[resource]['resource_type']
+        if res_type == 'node':
+            node = etree.SubElement(net, res_type, \
+            component_manager_id = new_resource[resource]['component_manager_id'],\
+            component_id = new_resource[resource]['component_id'],\
+            component_name = new_resource[resource]['component_name'], \
+            site_id = new_resource[resource]['site_id'])
+            sliver_tag = etree.SubElement(node, 'sliver')
+        elif res_type == 'channel':
+            spectrum = etree.SubElement(net, 'spectrum')
+            channel = etree.SubElement(spectrum, 'channel',\
+            channel_num = new_resource[resource]['channel_num'],\
+            frequency = new_resource[resource]['frequency'],\
+            standard = new_resource[resource]['standard'])
+        if start_time is not None and duration is not None:
+            slice_id = "urn:publicid:IDN+" + slice_hrn.split('.')[0] + ':' + slice_hrn.split('.')[1]\
+            + '+slice+' + slice_hrn.split('.')[2]
+            lease = etree.SubElement(net, 'lease', slice_id = slice_id,\
+            start_time = str(start_time), duration = str(duration))
+            if res_type == 'node':
+                res = etree.SubElement(lease, res_type,\
+                component_id = new_resource[resource]['component_id'])
+            elif res_type == 'channel':
+                res = etree.SubElement(lease, res_type,\
+                channel_num = new_resource[resource]['channel_num'])
+        new_xml = etree.tostring(RSpec, xml_declaration=True)
+        return new_xml
+                
+    def verify_reservation_xml(self, xml, slice_hrn, new_resource, start_time, duration, aggregate):
+        slice_id = "urn:publicid:IDN+" + slice_hrn.split('.')[0] + ':' + slice_hrn.split('.')[1]\
+        + '+slice+' + slice_hrn.split('.')[2]
+        rdata, ldata, stags = self.resources_from_xml(xml, sliver = True, resources = True)
+        res_name = new_resource.keys()[0]
+        if res_name in rdata.keys():
+            if start_time and duration:
+                if ldata[(start_time, duration)]:
+                    nodes = ldata[(start_time, duration)]['nodes']
+                    sliceid = ldata[(start_time, duration)]['slice_id']
+                    if res_name in nodes and sliceid == slice_id:
+                        return True
+                    else: return False
+                else: return False
+            else: return True
+        else: return False
+
+    def release_reservation_xml(self, xml, slice_hrn, resource, start_time, duration, aggregate):
+        RSpec = etree.fromstring(xml)
+        network = RSpec.findall('.//network')
+        for net in network:
+            aggr = net.get('name')
+            if aggr == aggregate:
+                new_xml = self._delete_tag(RSpec, net, slice_hrn, resource, start_time, duration)
+                return new_xml
+
+    def _delete_tag(self, RSpec, net, slice_hrn, resource, start_time, duration):
+        resource_name = resource.keys()[0]
+        res_type = resource[resource_name]['resource_type']
+        if res_type == 'node':
+            node_tree = net.iterfind('node')
+            for node in list(node_tree):
+                if isinstance(node.tag, basestring):
+                    data_node = dict(node.attrib)
+                    if data_node['component_name'] == resource_name:
+                        net.remove(node)
+        elif res_type == 'channel':
+            spectrum = net.find('spectrum')
+            for channel in list(spectrum):
+                if isinstance(channel.tag, basestring):
+                    data_channel = dict(channel.attrib)
+                    if data_channel['channel_num'] == resource_name:
+                        spectrum.remove(channel)
+        if start_time is not None and duration is not None:
+            slice_id = "urn:publicid:IDN+" + slice_hrn.split('.')[0] + ':' + slice_hrn.split('.')[1]\
+            + '+slice+' + slice_hrn.split('.')[2]
+            leases = net.iterfind('lease')
+            for lease in list(leases):
+                if isinstance(lease.tag, basestring):
+                    (st, duration) = lease.attrib['start_time'], lease.attrib['duration']
+                    sliceid = lease.attrib['slice_id']
+                    if st == str(start_time) and duration == str(duration) and sliceid == slice_id:
+                        for l in list(lease):
+                            if l.tag == 'node' and res_type == 'node':
+                                if l.attrib['component_id'].split('+').pop() == resource_name:
+                                    lease.remove(l)
+                            elif l.tag == 'channel' and res_type == 'channel':
+                                if l.attrib['channel_num'] == resource_name:
+                                    lease.remove(l)
+        new_xml = etree.tostring(RSpec, xml_declaration=True)
+        return new_xml
+
+
diff --git a/src/nepi/util/sfiapi.py b/src/nepi/util/sfiapi.py
deleted file mode 100644 (file)
index d82b28c..0000000
+++ /dev/null
@@ -1,162 +0,0 @@
-# -*- coding: utf-8 -*-
-
-
-import logging
-
-from nepi.util.parser import sfa
-
-###
-# TODO: This API is a mega hack to adapt the sfa interface to the plc interface.
-#       The right way to implement this would be to make node.py invoke generic 
-#       methods and to adapt the sfa and plc APIs to provide the reuired 
-#       data.
-
-class SFIAPI(object):
-    def __init__(self, slice_id):
-        self._slice_tags = dict()
-        self._slice_nodes = set()
-        self._all_nodes = dict()
-        self._slice_id = slice_id
-
-        self._logger = logging.getLogger('nepi.utils.sfiapi')
-        
-        self.FetchSliceInfo()
-
-    def FetchSliceInfo(self):
-        p = sfa.SFAResourcesParser()
-        import commands
-        xml = commands.getoutput("sfi.py resources")
-        try:
-            self._all_nodes = p.resources_from_xml(xml)
-            xml = commands.getoutput("sfi.py resources %s" % self._slice_id)
-            self._slice_tags, self._slice_nodes = p.slice_info_from_xml(xml)
-        except:
-            self._logger.error("Error in SFA responds: %s", xml)
-            raise
-    
-    def GetSliceNodes(self, slicename):
-        return list(self._slice_nodes)
-
-    def GetNodeInfo(self, node_id):
-        # TODO: thread-unsafe!! sanitize!
-        info = self.GetNodes(node_id)
-        tags = self.GetNodeTags(node_id=node_id, fields=('tagname','value'))
-        return info, tags
-
-    def GetSliceId(self, slicename):
-        return self._slice_id
-
-    def GetSliceVnetSysTag(self, slicename):
-        return self._slice_tags.get('vsys_net')
-
-    def GetNodeTags(self, node_id=None, fields=None, **kw):
-        nodes = self._all_nodes
-        if node_id is not None:
-            node_ids = node_id
-            if not isinstance(node_id, list):
-                node_ids = [node_ids]
-            nodes = self._FilterByNodeId(nodes, node_ids)
-        else:
-            filters = kw.pop('filters',{})
-            if '|slice_ids' in filters:
-                nodes = self._FilterByNodeId(nodes, self._slice_nodes)
-                del filters['|slice_ids']
-            nodes = self._FilterByFilters(nodes, filters)
-        tagnames = kw.pop('tagname',[])
-        return self._GetTagInfo(nodes, tagnames, fields)
-
-    def GetNodes(self, nodeIdOrName=None, fields=[], **kw):
-        #TODO: filter - peer
-        nodes = self._all_nodes
-        if nodeIdOrName is not None:
-            node_ids = nodeIdOrName
-            if not isinstance(nodeIdOrName, list):
-                node_ids = [node_ids]
-            nodes = self._FilterByNodeId(nodes, node_ids)
-        else:
-            filters = kw.pop('filters',{})
-            if '|slice_ids' in filters:
-                nodes = self._FilterByNodeId(nodes, self._slice_nodes)
-                del filters['|slice_ids']
-            # TODO: Remove this!! need to allow filter '>last_contact' !!!
-            for f in ['>last_contact', 'node_type', 'run_level']:
-                if f in filters:
-                    del filters[f]
-            nodes = self._FilterByFilters(nodes, filters)
-        return self._GetNodeFieldsInfo(nodes, fields)
-    
-    def _FilterByNodeId(self, nodes, node_ids):
-        return dict((k, nodes[k]) for k in node_ids if k in nodes)
-
-    def _FilterByFilters(self, nodes, filters):
-        def has_all_tags(node_id):
-            data = nodes[node_id]
-            for name, value in filters.iteritems():
-                if name == 'value' or name == 'tagname':
-                    tagname = filters['tagname']
-                    tagval = filters['value']
-                    if data[tagname] != tagval:
-                        return False
-                elif name == 'node_id':
-                    node_ids = list(value)
-                    if node_id not in node_ids:
-                        return False
-                else:
-                    #if  (name == '>last_contact' and data['lastcontact'] > value) or \
-                    if (not name in data or data[name] != value):
-                        return False
-            return True
-        return dict((k, value) for k, value in nodes.iteritems() if has_all_tags(k))
-
-    def _GetNodeFieldsInfo(self, nodes, fields):
-        result = list()
-        for k, data in nodes.iteritems():
-            if not fields:
-                result.append(data)
-                continue
-            r_data = dict()
-            for f in fields:
-                if f == "node_id":
-                    value = k
-                else:
-                    value = data[f]
-                r_data[f] = value
-            result.append(r_data)
-        return result
-
-    def _GetTagInfo(self, nodes, tagnames, fields):
-        result = list()
-        for k, data in nodes.iteritems():
-            for name, value in data.iteritems():
-                r_data = dict()
-                if tagnames and name not in tagnames:
-                    continue
-                for f in fields:
-                    if f == "node_id":
-                        val = k
-                    if f == "tagname":
-                        val = name
-                    if f == "value":
-                        val = value
-                    r_data[f] = val
-                result.append(r_data)
-        return result
-
-    def AddSliceNodes(self, slicename, nodes=None):
-        import os, commands, tempfile
-        nodes = set(nodes)
-        nodes.update(self._slice_nodes)
-        nodes_data = dict((k, self._all_nodes[k]) for k in nodes)
-        p = sfa.SFAResourcesParser()
-        xml = p.create_slice_xml(nodes_data, self._slice_tags)
-        fh, fname = tempfile.mkstemp()
-        os.write(fh, xml)
-        os.close(fh)
-        out = commands.getoutput("sfi.py create %s %s" % (self._slice_id, fname))
-        os.remove(fname)
-        #print out
-
-def sfiapi(slice_id):
-    api = SFIAPI(slice_id)
-    return api
-
diff --git a/src/nepi/util/sshfuncs.py b/src/nepi/util/sshfuncs.py
new file mode 100644 (file)
index 0000000..a8a9b86
--- /dev/null
@@ -0,0 +1,925 @@
+import base64
+import errno
+import hashlib
+import logging
+import os
+import os.path
+import re
+import select
+import signal
+import socket
+import subprocess
+import threading
+import time
+import tempfile
+
+logger = logging.getLogger("sshfuncs")
+
def log(msg, level, out = None, err = None):
    """Log *msg* at *level*, appending captured stdout/stderr when given."""
    extra = ""
    if out:
        extra += " - OUT: %s " % out

    if err:
        extra += " - ERROR: %s " % err

    logger.log(level, msg + extra)
+
+
# Portable path to the null device; very old platforms may lack os.devnull.
if hasattr(os, "devnull"):
    DEV_NULL = os.devnull
else:
    DEV_NULL = "/dev/null"

# Characters that never need quoting on a shell command line (see shell_escape).
SHELL_SAFE = re.compile('^[-a-zA-Z0-9_=+:.,/]*$')
+
# Marker classes used as out-of-band sentinel values; they are never
# instantiated, only compared by identity (e.g. "stderr is STDOUT").

class STDOUT:
    """
    Special value that when given to rspawn in stderr causes stderr to 
    redirect to whatever stdout was redirected to.
    """

class RUNNING:
    """
    Process is still running.
    """

class FINISHED:
    """
    Process is finished.
    """

class NOT_STARTED:
    """
    Process hasn't started running yet (this should be very rare).
    """
+
# Process-wide DNS resolution cache shared by all ssh helpers.
hostbyname_cache = dict()
hostbyname_cache_lock = threading.Lock()

def gethostbyname(host):
    """Resolve *host* to an IPv4 address string, caching results.

    Results are memoized in hostbyname_cache so each hostname is resolved
    at most once per process.

    :param host: hostname to resolve
    :type host: str

    :return: dotted-quad address, as returned by socket.gethostbyname
    :rtype: str
    """
    hostbyname = hostbyname_cache.get(host)
    if not hostbyname:
        with hostbyname_cache_lock:
            # Double-check inside the lock: another thread may have
            # resolved this host while we were waiting, and repeating
            # the DNS query here would be redundant.
            hostbyname = hostbyname_cache.get(host)
            if not hostbyname:
                hostbyname = socket.gethostbyname(host)
                hostbyname_cache[host] = hostbyname

                logging.getLogger("sshfuncs").debug(
                    " Added hostbyname %s - %s ", host, hostbyname)

    return hostbyname
+
# Memoized result of openssh_has_persist(); None means "not probed yet".
OPENSSH_HAS_PERSIST = None

def openssh_has_persist():
    """ The ssh_config options ControlMaster and ControlPersist allow to
    reuse a same network connection for multiple ssh sessions. In this 
    way limitations on number of open ssh connections can be bypassed.
    However, older versions of openSSH do not support this feature.
    This function is used to determine if ssh connection persist features
    can be used.

    Returns True when the local ssh client version banner matches
    OpenSSH >= 5.8. The probe runs once; the result is memoized in
    OPENSSH_HAS_PERSIST.
    """
    global OPENSSH_HAS_PERSIST
    if OPENSSH_HAS_PERSIST is None:
        # "ssh -v" prints the client version banner; stderr is folded
        # into stdout so the banner ends up in `out`.
        proc = subprocess.Popen(["ssh","-v"],
            stdout = subprocess.PIPE,
            stderr = subprocess.STDOUT,
            stdin = open("/dev/null","r") )
        out,err = proc.communicate()
        proc.wait()
        
        # Accepts OpenSSH 6+, 5.8/5.9, 5.10+, and 10+ version strings.
        vre = re.compile(r'OpenSSH_(?:[6-9]|5[.][8-9]|5[.][1-9][0-9]|[1-9][0-9]).*', re.I)
        OPENSSH_HAS_PERSIST = bool(vre.match(out))
    return OPENSSH_HAS_PERSIST
+
def make_server_key_args(server_key, host, port):
    """ Returns a reference to a temporary known_hosts file, to which 
    the server key has been added. 
    
    Make sure to hold onto the temp file reference until the process is 
    done with it

    :param server_key: the server public key
    :type server_key: str

    :param host: the hostname
    :type host: str

    :param port: the ssh port
    :type port: str

    """
    # Resolve the address BEFORE decorating the host with the port:
    # gethostbyname() expects a bare hostname and would fail on "host:port".
    hostbyname = gethostbyname(host)

    if port is not None:
        # known_hosts entries for non-default ports use the "[host]:port"
        # syntax (see sshd(8), SSH_KNOWN_HOSTS FILE FORMAT).
        host = '[%s]:%s' % (host, str(port))
        hostbyname = '[%s]:%s' % (hostbyname, str(port))

    # Create a temporary server key file
    tmp_known_hosts = tempfile.NamedTemporaryFile()

    # Add the intended host key
    tmp_known_hosts.write('%s,%s %s\n' % (host, hostbyname, server_key))
    
    # If we're not in strict mode, add user-configured keys
    if os.environ.get('NEPI_STRICT_AUTH_MODE',"").lower() not in ('1','true','on'):
        user_hosts_path = '%s/.ssh/known_hosts' % (os.environ.get('HOME',""),)
        if os.access(user_hosts_path, os.R_OK):
            f = open(user_hosts_path, "r")
            tmp_known_hosts.write(f.read())
            f.close()
        
    tmp_known_hosts.flush()
    
    return tmp_known_hosts
+
def make_control_path(agent, forward_x11):
    """Build the ssh ControlPath template for connection sharing.

    Distinct agent/X11 option combinations get distinct socket paths so
    that multiplexed sessions with different settings never collide.
    The %r/%h/%p tokens are expanded by ssh itself (user/host/port).
    """
    suffix = ""
    if agent:
        suffix += "_a"
    if forward_x11:
        suffix += "_x"
    return "/tmp/nepi_ssh%s-%%r@%%h:%%p" % suffix
+
def shell_escape(s):
    """ Escapes strings so that they are safe to use as command-line 
    arguments """
    if SHELL_SAFE.match(s):
        # Only characters known to be shell-neutral: no quoting needed.
        return s
    # Otherwise wrap in single quotes, replacing every risky character
    # with a bash ANSI-C escape ('$'\xNN'') spliced into the quoted run.
    pieces = []
    for ch in s:
        printable = 32 <= ord(ch) < 127 or ch in ('\r', '\n', '\t')
        if printable and ch not in ("'", '"'):
            pieces.append(ch)
        else:
            pieces.append("'$'\\x%02x''" % (ord(ch),))
    return "'%s'" % (''.join(pieces),)
+
def eintr_retry(func):
    """Retries a function invocation when a EINTR occurs.

    Wraps *func* so that calls interrupted by a signal (EINTR raised via
    select.error, socket.error or OSError) are transparently re-issued,
    up to 4 times, followed by one final uncounted attempt whose
    exceptions propagate.

    NOTE(review): the "_retry" keyword looks inverted: _retry=True makes
    the loop range(0) (single attempt, no retries) while the default
    False gives 4 retries -- confirm intent with callers.
    """
    import functools
    @functools.wraps(func)
    def rv(*p, **kw):
        retry = kw.pop("_retry", False)
        for i in xrange(0 if retry else 4):
            try:
                return func(*p, **kw)
            except (select.error, socket.error), args:
                # Interrupted system call: safe to simply retry
                if args[0] == errno.EINTR:
                    continue
                else:
                    raise
            except OSError, e:
                if e.errno == errno.EINTR:
                    continue
                else:
                    raise
        else:
            # Loop exhausted (or skipped): last attempt, errors propagate
            return func(*p, **kw)
    return rv
+
def rexec(command, host, user, 
        port = None, 
        agent = True,
        sudo = False,
        stdin = None,
        identity = None,
        server_key = None,
        env = None,
        tty = False,
        timeout = None,
        retry = 3,
        err_on_timeout = True,
        connect_timeout = 30,
        persistent = True,
        forward_x11 = False,
        strict_host_checking = True):
    """
    Executes a remote command, returns ((stdout,stderr),process)

    The command is run through the local ssh client. On ssh-level errors
    or non-zero command exit the invocation is retried up to `retry`
    times with a linear back-off (0s, 2s, 4s, ...).

    NOTE(review): the `sudo` parameter is accepted but never used in the
    body -- callers apparently must prefix the command themselves.
    NOTE(review): if _communicate raises on the very first attempt,
    `out`/`err` are referenced in the except handler before assignment.
    """
    
    tmp_known_hosts = None
    hostip = gethostbyname(host)

    args = ['ssh', '-C',
            # Don't bother with localhost. Makes test easier
            '-o', 'NoHostAuthenticationForLocalhost=yes',
            '-o', 'ConnectTimeout=%d' % (int(connect_timeout),),
            '-o', 'ConnectionAttempts=3',
            '-o', 'ServerAliveInterval=30',
            '-o', 'TCPKeepAlive=yes',
            '-l', user, hostip or host]

    if persistent and openssh_has_persist():
        # Share one master connection across invocations (ControlPersist)
        args.extend([
            '-o', 'ControlMaster=auto',
            '-o', 'ControlPath=%s' % (make_control_path(agent, forward_x11),),
            '-o', 'ControlPersist=60' ])

    if not strict_host_checking:
        # Do not check for Host key. Unsafe.
        args.extend(['-o', 'StrictHostKeyChecking=no'])

    if agent:
        args.append('-A')

    if port:
        args.append('-p%d' % port)

    if identity:
        args.extend(('-i', identity))

    if tty:
        # -t twice: force tty allocation even without a local tty
        args.append('-t')
        args.append('-t')

    if forward_x11:
        args.append('-X')

    if server_key:
        # Create a temporary server key file
        tmp_known_hosts = make_server_key_args(server_key, host, port)
        args.extend(['-o', 'UserKnownHostsFile=%s' % (tmp_known_hosts.name,)])

    args.append(command)

    for x in xrange(retry):
        # connects to the remote host and starts a remote connection
        proc = subprocess.Popen(args,
                env = env,
                stdout = subprocess.PIPE,
                stdin = subprocess.PIPE, 
                stderr = subprocess.PIPE)
        
        # attach tempfile object to the process, to make sure the file stays
        # alive until the process is finished with it
        proc._known_hosts = tmp_known_hosts
    
        try:
            out, err = _communicate(proc, stdin, timeout, err_on_timeout)
            msg = " rexec - host %s - command %s " % (host, " ".join(args))
            log(msg, logging.DEBUG, out, err)

            if proc.poll():
                # Non-zero exit: decide whether this attempt can be retried
                skip = False

                if err.strip().startswith('ssh: ') or err.strip().startswith('mux_client_hello_exchange: '):
                    # SSH error, can safely retry
                    skip = True 
                elif retry:
                    # Probably timed out or plain failed but can retry
                    skip = True 
                
                if skip:
                    # Linear back-off before the next attempt
                    t = x*2
                    msg = "SLEEPING %d ... ATEMPT %d - host %s - command %s " % ( 
                            t, x, host, " ".join(args))
                    log(msg, logging.DEBUG)

                    time.sleep(t)
                    continue
            break
        except RuntimeError, e:
            msg = " rexec EXCEPTION - host %s - command %s - TIMEOUT -> %s" % (host, " ".join(args), e.args)
            log(msg, logging.DEBUG, out, err)

            if retry <= 0:
                raise
            retry -= 1
        
    return ((out, err), proc)
+
def rcopy(source, dest,
        port = None, 
        agent = True, 
        recursive = False,
        identity = None,
        server_key = None,
        retry = 3,
        strict_host_checking = True):
    """
    Copies from/to remote sites.
    
    Source and destination should have the user and host encoded
    as per scp specs.
    
    If source is a file object, a special mode will be used to
    create the remote file with the same contents.
    
    If dest is a file object, the remote file (source) will be
    read and written into dest.
    
    In these modes, recursive cannot be True.
    
    Source can be a list of files to copy to a single destination,
    in which case it is advised that the destination be a folder.

    Returns ((out, err), proc) of the underlying ssh/scp process.

    NOTE(review): in the streaming (file / file-like endpoint) modes the
    strict_host_checking flag is never added to the ssh command line; it
    only takes effect in the plain-scp path -- confirm intent.
    """
    
    # A real file object still positioned at offset 0 can be handed to
    # scp by name; any other readable is spooled into a temp file first.
    if isinstance(source, file) and source.tell() == 0:
        source = source.name
    elif hasattr(source, 'read'):
        tmp = tempfile.NamedTemporaryFile()
        while True:
            buf = source.read(65536)
            if buf:
                tmp.write(buf)
            else:
                break
        tmp.seek(0)
        source = tmp.name
    
    if isinstance(source, file) or isinstance(dest, file) \
            or hasattr(source, 'read')  or hasattr(dest, 'write'):
        assert not recursive
        
        # Parse source/destination as <user>@<server>:<path>
        if isinstance(dest, basestring) and ':' in dest:
            remspec, path = dest.split(':',1)
        elif isinstance(source, basestring) and ':' in source:
            remspec, path = source.split(':',1)
        else:
            raise ValueError, "Both endpoints cannot be local"
        user,host = remspec.rsplit('@',1)
        
        tmp_known_hosts = None
        hostip = gethostbyname(host)
        
        args = ['ssh', '-l', user, '-C',
                # Don't bother with localhost. Makes test easier
                '-o', 'NoHostAuthenticationForLocalhost=yes',
                '-o', 'ConnectTimeout=60',
                '-o', 'ConnectionAttempts=3',
                '-o', 'ServerAliveInterval=30',
                '-o', 'TCPKeepAlive=yes',
                hostip or host ]

        if openssh_has_persist():
            args.extend([
                '-o', 'ControlMaster=auto',
                '-o', 'ControlPath=%s' % (make_control_path(agent, False),),
                '-o', 'ControlPersist=60' ])

        if port:
            args.append('-P%d' % port)

        if identity:
            args.extend(('-i', identity))

        if server_key:
            # Create a temporary server key file
            tmp_known_hosts = make_server_key_args(server_key, host, port)
            args.extend(['-o', 'UserKnownHostsFile=%s' % (tmp_known_hosts.name,)])
        
        # Stream through a remote "cat": write for uploads, read for downloads
        if isinstance(source, file) or hasattr(source, 'read'):
            args.append('cat > %s' % (shell_escape(path),))
        elif isinstance(dest, file) or hasattr(dest, 'write'):
            args.append('cat %s' % (shell_escape(path),))
        else:
            raise AssertionError, "Unreachable code reached! :-Q"
        
        # connects to the remote host and starts a remote connection
        if isinstance(source, file):
            proc = subprocess.Popen(args, 
                    stdout = open('/dev/null','w'),
                    stderr = subprocess.PIPE,
                    stdin = source)
            err = proc.stderr.read()
            proc._known_hosts = tmp_known_hosts
            eintr_retry(proc.wait)()
            return ((None,err), proc)
        elif isinstance(dest, file):
            # NOTE(review): this branch looks copy-pasted from the upload
            # branch above -- remote output goes to /dev/null and stdin is
            # wired to `source` instead of writing into `dest`, so the
            # download never reaches `dest`. Confirm and fix upstream.
            proc = subprocess.Popen(args, 
                    stdout = open('/dev/null','w'),
                    stderr = subprocess.PIPE,
                    stdin = source)
            err = proc.stderr.read()
            proc._known_hosts = tmp_known_hosts
            eintr_retry(proc.wait)()
            return ((None,err), proc)
        elif hasattr(source, 'read'):
            # file-like (but not file) source
            proc = subprocess.Popen(args, 
                    stdout = open('/dev/null','w'),
                    stderr = subprocess.PIPE,
                    stdin = subprocess.PIPE)
            
            # Pump source -> proc.stdin, draining stderr as we go so the
            # child never blocks on a full pipe.
            buf = None
            err = []
            while True:
                if not buf:
                    buf = source.read(4096)
                if not buf:
                    #EOF
                    break
                
                rdrdy, wrdy, broken = select.select(
                    [proc.stderr],
                    [proc.stdin],
                    [proc.stderr,proc.stdin])
                
                if proc.stderr in rdrdy:
                    # use os.read for fully unbuffered behavior
                    err.append(os.read(proc.stderr.fileno(), 4096))
                
                if proc.stdin in wrdy:
                    proc.stdin.write(buf)
                    buf = None
                
                if broken:
                    break
            proc.stdin.close()
            err.append(proc.stderr.read())
                
            proc._known_hosts = tmp_known_hosts
            eintr_retry(proc.wait)()
            return ((None,''.join(err)), proc)
        elif hasattr(dest, 'write'):
            # file-like (but not file) dest
            proc = subprocess.Popen(args, 
                    stdout = subprocess.PIPE,
                    stderr = subprocess.PIPE,
                    stdin = open('/dev/null','w'))
            
            # Pump proc.stdout -> dest, draining stderr in parallel.
            buf = None
            err = []
            while True:
                rdrdy, wrdy, broken = select.select(
                    [proc.stderr, proc.stdout],
                    [],
                    [proc.stderr, proc.stdout])
                
                if proc.stderr in rdrdy:
                    # use os.read for fully unbuffered behavior
                    err.append(os.read(proc.stderr.fileno(), 4096))
                
                if proc.stdout in rdrdy:
                    # use os.read for fully unbuffered behavior
                    buf = os.read(proc.stdout.fileno(), 4096)
                    dest.write(buf)
                    
                    if not buf:
                        #EOF
                        break
                
                if broken:
                    break
            err.append(proc.stderr.read())
                
            proc._known_hosts = tmp_known_hosts
            eintr_retry(proc.wait)()
            return ((None,''.join(err)), proc)
        else:
            raise AssertionError, "Unreachable code reached! :-Q"
    else:
        # Parse destination as <user>@<server>:<path>
        if isinstance(dest, basestring) and ':' in dest:
            remspec, path = dest.split(':',1)
        elif isinstance(source, basestring) and ':' in source:
            remspec, path = source.split(':',1)
        else:
            raise ValueError, "Both endpoints cannot be local"
        user,host = remspec.rsplit('@',1)
        
        # plain scp
        tmp_known_hosts = None

        args = ['scp', '-q', '-p', '-C',
                # Don't bother with localhost. Makes test easier
                '-o', 'NoHostAuthenticationForLocalhost=yes',
                '-o', 'ConnectTimeout=60',
                '-o', 'ConnectionAttempts=3',
                '-o', 'ServerAliveInterval=30',
                '-o', 'TCPKeepAlive=yes' ]
                
        if port:
            args.append('-P%d' % port)

        if recursive:
            args.append('-r')

        if identity:
            args.extend(('-i', identity))

        if server_key:
            # Create a temporary server key file
            tmp_known_hosts = make_server_key_args(server_key, host, port)
            args.extend(['-o', 'UserKnownHostsFile=%s' % (tmp_known_hosts.name,)])

        if not strict_host_checking:
            # Do not check for Host key. Unsafe.
            args.extend(['-o', 'StrictHostKeyChecking=no'])

        if isinstance(source,list):
            args.extend(source)
        else:
            # Connection sharing is only set up for single-source copies
            if openssh_has_persist():
                args.extend([
                    '-o', 'ControlMaster=auto',
                    '-o', 'ControlPath=%s' % (make_control_path(agent, False),)
                    ])
            args.append(source)

        args.append(dest)

        for x in xrange(retry):
            # connects to the remote host and starts a remote connection
            proc = subprocess.Popen(args,
                    stdout = subprocess.PIPE,
                    stdin = subprocess.PIPE, 
                    stderr = subprocess.PIPE)
            
            # attach tempfile object to the process, to make sure the file stays
            # alive until the process is finished with it
            proc._known_hosts = tmp_known_hosts
        
            try:
                (out, err) = proc.communicate()
                eintr_retry(proc.wait)()
                msg = " rcopy - host %s - command %s " % (host, " ".join(args))
                log(msg, logging.DEBUG, out, err)

                if proc.poll():
                    # Non-zero exit: linear back-off (0s, 2s, ...) and retry
                    t = x*2
                    msg = "SLEEPING %d ... ATEMPT %d - host %s - command %s " % ( 
                            t, x, host, " ".join(args))
                    log(msg, logging.DEBUG)

                    time.sleep(t)
                    continue

                break
            except RuntimeError, e:
                msg = " rcopy EXCEPTION - host %s - command %s - TIMEOUT -> %s" % (host, " ".join(args), e.args)
                log(msg, logging.DEBUG, out, err)

                if retry <= 0:
                    raise
                retry -= 1
            
        return ((out, err), proc)
+
def rspawn(command, pidfile, 
        stdout = '/dev/null', 
        stderr = STDOUT, 
        stdin = '/dev/null', 
        home = None, 
        create_home = False, 
        sudo = False,
        host = None, 
        port = None, 
        user = None, 
        agent = None, 
        identity = None, 
        server_key = None,
        tty = False):
    """
    Spawn a remote command such that it will continue working asynchronously.
    
    Parameters:
        command: the command to run - it should be a single line.
        
        pidfile: path of a (ideally unique to this task) pidfile for tracking the process.
        
        stdout: path of a file to redirect standard output to - must be a string.
            Defaults to /dev/null
        stderr: path of a file to redirect standard error to - string or the special STDOUT value
            to redirect to the same file stdout was redirected to. Defaults to STDOUT.
        stdin: path of a file with input to be piped into the command's standard input
        
        home: path of a folder to use as working directory - should exist, unless you specify create_home
        
        create_home: if True, the home folder will be created first with mkdir -p
        
        sudo: whether the command needs to be executed as root
        
        host/port/user/agent/identity: see rexec
    
    Returns:
        (stdout, stderr), process
        
        Of the spawning process, which only captures errors at spawning time.
        Usually only useful for diagnostics.

    Raises:
        RuntimeError when the spawning ssh invocation exits non-zero.
    """
    # Start process in a "daemonized" way, using nohup and heavy
    # stdin/out redirection to avoid connection issues
    if stderr is STDOUT:
        stderr = '&1'
    else:
        stderr = ' ' + stderr
    
    # NOTE(review): "echo $! 1 > pidfile" records "<pid> 1" -- the literal
    # 1 acts as a ppid placeholder that rcheckpid parses as (pid, ppid).
    daemon_command = '{ { %(command)s  > %(stdout)s 2>%(stderr)s < %(stdin)s & } ; echo $! 1 > %(pidfile)s ; }' % {
        'command' : command,
        'pidfile' : shell_escape(pidfile),
        'stdout' : stdout,
        'stderr' : stderr,
        'stdin' : stdin,
    }
    
    # Optionally mkdir/cd into the working dir, clear any stale pidfile,
    # then run the daemonized command detached from the session via nohup.
    cmd = "%(create)s%(gohome)s rm -f %(pidfile)s ; %(sudo)s nohup bash -c %(command)s " % {
            'command' : shell_escape(daemon_command),
            'sudo' : 'sudo -S' if sudo else '',
            'pidfile' : shell_escape(pidfile),
            'gohome' : 'cd %s ; ' % (shell_escape(home),) if home else '',
            'create' : 'mkdir -p %s ; ' % (shell_escape(home),) if create_home and home else '',
        }

    (out,err),proc = rexec(
        cmd,
        host = host,
        port = port,
        user = user,
        agent = agent,
        identity = identity,
        server_key = server_key,
        tty = tty ,
        )
    
    if proc.wait():
        raise RuntimeError, "Failed to set up application on host %s: %s %s" % (host, out,err,)

    return ((out, err), proc)
+
@eintr_retry
def rcheckpid(pidfile,
        host = None, 
        port = None, 
        user = None, 
        agent = None, 
        identity = None,
        server_key = None):
    """
    Check the pidfile of a process spawned with remote_spawn.
    
    Parameters:
        pidfile: the pidfile passed to remote_span
        
        host/port/user/agent/identity: see rexec
    
    Returns:
        
        A (pid, ppid) tuple useful for calling remote_status and remote_kill,
        or None if the pidfile isn't valid yet (maybe the process is still starting).
    """

    (out,err),proc = rexec(
        "cat %(pidfile)s" % {
            'pidfile' : pidfile,
        },
        host = host,
        port = port,
        user = user,
        agent = agent,
        identity = identity,
        server_key = server_key
        )
        
    # Non-zero exit: the pidfile does not exist (yet)
    if proc.wait():
        return None
    
    if out:
        try:
            # rspawn writes "<pid> <ppid>" separated by a single space
            return map(int,out.strip().split(' ',1))
        except:
            # Ignore, many ways to fail that don't matter that much
            return None
+
@eintr_retry
def rstatus(pid, ppid, 
        host = None, 
        port = None, 
        user = None, 
        agent = None, 
        identity = None,
        server_key = None):
    """
    Check the status of a process spawned with remote_spawn.
    
    Parameters:
        pid/ppid: pid and parent-pid of the spawned process. See remote_check_pid
        
        host/port/user/agent/identity: see rexec
    
    Returns:
        
        One of NOT_STARTED, RUNNING, FINISHED
    """

    (out,err),proc = rexec(
        # Check only by pid. pid+ppid does not always work (especially with sudo) 
        # The remote pipeline prints "wait" while the pid is alive and
        # "done" otherwise; ppid is interpolated but unused by the command.
        " (( ps --pid %(pid)d -o pid | grep -c %(pid)d && echo 'wait')  || echo 'done' ) | tail -n 1" % {
            'ppid' : ppid,
            'pid' : pid,
        },
        host = host,
        port = port,
        user = user,
        agent = agent,
        identity = identity,
        server_key = server_key
        )
    
    # ssh itself failed: no information about the process
    if proc.wait():
        return NOT_STARTED
    
    status = False
    if err:
        # An unmounted /proc (PlanetLab vservers) makes ps fail even
        # though the process may well be alive -- assume RUNNING.
        if err.strip().find("Error, do this: mount -t proc none /proc") >= 0:
            status = True
    elif out:
        status = (out.strip() == 'wait')
    else:
        return NOT_STARTED
    return RUNNING if status else FINISHED
+
@eintr_retry
def rkill(pid, ppid,
        host = None, 
        port = None, 
        user = None, 
        agent = None, 
        sudo = False,
        identity = None, 
        server_key = None, 
        nowait = False):
    """
    Kill a process spawned with remote_spawn.
    
    First tries a SIGTERM, and if the process does not end in 10 seconds,
    it sends a SIGKILL.
    
    Parameters:
        pid/ppid: pid and parent-pid of the spawned process. See remote_check_pid
        
        sudo: whether the command was run with sudo - careful killing like this.
        
        host/port/user/agent/identity: see rexec
    
    Returns:
        
        Nothing, should have killed the process
    """
    
    # Direct children of the target process, killed along with it
    subkill = "$(ps --ppid %(pid)d -o pid h)" % { 'pid' : pid }
    # Remote script: TERM the process group and children, poll while the
    # pid is still alive, then escalate to SIGKILL as a last resort.
    cmd = """
SUBKILL="%(subkill)s" ;
%(sudo)s kill -- -%(pid)d $SUBKILL || /bin/true
%(sudo)s kill %(pid)d $SUBKILL || /bin/true
for x in 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 ; do 
    sleep 0.2 
    if [ `ps --pid %(pid)d -o pid | grep -c %(pid)d` == '0' ]; then
        break
    else
        %(sudo)s kill -- -%(pid)d $SUBKILL || /bin/true
        %(sudo)s kill %(pid)d $SUBKILL || /bin/true
    fi
    sleep 1.8
done
if [ `ps --pid %(pid)d -o pid | grep -c %(pid)d` != '0' ]; then
    %(sudo)s kill -9 -- -%(pid)d $SUBKILL || /bin/true
    %(sudo)s kill -9 %(pid)d $SUBKILL || /bin/true
fi
"""
    if nowait:
        # Detach the whole kill script so the caller does not block on it
        cmd = "( %s ) >/dev/null 2>/dev/null </dev/null &" % (cmd,)

    (out,err),proc = rexec(
        cmd % {
            'ppid' : ppid,
            'pid' : pid,
            'sudo' : 'sudo -S' if sudo else '',
            'subkill' : subkill,
        },
        host = host,
        port = port,
        user = user,
        agent = agent,
        identity = identity,
        server_key = server_key
        )
    
    # wait, don't leave zombies around
    proc.wait()

    return (out, err), proc
+
+# POSIX
+def _communicate(self, input, timeout=None, err_on_timeout=True):
+    read_set = []
+    write_set = []
+    stdout = None # Return
+    stderr = None # Return
+    
+    killed = False
+    
+    if timeout is not None:
+        timelimit = time.time() + timeout
+        killtime = timelimit + 4
+        bailtime = timelimit + 4
+
+    if self.stdin:
+        # Flush stdio buffer.  This might block, if the user has
+        # been writing to .stdin in an uncontrolled fashion.
+        self.stdin.flush()
+        if input:
+            write_set.append(self.stdin)
+        else:
+            self.stdin.close()
+    if self.stdout:
+        read_set.append(self.stdout)
+        stdout = []
+    if self.stderr:
+        read_set.append(self.stderr)
+        stderr = []
+
+    input_offset = 0
+    while read_set or write_set:
+        if timeout is not None:
+            curtime = time.time()
+            if timeout is None or curtime > timelimit:
+                if curtime > bailtime:
+                    break
+                elif curtime > killtime:
+                    signum = signal.SIGKILL
+                else:
+                    signum = signal.SIGTERM
+                # Lets kill it
+                os.kill(self.pid, signum)
+                select_timeout = 0.5
+            else:
+                select_timeout = timelimit - curtime + 0.1
+        else:
+            select_timeout = 1.0
+        
+        if select_timeout > 1.0:
+            select_timeout = 1.0
+            
+        try:
+            rlist, wlist, xlist = select.select(read_set, write_set, [], select_timeout)
+        except select.error,e:
+            if e[0] != 4:
+                raise
+            else:
+                continue
+        
+        if not rlist and not wlist and not xlist and self.poll() is not None:
+            # timeout and process exited, say bye
+            break
+
+        if self.stdin in wlist:
+            # When select has indicated that the file is writable,
+            # we can write up to PIPE_BUF bytes without risk of
+            # blocking.  POSIX defines PIPE_BUF >= 512
+            bytes_written = os.write(self.stdin.fileno(), buffer(input, input_offset, 512))
+            input_offset += bytes_written
+            if input_offset >= len(input):
+                self.stdin.close()
+                write_set.remove(self.stdin)
+
+        if self.stdout in rlist:
+            data = os.read(self.stdout.fileno(), 1024)
+            if data == "":
+                self.stdout.close()
+                read_set.remove(self.stdout)
+            stdout.append(data)
+
+        if self.stderr in rlist:
+            data = os.read(self.stderr.fileno(), 1024)
+            if data == "":
+                self.stderr.close()
+                read_set.remove(self.stderr)
+            stderr.append(data)
+    
+    # All data exchanged.  Translate lists into strings.
+    if stdout is not None:
+        stdout = ''.join(stdout)
+    if stderr is not None:
+        stderr = ''.join(stderr)
+
+    # Translate newlines, if requested.  We cannot let the file
+    # object do the translation: It is based on stdio, which is
+    # impossible to combine with select (unless forcing no
+    # buffering).
+    if self.universal_newlines and hasattr(file, 'newlines'):
+        if stdout:
+            stdout = self._translate_newlines(stdout)
+        if stderr:
+            stderr = self._translate_newlines(stderr)
+
+    if killed and err_on_timeout:
+        errcode = self.poll()
+        raise RuntimeError, ("Operation timed out", errcode, stdout, stderr)
+    else:
+        if killed:
+            self.poll()
+        else:
+            self.wait()
+        return (stdout, stderr)
+
diff --git a/src/nepi/util/tags.py b/src/nepi/util/tags.py
deleted file mode 100644 (file)
index 4cd4c38..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-# -*- coding: utf-8 -*-
-
-MOBILE = "mobile"
-NODE = "node"
-INTERFACE = "interface"
-WIRELESS = "wireless"
-APPLICATION = "application"
-NAT = "nat"
-SWITCH = "switch"
-PPP = "point-to-point"
-PROTOCOL = "protocol"
-TUNNEL = "tunnel"
-INTERNET = "internet"
-HUB = "hub"
-ALLOW_ADDRESSES = "allow_addresses"
-ALLOW_ROUTES = "allow_routes"
-HAS_ADDRESSES = "has_addresses"
-HAS_ROUTES = "has_routes"
-
-class Taggable(object):
-    def __init__(self):
-        super(Taggable, self).__init__()
-        self._tags = list()
-
-    @property
-    def tags(self):
-        return self._tags
-
-    def add_tag(self, tag_id):
-        self._tags.append(tag_id)
-
-    def has_tag(self, tag_id):
-        return tag_id in self._tags
-
diff --git a/src/nepi/util/timefuncs.py b/src/nepi/util/timefuncs.py
new file mode 100644 (file)
index 0000000..0cf4fcd
--- /dev/null
@@ -0,0 +1,55 @@
+import datetime
+import re
+
+_strf = "%Y%m%d%H%M%S%f"
+_reabs = re.compile("^\d{20}$")
+_rerel = re.compile("^(?P<time>\d+(.\d+)?)(?P<units>h|m|s|ms|us)$")
+
+# Workaround to fix "ImportError: Failed to import _strptime because the import lock is held by another thread."
+datetime.datetime.strptime("20120807124732894211", _strf)
+
+def strfnow():
+    """ Current date """
+    return datetime.datetime.now().strftime(_strf)
+
+def strfdiff(str1, str2):
+    # Time difference in seconds, including milliseconds
+    d1 = datetime.datetime.strptime(str1, _strf)
+    d2 = datetime.datetime.strptime(str2, _strf)
+    diff = d1 - d2
+    ddays = diff.days * 86400
+    dus = round(diff.microseconds * 1.0e-06, 2) 
+    ret = ddays + diff.seconds + dus
+    # delay must be > 0
+    return (ret or 0.001)
+
+def strfvalid(date):
+    """ User-defined date to scheduler date 
+    
+    :param date : user-defined date matching the pattern _strf 
+    :type date : date 
+
+    """
+    if not date:
+        return strfnow()
+    if _reabs.match(date):
+        return date
+    m = _rerel.match(date)
+    if m:
+        time = float(m.groupdict()['time'])
+        units = m.groupdict()['units']
+        if units == 'h':
+            delta = datetime.timedelta(hours = time) 
+        elif units == 'm':
+            delta = datetime.timedelta(minutes = time) 
+        elif units == 's':
+            delta = datetime.timedelta(seconds = time) 
+        elif units == 'ms':
+            delta = datetime.timedelta(microseconds = (time*1000)) 
+        else:
+            delta = datetime.timedelta(microseconds = time) 
+        now = datetime.datetime.now()
+        d = now + delta
+        return d.strftime(_strf)
+    return None
+
diff --git a/src/nepi/util/tunchannel.py b/src/nepi/util/tunchannel.py
deleted file mode 100644 (file)
index a5c3960..0000000
+++ /dev/null
@@ -1,770 +0,0 @@
-import select
-import sys
-import os
-import struct
-import socket
-import threading
-import traceback
-import errno
-import fcntl
-import random
-import traceback
-import functools
-import collections
-import ctypes
-import time
-
-def ipfmt(ip):
-    ipbytes = map(ord,ip.decode("hex"))
-    return '.'.join(map(str,ipbytes))
-
-tagtype = {
-    '0806' : 'arp',
-    '0800' : 'ipv4',
-    '8870' : 'jumbo',
-    '8863' : 'PPPoE discover',
-    '8864' : 'PPPoE',
-    '86dd' : 'ipv6',
-}
-
-def etherProto(packet, len=len):
-    if len(packet) > 14:
-        if packet[12] == "\x81" and packet[13] == "\x00":
-            # tagged
-            return packet[16:18]
-        else:
-            # untagged
-            return packet[12:14]
-    # default: ip
-    return "\x08\x00"
-
-def formatPacket(packet, ether_mode):
-    if ether_mode:
-        stripped_packet = etherStrip(packet)
-        if not stripped_packet:
-            packet = packet.encode("hex")
-            if len(packet) < 28:
-                return "malformed eth " + packet.encode("hex")
-            else:
-                if packet[24:28] == "8100":
-                    # tagged
-                    ethertype = tagtype.get(packet[32:36], 'eth')
-                    return ethertype + " " + ( '-'.join( (
-                        packet[0:12], # MAC dest
-                        packet[12:24], # MAC src
-                        packet[24:32], # VLAN tag
-                        packet[32:36], # Ethertype/len
-                        packet[36:], # Payload
-                    ) ) )
-                else:
-                    # untagged
-                    ethertype = tagtype.get(packet[24:28], 'eth')
-                    return ethertype + " " + ( '-'.join( (
-                        packet[0:12], # MAC dest
-                        packet[12:24], # MAC src
-                        packet[24:28], # Ethertype/len
-                        packet[28:], # Payload
-                    ) ) )
-        else:
-            packet = stripped_packet
-    packet = packet.encode("hex")
-    if len(packet) < 48:
-        return "malformed ip " + packet
-    else:
-        return "ip " + ( '-'.join( (
-            packet[0:1], #version
-            packet[1:2], #header length
-            packet[2:4], #diffserv/ECN
-            packet[4:8], #total length
-            packet[8:12], #ident
-            packet[12:16], #flags/fragment offs
-            packet[16:18], #ttl
-            packet[18:20], #ip-proto
-            packet[20:24], #checksum
-            ipfmt(packet[24:32]), # src-ip
-            ipfmt(packet[32:40]), # dst-ip
-            packet[40:48] if (int(packet[1],16) > 5) else "", # options
-            packet[48:] if (int(packet[1],16) > 5) else packet[40:], # payload
-        ) ) )
-
-def _packetReady(buf, ether_mode=False, len=len, str=str):
-    if not buf:
-        return False
-        
-    rv = False
-    while not rv:
-        if len(buf[0]) < 4:
-            rv = False
-        elif ether_mode:
-            rv = True
-        else:
-            _,totallen = struct.unpack('HH',buf[0][:4])
-            totallen = socket.htons(totallen)
-            rv = len(buf[0]) >= totallen
-        if not rv and len(buf) > 1:
-            # collapse only first two buffers
-            # as needed, to mantain len(buf) meaningful
-            p1 = buf.popleft()
-            buf[0] = p1+str(buf[0])
-        else:
-            return rv
-    return rv
-
-def _pullPacket(buf, ether_mode=False, len=len, buffer=buffer):
-    if ether_mode:
-        return buf.popleft()
-    else:
-        _,totallen = struct.unpack('HH',buf[0][:4])
-        totallen = socket.htons(totallen)
-        if len(buf[0]) > totallen:
-            rv = buffer(buf[0],0,totallen)
-            buf[0] = buffer(buf[0],totallen)
-        else:
-            rv = buf.popleft()
-        return rv
-
-def etherStrip(buf, buffer=buffer, len=len):
-    if len(buf) < 14:
-        return ""
-    if buf[12:14] == '\x08\x10' and buf[16:18] == '\x08\x00':
-        # tagged ethernet frame
-        return buffer(buf, 18)
-    elif buf[12:14] == '\x08\x00':
-        # untagged ethernet frame
-        return buffer(buf, 14)
-    else:
-        return ""
-
-def etherWrap(packet):
-    return ''.join((
-        "\x00"*6*2 # bogus src and dst mac
-        +"\x08\x00", # IPv4
-        packet, # payload
-        "\x00"*4, # bogus crc
-    ))
-
-def piStrip(buf, len=len):
-    if len(buf) < 4:
-        return buf
-    else:
-        return buffer(buf,4)
-    
-def piWrap(buf, ether_mode, etherProto=etherProto):
-    if ether_mode:
-        proto = etherProto(buf)
-    else:
-        proto = "\x08\x00"
-    return ''.join((
-        "\x00\x00", # PI: 16 bits flags
-        proto, # 16 bits proto
-        buf,
-    ))
-
-_padmap = [ chr(padding) * padding for padding in xrange(127) ]
-del padding
-
-def encrypt(packet, crypter, len=len, padmap=_padmap):
-    # pad
-    padding = crypter.block_size - len(packet) % crypter.block_size
-    packet += padmap[padding]
-    
-    # encrypt
-    return crypter.encrypt(packet)
-
-def decrypt(packet, crypter, ord=ord):
-    if packet:
-        # decrypt
-        packet = crypter.decrypt(packet)
-        
-        # un-pad
-        padding = ord(packet[-1])
-        if not (0 < padding <= crypter.block_size):
-            # wrong padding
-            raise RuntimeError, "Truncated packet %s"
-        packet = packet[:-padding]
-    
-    return packet
-
-def nonblock(fd):
-    try:
-        fl = fcntl.fcntl(fd, fcntl.F_GETFL)
-        fl |= os.O_NONBLOCK
-        fcntl.fcntl(fd, fcntl.F_SETFL, fl)
-        return True
-    except:
-        traceback.print_exc(file=sys.stderr)
-        # Just ignore
-        return False
-
-def tun_fwd(tun, remote, with_pi, ether_mode, cipher_key, udp, TERMINATE, SUSPEND,
-        stderr = sys.stderr, reconnect = None, rwrite = None, rread = None,
-        tunqueue = 1000, tunkqueue = 1000, cipher = 'AES', accept_local = None, 
-        accept_remote = None, slowlocal = True, queueclass = None, 
-        bwlimit = None, len = len, max = max, min = min, buffer = buffer,
-        OSError = OSError, select = select.select, selecterror = select.error, 
-        os = os, socket = socket,
-        retrycodes=(os.errno.EWOULDBLOCK, os.errno.EAGAIN, os.errno.EINTR) ):
-    crypto_mode = False
-    crypter = None
-
-    try:
-        if cipher_key and cipher:
-            import Crypto.Cipher
-            import hashlib
-            __import__('Crypto.Cipher.'+cipher)
-            
-            ciphername = cipher
-            cipher = getattr(Crypto.Cipher, cipher)
-            hashed_key = hashlib.sha256(cipher_key).digest()
-
-            if ciphername == 'AES':
-                hashed_key = hashed_key[:16]
-            elif ciphername == 'Blowfish':
-                hashed_key = hashed_key[:24]
-            elif ciphername == 'DES':
-                hashed_key = hashed_key[:8]
-            elif ciphername == 'DES3':
-                hashed_key = hashed_key[:24]
-
-            crypter = cipher.new(
-                hashed_key, 
-                cipher.MODE_ECB)
-            crypto_mode = True
-    except:
-        # We don't want decription to work only on one side,
-        # This could break things really bad
-        #crypto_mode = False
-        #crypter = None
-        traceback.print_exc(file=sys.stderr)
-        raise
-
-    if stderr is not None:
-        if crypto_mode:
-            print >>stderr, "Packets are transmitted in CIPHER"
-        else:
-            print >>stderr, "Packets are transmitted in PLAINTEXT"
-    
-    if hasattr(remote, 'fileno'):
-        remote_fd = remote.fileno()
-        if rwrite is None:
-            def rwrite(remote, packet, os_write=os.write):
-                return os_write(remote_fd, packet)
-        if rread is None:
-            def rread(remote, maxlen, os_read=os.read):
-                return os_read(remote_fd, maxlen)
-    rnonblock = nonblock(remote)
-    tnonblock = nonblock(tun)
-    
-    # Pick up TUN/TAP writing method
-    if with_pi:
-        try:
-            import iovec
-            
-            # We have iovec, so we can skip PI injection
-            # and use iovec which does it natively
-            if ether_mode:
-                twrite = iovec.ethpiwrite
-                tread = iovec.piread2
-            else:
-                twrite = iovec.ippiwrite
-                tread = iovec.piread2
-        except ImportError:
-            # We have to inject PI headers pythonically
-            def twrite(fd, packet, oswrite=os.write, piWrap=piWrap, ether_mode=ether_mode):
-                return oswrite(fd, piWrap(packet, ether_mode))
-            
-            # For reading, we strip PI headers with buffer slicing and that's it
-            def tread(fd, maxlen, osread=os.read, piStrip=piStrip):
-                return piStrip(osread(fd, maxlen))
-    else:
-        # No need to inject PI headers
-        twrite = os.write
-        tread = os.read
-    
-    encrypt_ = encrypt
-    decrypt_ = decrypt
-    xrange_ = xrange
-
-    if accept_local is not None:
-        def tread(fd, maxlen, _tread=tread, accept=accept_local):
-            packet = _tread(fd, maxlen)
-            if accept(packet, 0):
-                return packet
-            else:
-                return None
-
-    if accept_remote is not None:
-        if crypto_mode:
-            def decrypt_(packet, crypter, decrypt_=decrypt_, accept=accept_remote):
-                packet = decrypt_(packet, crypter)
-                if accept(packet, 1):
-                    return packet
-                else:
-                    return None
-        else:
-            def rread(fd, maxlen, _rread=rread, accept=accept_remote):
-                packet = _rread(fd, maxlen)
-                if accept(packet, 1):
-                    return packet
-                else:
-                    return None
-    
-    maxbkbuf = maxfwbuf = max(10,tunqueue-tunkqueue)
-    tunhurry = max(0,maxbkbuf/2)
-    
-    if queueclass is None:
-        queueclass = collections.deque
-        maxbatch = 2000
-        maxtbatch = 50
-    else:
-        maxfwbuf = maxbkbuf = 2000000000
-        maxbatch = 50
-        maxtbatch = 30
-        tunhurry = 30
-    
-    fwbuf = queueclass()
-    bkbuf = queueclass()
-    nfwbuf = 0
-    nbkbuf = 0
-    
-    # backwards queue functions
-    # they may need packet inspection to 
-    # reconstruct packet boundaries
-    if ether_mode or udp:
-        packetReady = bool
-        pullPacket = queueclass.popleft
-        reschedule = queueclass.appendleft
-    else:
-        packetReady = _packetReady
-        pullPacket = _pullPacket
-        reschedule = queueclass.appendleft
-    
-    # forward queue functions
-    # no packet inspection needed
-    fpacketReady = bool
-    fpullPacket = queueclass.popleft
-    freschedule = queueclass.appendleft
-    
-    tunfd = tun.fileno()
-    os_read = os.read
-    os_write = os.write
-    
-    tget = time.time
-    maxbwfree = bwfree = 1500 * tunqueue
-    lastbwtime = tget()
-    
-    remoteok = True
-    
-    
-    while not TERMINATE:
-        # The SUSPEND flag has been set. This means we need to wait on
-        # the SUSPEND condition until it is released.
-        while SUSPEND and not TERMINATE:
-            time.sleep(0.5)
-
-        wset = []
-        if packetReady(bkbuf):
-            wset.append(tun)
-        if remoteok and fpacketReady(fwbuf) and (not bwlimit or bwfree > 0):
-            wset.append(remote)
-        
-        rset = []
-        if len(fwbuf) < maxfwbuf:
-            rset.append(tun)
-        if remoteok and len(bkbuf) < maxbkbuf:
-            rset.append(remote)
-        
-        if remoteok:
-            eset = (tun,remote)
-        else:
-            eset = (tun,)
-        
-        try:
-            rdrdy, wrdy, errs = select(rset,wset,eset,1)
-        except selecterror, e:
-            if e.args[0] == errno.EINTR:
-                # just retry
-                continue
-            else:
-                traceback.print_exc(file=sys.stderr)
-                # If the SUSPEND flag has been set, then the TUN will be in a bad
-                # state and the select error should be ignores.
-                if SUSPEND:
-                    continue
-                else:
-                    raise
-
-        # check for errors
-        if errs:
-            if reconnect is not None and remote in errs and tun not in errs:
-                remote = reconnect()
-                if hasattr(remote, 'fileno'):
-                    remote_fd = remote.fileno()
-            elif udp and remote in errs and tun not in errs:
-                # In UDP mode, those are always transient errors
-                # Usually, an error will imply a read-ready socket
-                # that will raise an "Connection refused" error, so
-                # disable read-readiness just for now, and retry
-                # the select
-                remoteok = False
-                continue
-            else:
-                break
-        else:
-            remoteok = True
-        
-        # check to see if we can write
-        #rr = wr = rt = wt = 0
-        if remote in wrdy:
-            sent = 0
-            try:
-                try:
-                    for x in xrange(maxbatch):
-                        packet = pullPacket(fwbuf)
-
-                        if crypto_mode:
-                            packet = encrypt_(packet, crypter)
-                        
-                        sentnow = rwrite(remote, packet)
-                        sent += sentnow
-                        #wr += 1
-                        
-                        if not udp and 0 <= sentnow < len(packet):
-                            # packet partially sent
-                            # reschedule the remaining part
-                            # this doesn't happen ever in udp mode
-                            freschedule(fwbuf, buffer(packet,sentnow))
-                        
-                        if not rnonblock or not fpacketReady(fwbuf):
-                            break
-                except OSError,e:
-                    # This except handles the entire While block on PURPOSE
-                    # as an optimization (setting a try/except block is expensive)
-                    # The only operation that can raise this exception is rwrite
-                    if e.errno in retrycodes:
-                        # re-schedule packet
-                        freschedule(fwbuf, packet)
-                    else:
-                        raise
-            except:
-                if reconnect is not None:
-                    # in UDP mode, sometimes connected sockets can return a connection refused.
-                    # Give the caller a chance to reconnect
-                    remote = reconnect()
-                    if hasattr(remote, 'fileno'):
-                        remote_fd = remote.fileno()
-                elif not udp:
-                    # in UDP mode, we ignore errors - packet loss man...
-                    raise
-                #traceback.print_exc(file=sys.stderr)
-            
-            if bwlimit:
-                bwfree -= sent
-        if tun in wrdy:
-            try:
-                for x in xrange(maxtbatch):
-                    packet = pullPacket(bkbuf)
-                    twrite(tunfd, packet)
-                    #wt += 1
-                    
-                    # Do not inject packets into the TUN faster than they arrive, unless we're falling
-                    # behind. TUN devices discard packets if their queue is full (tunkqueue), but they
-                    # don't block either (they're always ready to write), so if we flood the device 
-                    # we'll have high packet loss.
-                    if not tnonblock or (slowlocal and len(bkbuf) < tunhurry) or not packetReady(bkbuf):
-                        break
-                else:
-                    if slowlocal:
-                        # Give some time for the kernel to process the packets
-                        time.sleep(0)
-            except OSError,e:
-                # This except handles the entire While block on PURPOSE
-                # as an optimization (setting a try/except block is expensive)
-                # The only operation that can raise this exception is os_write
-                if e.errno in retrycodes:
-                    # re-schedule packet
-                    reschedule(bkbuf, packet)
-                else:
-                    raise
-        
-        # check incoming data packets
-        if tun in rdrdy:
-            try:
-                for x in xrange(maxbatch):
-                    packet = tread(tunfd,2000) # tun.read blocks until it gets 2k!
-                    if not packet:
-                        continue
-                    #rt += 1
-                    fwbuf.append(packet)
-                    
-                    if not tnonblock or len(fwbuf) >= maxfwbuf:
-                        break
-            except OSError,e:
-                # This except handles the entire While block on PURPOSE
-                # as an optimization (setting a try/except block is expensive)
-                # The only operation that can raise this exception is os_read
-                if e.errno not in retrycodes:
-                    raise
-        if remote in rdrdy:
-            try:
-                try:
-                    for x in xrange(maxbatch):
-                        packet = rread(remote,2000)
-                        
-                        #rr += 1
-                        
-                        if crypto_mode:
-                            packet = decrypt_(packet, crypter)
-                            if not packet:
-                                continue
-                        elif not packet:
-                            if not udp and packet == "":
-                                # Connection broken, try to reconnect (or just die)
-                                raise RuntimeError, "Connection broken"
-                            else:
-                                continue
-
-                        bkbuf.append(packet)
-                        
-                        if not rnonblock or len(bkbuf) >= maxbkbuf:
-                            break
-                except OSError,e:
-                    # This except handles the entire While block on PURPOSE
-                    # as an optimization (setting a try/except block is expensive)
-                    # The only operation that can raise this exception is rread
-                    if e.errno not in retrycodes:
-                        raise
-            except Exception, e:
-                if reconnect is not None:
-                    # in UDP mode, sometimes connected sockets can return a connection refused
-                    # on read. Give the caller a chance to reconnect
-                    remote = reconnect()
-                    if hasattr(remote, 'fileno'):
-                        remote_fd = remote.fileno()
-                elif not udp:
-                    # in UDP mode, we ignore errors - packet loss man...
-                    raise
-                traceback.print_exc(file=sys.stderr)
-
-        if bwlimit:
-            tnow = tget()
-            delta = tnow - lastbwtime
-            if delta > 0.001:
-                delta = int(bwlimit * delta)
-                if delta > 0:
-                    bwfree = min(bwfree+delta, maxbwfree)
-                    lastbwtime = tnow
-        
-        #print >>sys.stderr, "rr:%d\twr:%d\trt:%d\twt:%d" % (rr,wr,rt,wt)
-
-def udp_connect(TERMINATE, local_addr, local_port, peer_addr, peer_port):
-    rsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
-    retrydelay = 1.0
-    for i in xrange(30):
-        # TERMINATE is a array. An item can be added to TERMINATE, from
-        # outside this function to force termination of the loop
-        if TERMINATE:
-            raise OSError, "Killed"
-        try:
-            rsock.bind((local_addr, local_port))
-            break
-        except socket.error:
-            # wait a while, retry
-            print >>sys.stderr, "%s: Could not bind. Retrying in a sec..." % (time.strftime('%c'),)
-            time.sleep(min(30.0,retrydelay))
-            retrydelay *= 1.1
-    else:
-        rsock.bind((local_addr, local_port))
-    print >>sys.stderr, "Listening UDP at: %s:%d" % (local_addr, local_port)
-    print >>sys.stderr, "Connecting UDP to: %s:%d" % (peer_addr, peer_port)
-    rsock.connect((peer_addr, peer_port))
-    return rsock
-
-def udp_handshake(TERMINATE, rsock):
-    endme = False
-    def keepalive():
-        while not endme and not TERMINATE:
-            try:
-                rsock.send('')
-            except:
-                pass
-            time.sleep(1)
-        try:
-            rsock.send('')
-        except:
-            pass
-    keepalive_thread = threading.Thread(target=keepalive)
-    keepalive_thread.start()
-    for i in xrange(900):
-        if TERMINATE:
-            raise OSError, "Killed"
-        try:
-            heartbeat = rsock.recv(10)
-            break
-        except:
-            time.sleep(1)
-    else:
-        heartbeat = rsock.recv(10)
-    endme = True
-    keepalive_thread.join()
-
-def udp_establish(TERMINATE, local_addr, local_port, peer_addr, peer_port):
-    rsock = udp_connect(TERMINATE, local_addr, local_port, peer_addr,
-            peer_port)
-    udp_handshake(TERMINATE, rsock)
-    return rsock 
-
-def tcp_connect(TERMINATE, stop, rsock, peer_addr, peer_port):
-    sock = None
-    retrydelay = 1.0
-    # The peer has a firewall that prevents a response to the connect, we 
-    # will be forever blocked in the connect, so we put a reasonable timeout.
-    rsock.settimeout(10) 
-    # We wait for 
-    for i in xrange(30):
-        if stop:
-            break
-        if TERMINATE:
-            raise OSError, "Killed"
-        try:
-            rsock.connect((peer_addr, peer_port))
-            sock = rsock
-            break
-        except socket.error:
-            # wait a while, retry
-            print >>sys.stderr, "%s: Could not connect. Retrying in a sec..." % (time.strftime('%c'),)
-            time.sleep(min(30.0,retrydelay))
-            retrydelay *= 1.1
-    else:
-        rsock.connect((peer_addr, peer_port))
-        sock = rsock
-    if sock:
-        print >>sys.stderr, "tcp_connect: TCP sock connected to remote %s:%s" % (peer_addr, peer_port)
-        sock.settimeout(0) 
-        
-        print >>sys.stderr, "tcp_connect: disabling NAGLE"
-        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-    return sock
-
-def tcp_listen(TERMINATE, stop, lsock, local_addr, local_port):
-    sock = None
-    retrydelay = 1.0
-    # We try to bind to the local virtual interface. 
-    # It might not exist yet so we wait in a loop.
-    for i in xrange(30):
-        if stop:
-            break
-        if TERMINATE:
-            raise OSError, "Killed"
-        try:
-            lsock.bind((local_addr, local_port))
-            break
-        except socket.error:
-            # wait a while, retry
-            print >>sys.stderr, "%s: Could not bind. Retrying in a sec..." % (time.strftime('%c'),)
-            time.sleep(min(30.0,retrydelay))
-            retrydelay *= 1.1
-    else:
-        lsock.bind((local_addr, local_port))
-
-    print >>sys.stderr, "tcp_listen: TCP sock listening in local sock %s:%s" % (local_addr, local_port)
-    # Now we wait until the other side connects. 
-    # The other side might not be ready yet, so we also wait in a loop for timeouts.
-    timeout = 1
-    lsock.listen(1)
-    for i in xrange(30):
-        if TERMINATE:
-            raise OSError, "Killed"
-        rlist, wlist, xlist = select.select([lsock], [], [], timeout)
-        if stop:
-            break
-        if lsock in rlist:
-            sock,raddr = lsock.accept()
-            print >>sys.stderr, "tcp_listen: TCP connection accepted in local sock %s:%s" % (local_addr, local_port)
-            break
-        timeout += 5
-    return sock
-
-def tcp_handshake(rsock, listen, hand):
-    # we are going to use a barrier algorithm to decide wich side listen.
-    # each side will "roll a dice" and send the resulting value to the other 
-    # side. 
-    win = False
-    rsock.settimeout(10)
-    try:
-        rsock.send(hand)
-        peer_hand = rsock.recv(4)
-        if not peer_hand:
-            print >>sys.stderr, "tcp_handshake: connection reset by peer"
-            return False
-        else:
-            print >>sys.stderr, "tcp_handshake: hand %r, peer_hand %r" % (hand, peer_hand)
-        if hand < peer_hand:
-            if listen:
-                win = True
-        elif hand > peer_hand:
-            if not listen:
-                win = True
-    finally:
-        rsock.settimeout(0)
-    return win
-
-def tcp_establish(TERMINATE, local_addr, local_port, peer_addr, peer_port):
-    def listen(stop, hand, lsock, lresult):
-        win = False
-        rsock = tcp_listen(TERMINATE, stop, lsock, local_addr, local_port)
-        if rsock:
-            win = tcp_handshake(rsock, True, hand)
-            stop.append(True)
-        lresult.append((win, rsock))
-
-    def connect(stop, hand, rsock, rresult):
-        win = False
-        rsock = tcp_connect(TERMINATE, stop, rsock, peer_addr, peer_port)
-        if rsock:
-            win = tcp_handshake(rsock, False, hand)
-            stop.append(True)
-        rresult.append((win, rsock))
-  
-    end = False
-    sock = None
-    for i in xrange(0, 50):
-        if end:
-            break
-        if TERMINATE:
-            raise OSError, "Killed"
-        hand = struct.pack("!L", random.randint(0, 2**30))
-        stop = []
-        lresult = []
-        rresult = []
-        lsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
-        rsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
-        listen_thread = threading.Thread(target=listen, args=(stop, hand, lsock, lresult))
-        connect_thread = threading.Thread(target=connect, args=(stop, hand, rsock, rresult))
-        connect_thread.start()
-        listen_thread.start()
-        connect_thread.join()
-        listen_thread.join()
-        (lwin, lrsock) = lresult[0]
-        (rwin, rrsock) = rresult[0]
-        if not lrsock or not rrsock:
-            if not lrsock:
-                sock = rrsock
-            if not rrsock:
-                sock = lrsock
-            end = True
-        # both socket are connected
-        else:
-           if lwin:
-                sock = lrsock
-                end = True
-           elif rwin: 
-                sock = rrsock
-                end = True
-
-    if not sock:
-        raise OSError, "Error: tcp_establish could not establish connection."
-    return sock
-
-
diff --git a/src/nepi/util/tunchannel_impl.py b/src/nepi/util/tunchannel_impl.py
deleted file mode 100644 (file)
index 4123a64..0000000
+++ /dev/null
@@ -1,335 +0,0 @@
-import os
-import sys
-import random
-import threading
-import socket
-import select
-import weakref
-import time
-
-from tunchannel import tun_fwd, udp_establish, tcp_establish
-
-class TunChannel(object):
-    """
-    Helper box class that implements most of the required boilerplate
-    for tunnelling cross connections.
-    
-    The class implements a threaded forwarder that runs in the
-    testbed controller process. It takes several parameters that
-    can be given by directly setting attributes:
-    
-        tun_port/addr/proto/cipher: information about the local endpoint.
-            The addresses here should be externally-reachable,
-            since when listening or when using the UDP protocol,
-            connections to this address/port will be attempted
-            by remote endpoitns.
-        
-        peer_port/addr/proto/cipher: information about the remote endpoint.
-            Usually, you set these when the cross connection 
-            initializer/completion functions are invoked (both).
-        
-        tun_key: the agreed upon encryption key.
-        
-        with_pi: set if the incoming packet stream (see tun_socket)
-            contains PI headers - if so, they will be stripped.
-        
-        ethernet_mode: set if the incoming packet stream is
-            composed of ethernet frames (as opposed of IP packets).
-        
-        tun_socket: a socket or file object that can be read
-            from and written to. Packets will be read when available,
-            remote packets will be forwarded as writes.
-            A socket should be of type SOCK_SEQPACKET (or SOCK_DGRAM
-            if not possible), a file object should preserve packet
-            boundaries (ie, a pipe or TUN/TAP device file descriptor).
-        
-        trace_target: a file object where trace output will be sent.
-            It cannot be changed after launch.
-            By default, it's sys.stderr
-    """
-    
-    def __init__(self):
-        # Some operational attributes
-        self.ethernet_mode = True
-        self.with_pi = False
-        
-        # These get initialized when the channel is configured
-        # They're part of the TUN standard attribute set
-        self.tun_port = None
-        self.tun_addr = None
-        self.tun_cipher = 'AES'
-        
-        # These get initialized when the channel is connected to its peer
-        self.peer_proto = None
-        self.peer_addr = None
-        self.peer_port = None
-        self.peer_cipher = None
-        
-        # These get initialized when the channel is connected to its iface
-        self.tun_socket = None
-
-        # same as peer proto, but for execute-time standard attribute lookups
-        self.tun_proto = None 
-        
-        # some state
-        self.prepared = False
-        self._terminate = [] # terminate signaller
-        self._suspend = [] # suspend signaller
-        self._exc = [] # exception store, to relay exceptions from the forwarder thread
-        self._connected = threading.Event()
-        self._forwarder_thread = None
-       
-        # trace to stderr
-        self.stderr = sys.stderr
-        
-        # Generate an initial random cryptographic key to use for tunnelling
-        # Upon connection, both endpoints will agree on a common one based on
-        # this one.
-        self.tun_key = os.urandom(32).encode("base64").strip()
-        
-
-    def __str__(self):
-        return "%s<%s %s:%s %s %s:%s %s>" % (
-            self.__class__.__name__,
-            self.tun_proto, 
-            self.tun_addr, self.tun_port,
-            self.peer_proto, 
-            self.peer_addr, self.peer_port,
-            self.tun_cipher,
-        )
-
-    def launch(self):
-        # self.tun_proto is only set if the channel is connected
-        # launch has to be a no-op in unconnected channels because
-        # it is called at configuration time, which for cross connections
-        # happens before connection.
-        if self.tun_proto:
-            if not self._forwarder_thread:
-                self._launch()
-    
-    def cleanup(self):
-        if self._forwarder_thread:
-            self.kill()
-
-    def wait(self):
-        if self._forwarder_thread:
-            self._connected.wait()
-            for exc in self._exc:
-                # Relay exception
-                eTyp, eVal, eLoc = exc
-                raise eTyp, eVal, eLoc
-
-    def kill(self):    
-        if self._forwarder_thread:
-            if not self._terminate:
-                self._terminate.append(None)
-            self._forwarder_thread.join()
-
-    def suspend(self):
-        if not self._suspend:
-            self._suspend.append(None)
-
-    def resume(self):
-        if self._suspend:
-            self._suspend.remove(None)
-
-    def _launch(self):
-        # Launch forwarder thread with a weak reference
-        # to self, so that we don't create any strong cycles
-        # and automatic refcounting works as expected
-        self._forwarder_thread = threading.Thread(
-            target = self._forwarder,
-            args = (weakref.ref(self),) )
-        self._forwarder_thread.start()
-
-    @staticmethod
-    def _forwarder(weak_self):
-        try:
-            weak_self().__forwarder(weak_self)
-        except:
-            self = weak_self()
-            
-            # store exception and wake up anyone waiting
-            self._exc.append(sys.exc_info())
-            self._connected.set()
-    
-    @staticmethod
-    def __forwarder(weak_self):
-        # grab strong reference
-        self = weak_self()
-        if not self:
-            return
-        
-        peer_port = self.peer_port
-        peer_addr = self.peer_addr
-        peer_proto= self.peer_proto
-        peer_cipher=self.peer_cipher
-
-        local_port = self.tun_port
-        local_addr = self.tun_addr
-        local_proto = self.tun_proto
-        local_cipher= self.tun_cipher
-        
-        stderr = self.stderr
-        ether_mode = self.ethernet_mode
-        with_pi = self.with_pi
-        
-        if local_proto != peer_proto:
-            raise RuntimeError, "Peering protocol mismatch: %s != %s" % (local_proto, peer_proto)
-
-        if local_cipher != peer_cipher:
-            raise RuntimeError, "Peering cipher mismatch: %s != %s" % (local_cipher, peer_cipher)
-        
-        if not peer_port or not peer_addr:
-            raise RuntimeError, "Misconfigured peer for: %s" % (self,)
-
-        if not local_port or not local_addr:
-            raise RuntimeError, "Misconfigured TUN: %s" % (self,)
-        
-        TERMINATE = self._terminate
-        SUSPEND = self._suspend
-        cipher_key = self.tun_key
-        tun = self.tun_socket
-        udp = local_proto == 'udp'
-
-        if not tun:
-            raise RuntimeError, "Unconnected TUN channel %s" % (self,)
-
-        if local_proto == 'udp':
-            rsock = udp_establish(TERMINATE, local_addr, local_port, 
-                    peer_addr, peer_port)
-            remote = os.fdopen(rsock.fileno(), 'r+b', 0)
-        elif local_proto == 'tcp':
-            rsock = tcp_establish(TERMINATE, local_addr, local_port,
-                    peer_addr, peer_port)
-            remote = os.fdopen(rsock.fileno(), 'r+b', 0)
-        else:
-            raise RuntimeError, "Bad protocol for %s: %r" % (self,local_proto)
-
-        # notify that we're ready
-        self._connected.set()
-        
-        # drop strong reference
-        del self
-        
-        print >>sys.stderr, "Connected"
-        tun_fwd(tun, remote,
-            with_pi = with_pi, 
-            ether_mode = ether_mode, 
-            cipher_key = cipher_key, 
-            udp = udp, 
-            TERMINATE = TERMINATE,
-            SUSPEND = SUSPEND,
-            stderr = stderr,
-            cipher = local_cipher
-        )
-        
-        tun.close()
-        remote.close()
-
-
-def create_tunchannel(testbed_instance, guid, devnull = []):
-    """
-    TunChannel factory for metadata.
-    By default, silences traceing.
-    
-    You can override the created element's attributes if you will.
-    """
-    if not devnull:
-        # just so it's not open if not needed
-        devnull.append(open("/dev/null","w"))
-    element = TunChannel()
-    element.stderr = devnull[0] # silence tracing
-    testbed_instance._elements[guid] = element
-
-def preconfigure_tunchannel(testbed_instance, guid):
-    """
-    TunChannel preconfiguration.
-    
-    It initiates the forwarder thread for listening tcp channels.
-    
-    Takes the public address from the operating system, so it should be adequate
-    for most situations when the TunChannel forwarder thread runs in the same
-    process as the testbed controller.
-    """
-    element = testbed_instance._elements[guid]
-    
-    # Find external interface, if any
-    public_addr = os.popen(
-        "/sbin/ifconfig "
-        "| grep $(ip route | grep default | awk '{print $3}' "
-                "| awk -F. '{print $1\"[.]\"$2}' | head -1) "
-        "| head -1 | awk '{print $2}' "
-        "| awk -F : '{print $2}'").read().rstrip()
-    element.tun_addr = public_addr
-
-    # Set standard TUN attributes
-    if not element.tun_port and element.tun_addr:
-        element.tun_port = 15000 + int(guid)
-
-def postconfigure_tunchannel(testbed_instance, guid):
-    """
-    TunChannel preconfiguration.
-    
-    Initiates the forwarder thread for connecting tcp channels or 
-    udp channels in general.
-    
-    Should be adequate for most implementations.
-    """
-    element = testbed_instance._elements[guid]
-   
-    element.launch()
-
-def crossconnect_tunchannel_peer_init(proto, testbed_instance, tun_guid, peer_data,
-        preconfigure_tunchannel = preconfigure_tunchannel):
-    """
-    Cross-connection initialization.
-    Should be adequate for most implementations.
-    
-    For use in metadata, bind the first "proto" argument with the connector type. Eg:
-    
-        conn_init = functools.partial(crossconnect_tunchannel_peer_init, "tcp")
-    
-    If you don't use the stock preconfigure function, specify your own as a keyword argument.
-    """
-    tun = testbed_instance._elements[tun_guid]
-    tun.peer_addr = peer_data.get("tun_addr")
-    tun.peer_proto = peer_data.get("tun_proto") or proto
-    tun.peer_port = peer_data.get("tun_port")
-    tun.peer_cipher = peer_data.get("tun_cipher")
-    tun.tun_key = min(tun.tun_key, peer_data.get("tun_key"))
-    tun.tun_proto = proto
-  
-    preconfigure_tunchannel(testbed_instance, tun_guid)
-
-def crossconnect_tunchannel_peer_compl(proto, testbed_instance, tun_guid, peer_data,
-        postconfigure_tunchannel = postconfigure_tunchannel):
-    """
-    Cross-connection completion.
-    Should be adequeate for most implementations.
-    
-    For use in metadata, bind the first "proto" argument with the connector type. Eg:
-    
-        conn_init = functools.partial(crossconnect_tunchannel_peer_compl, "tcp")
-    
-    If you don't use the stock postconfigure function, specify your own as a keyword argument.
-    """
-    # refresh (refreshable) attributes for second-phase
-    tun = testbed_instance._elements[tun_guid]
-    tun.peer_addr = peer_data.get("tun_addr")
-    tun.peer_proto = peer_data.get("tun_proto") or proto
-    tun.peer_port = peer_data.get("tun_port")
-    tun.peer_cipher = peer_data.get("tun_cipher")
-   
-    postconfigure_tunchannel(testbed_instance, tun_guid)
-
-def prestart_tunchannel(testbed_instance, guid):
-    """
-    Wait for the channel forwarder to be up and running.
-    
-    Useful as a pre-start function to assure proper startup synchronization,
-    be certain to start TunChannels before applications that might require them.
-    """
-    element = testbed_instance.elements[guid]
-    element.wait()
-
diff --git a/src/nepi/util/validation.py b/src/nepi/util/validation.py
deleted file mode 100644 (file)
index f94c248..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import ipaddr
-import re
-
-def is_enum(attribute, value):
-    return isinstance(value, str) and value in attribute.allowed
-
-def is_bool(attribute, value):
-    return isinstance(value, bool)
-
-def is_double(attribute, value):
-    return isinstance(value, float)
-
-def is_number(attribute, value):
-    return isinstance(value, (float,int,long))
-
-def is_integer(attribute, value, min=None, max=None):
-    if not isinstance(value, int):
-        return False
-    if min is not None and value < min:
-        return False
-    if max is not None and value > max:
-        return False
-    return True
-
-def is_integer_range(min=None, max=None):
-    def is_integer_range(attribute, value):
-        if not isinstance(value, int):
-            return False
-        if min is not None and value < min:
-            return False
-        if max is not None and value > max:
-            return False
-        return True
-    return is_integer_range
-
-def is_number_range(min=None, max=None):
-    def is_number_range(attribute, value):
-        if not isinstance(value, (float,int,long)):
-            return False
-        if min is not None and value < min:
-            return False
-        if max is not None and value > max:
-            return False
-        return True
-    return is_number_range
-
-
-def is_string(attribute, value):
-    return isinstance(value, str)
-
-def is_time(attribute, value):
-    return isinstance(value, str) # TODO: Missing validation!
-
-def is_ip4_address(attribute, value):
-    try:
-        ipaddr.IPv4Address(value)
-    except ipaddr.AddressValueError:
-        return False
-    return True
-
-def is_ip6_address(attribute, value):
-    try:
-        ipaddr.IPv6Address(value)
-    except ipaddr.AddressValueError:
-        return False
-    return True
-
-def is_ip_address(attribute, value):
-    if not is_ip4_address(attribute, value) and \
-            not is_ip6_address(attribute, value):
-        return False
-    return True
-
-# TODO: Allow netrefs!
-def is_ref_address(attribute, value):
-    if not is_ip4_address(attribute, value) and \
-            not is_ip6_address(attribute, value):
-        return False
-    return True
-
-def is_mac_address(attribute, value):
-    regex = r'^([0-9a-zA-Z]{0,2}:)*[0-9a-zA-Z]{0,2}'
-    found = re.search(regex, value)
-    if not found or value.count(':') != 5:
-        return False
-    return True
-
diff --git a/test/core/design.py b/test/core/design.py
deleted file mode 100755 (executable)
index 800643d..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.util import tags
-import mock.metadata
-import sys
-import unittest
-
-class DesignTestCase(unittest.TestCase):
-    def setUp(self):
-        # hack to add the mock testbed on the correct module path
-        sys.modules["nepi.testbeds.mock.metadata"] = mock.metadata
-
-    def test_design(self):
-        exp_desc = ExperimentDescription()
-        testbed_id = "mock"
-        provider = FactoriesProvider(testbed_id)
-        desc = exp_desc.add_testbed_description(provider)
-        desc.set_attribute_value("fake", True)
-        node1 = desc.create("Node")
-        node2 = desc.create("Node")
-        iface1 = desc.create("Interface")
-        iface1.set_attribute_value("fake", True)
-        addr1 = iface1.add_address()
-        addr2 = iface1.add_address()
-        addr3 = iface1.add_address()
-        self.assertRaises(RuntimeError, iface1.add_address)
-        node1.connector("devs").connect(iface1.connector("node"))
-        iface2 = desc.create("Interface")
-        iface2.set_attribute_value("fake", True)
-        node2.connector("devs").connect(iface2.connector("node"))
-        iface1.connector("iface").connect(iface2.connector("iface"))
-        app = desc.create("Application")
-        app.connector("node").connect(node1.connector("apps"))
-        app.enable_trace("fake")
-
-        self.assertEquals(node1.tags, [tags.MOBILE, tags.NODE, tags.ALLOW_ROUTES])
-
-        xml = exp_desc.to_xml()
-        exp_desc2 = ExperimentDescription()
-        exp_desc2.from_xml(xml)
-        xml2 = exp_desc2.to_xml()
-        self.assertTrue(xml == xml2)
-
-if __name__ == '__main__':
-    unittest.main()
-
diff --git a/test/core/execute.py b/test/core/execute.py
deleted file mode 100755 (executable)
index 5e42a33..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import getpass
-from nepi.util import tags
-from nepi.util.constants import ApplicationStatus as AS
-import mock
-import mock.metadata 
-import sys
-import time
-import unittest
-
-class ExecuteTestCase(unittest.TestCase):
-    def setUp(self):
-        sys.modules["nepi.testbeds.mock.metadata"] = mock.metadata
-    
-    def make_mock_test(self, instance):
-        instance.defer_configure("fake", True)
-        instance.defer_create(2, "Node")
-        instance.defer_create(3, "Node")
-        instance.defer_create(4, "Node")
-        instance.defer_create(5, "Interface")
-        instance.defer_create_set(5, "fake", True)
-        instance.defer_connect(2, "devs", 5, "node")
-        instance.defer_create(6, "Interface")
-        instance.defer_create_set(6, "fake", True)
-        instance.defer_connect(3, "devs", 6, "node")
-        instance.defer_connect(5, "iface", 6, "iface")
-        instance.defer_create(7, "Application")
-        instance.defer_add_trace(7, "fake")
-        instance.defer_connect(7, "node", 2, "apps")
-    
-    def do_presteps(self, instance):
-        instance.do_setup()
-        instance.do_create()
-        instance.do_connect_init()
-        instance.do_connect_compl()
-        instance.do_preconfigure()
-        instance.do_configure()
-        instance.do_prestart()
-
-    def test_execute(self):
-        instance = mock.TestbedController()
-        
-        self.make_mock_test(instance)
-        self.do_presteps(instance)
-
-        instance.start()
-        attr_list = instance.get_attribute_list(5)
-        self.assertEquals(attr_list, ["test", "fake", "cross", "maxAddresses", "label"])
-        while instance.status(7) != AS.STATUS_FINISHED:
-            time.sleep(0.5)
-        app_result = instance.trace(7, "fake")
-        comp_result = """PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
-
---- 10.0.0.2 ping statistics ---
-1 packets transmitted, 1 received, 0% packet loss, time 0ms
-"""
-        self.assertTrue(app_result.startswith(comp_result))
-
-        traces_info = instance.traces_info()
-        expected_traces_info = dict({
-            7 : dict({
-                'fake': dict({
-                    'host': 'localhost', 
-                    'user': getpass.getuser(), 
-                    'filepath': '<test>'
-                    })
-                })
-            })
-        self.assertEquals(traces_info, expected_traces_info)
-
-        instance.stop()
-        instance.shutdown()
-
-if __name__ == '__main__':
-    unittest.main()
-
diff --git a/test/core/integration.py b/test/core/integration.py
deleted file mode 100755 (executable)
index df4f211..0000000
+++ /dev/null
@@ -1,429 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import getpass
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.util import proxy
-from nepi.util.constants import DeploymentConfiguration as DC
-import mock
-import mock.metadata
-import mock2
-import mock2.metadata
-import os
-import shutil
-import sys
-import tempfile
-import test_util
-import time
-import unittest
-
-class ExecuteTestCase(unittest.TestCase):
-    def setUp(self):
-        sys.modules["nepi.testbeds.mock.metadata"] = mock.metadata
-        sys.modules["nepi.testbeds.mock"] = mock
-        sys.modules["nepi.testbeds.mock2.metadata"] = mock2.metadata
-        sys.modules["nepi.testbeds.mock2"] = mock2
-        self.root_dir = tempfile.mkdtemp()
-
-    def tearDown(self):
-        try:
-            shutil.rmtree(self.root_dir)
-        except:
-            # retry
-            time.sleep(0.1)
-            shutil.rmtree(self.root_dir)
-
-    def make_testbed(self, exp_desc, testbed_id):
-        provider = FactoriesProvider(testbed_id)
-        desc = exp_desc.add_testbed_description(provider)
-        desc.set_attribute_value("fake", True)
-        node1 = desc.create("Node")
-        node2 = desc.create("Node")
-        iface1 = desc.create("Interface")
-        iface1.set_attribute_value("fake", True)
-        node1.connector("devs").connect(iface1.connector("node"))
-        iface2 = desc.create("Interface")
-        iface2.set_attribute_value("fake", True)
-        node2.connector("devs").connect(iface2.connector("node"))
-        iface1.connector("iface").connect(iface2.connector("iface"))
-        app = desc.create("Application")
-        app.connector("node").connect(node1.connector("apps"))
-        app.enable_trace("fake")
-        
-        return exp_desc, desc, app, node1, node2, iface1, iface2
-
-    def make_test_experiment(self):
-        exp_desc = ExperimentDescription()
-        testbed_id = "mock"
-        return self.make_testbed(exp_desc, testbed_id)
-
-    def make_cross_test_experiment(self):
-        exp_desc = ExperimentDescription()
-        testbed_id1 = "mock"
-        testbed_id2 = "mock2"
-        exp_desc, desc1, app1, node11, node12, iface11, iface12 = \
-                self.make_testbed(exp_desc, testbed_id1)
-        exp_desc, desc2, app2, node21, node22, iface21, iface22 = \
-                 self.make_testbed(exp_desc, testbed_id2)
-        iface12.connector("cross").connect(iface21.connector("cross"))
-
-        return exp_desc, desc1, desc2, iface12, iface21
-
-    def test_single_process_cross_integration(self):
-        exp_desc, desc1, desc2, iface12, iface21 = \
-                self.make_cross_test_experiment()
-        xml = exp_desc.to_xml()
-        access_config = None
-        controller = proxy.create_experiment_controller(xml, access_config)
-
-        controller.start()
-        cross1 = controller.get(iface12.guid, "cross")
-        cross2 = controller.get(iface21.guid, "cross")
-        self.assertTrue(cross1 == cross2 == True)
-        controller.stop()
-        controller.shutdown()
-
-    def test_single_process_integration(self):
-        exp_desc, desc, app, node1, node2, iface1, iface2 = self.make_test_experiment()
-        xml = exp_desc.to_xml()
-        access_config = None
-        controller = proxy.create_experiment_controller(xml, access_config)
-
-        controller.start()
-        started_time = controller.started_time
-        self.assertTrue(started_time < time.time())
-        while not controller.is_finished(app.guid):
-            time.sleep(0.5)
-        fake_result = controller.trace(app.guid, "fake")
-        comp_result = """PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
-
---- 10.0.0.2 ping statistics ---
-1 packets transmitted, 1 received, 0% packet loss, time 0ms
-"""
-        self.assertTrue(fake_result.startswith(comp_result))
-
-        self.assertEquals(controller.get_testbed_id(node1.guid), "mock")
-        self.assertEquals(controller.get_testbed_version(node1.guid), "0.1")
-        self.assertEquals(controller.get_factory_id(node1.guid), "Node")
-
-        controller.stop()
-        stopped_time = controller.stopped_time
-        self.assertTrue(stopped_time < time.time())
-        controller.shutdown()
-
-    def test_daemonized_controller_integration(self):
-        exp_desc, desc, app, node1, node2, iface1, iface2 = self.make_test_experiment()
-        xml = exp_desc.to_xml()
-        access_config = proxy.AccessConfiguration()
-        access_config.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        access_config.set_attribute_value(DC.ROOT_DIRECTORY, self.root_dir)
-        access_config.set_attribute_value(DC.DEPLOYMENT_ENVIRONMENT_SETUP, 
-            "export PYTHONPATH=%r:%r:$PYTHONPATH "
-            "export NEPI_TESTBEDS='mock:mock mock2:mock2' " % (
-                os.path.dirname(os.path.dirname(mock.__file__)),
-                os.path.dirname(os.path.dirname(mock2.__file__)),))
-
-        controller = proxy.create_experiment_controller(xml, access_config)
-
-        controller.start()
-        started_time = controller.started_time
-        self.assertTrue(started_time < time.time())
-        while not controller.is_finished(app.guid):
-            time.sleep(0.5)
-        fake_result = controller.trace(app.guid, "fake")
-        comp_result = """PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
-
---- 10.0.0.2 ping statistics ---
-1 packets transmitted, 1 received, 0% packet loss, time 0ms
-"""
-        self.assertTrue(fake_result.startswith(comp_result))
-
-        self.assertEquals(controller.get_testbed_id(node1.guid), "mock")
-        self.assertEquals(controller.get_testbed_version(node1.guid), "0.1")
-        self.assertEquals(controller.get_factory_id(node1.guid), "Node")
-
-        controller.stop()
-        stopped_time = controller.stopped_time
-        self.assertTrue(stopped_time < time.time())
-        controller.shutdown()
-
-    def test_daemonized_testbed_integration(self):
-        exp_desc, desc, app, node1, node2, iface1, iface2 = self.make_test_experiment()
-        
-        desc.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        desc.set_attribute_value(DC.ROOT_DIRECTORY, self.root_dir)
-        desc.set_attribute_value(DC.DEPLOYMENT_ENVIRONMENT_SETUP, 
-            "export PYTHONPATH=%r:%r:$PYTHONPATH "
-            "export NEPI_TESTBEDS='mock:mock mock2:mock2' " % (
-                os.path.dirname(os.path.dirname(mock.__file__)),
-                os.path.dirname(os.path.dirname(mock2.__file__)),))
-
-        xml = exp_desc.to_xml()
-        
-        controller = proxy.create_experiment_controller(xml, access_config = None)
-
-        controller.start()
-        while not controller.is_finished(app.guid):
-            time.sleep(0.5)
-        fake_result = controller.trace(app.guid, "fake")
-        comp_result = """PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
-
---- 10.0.0.2 ping statistics ---
-1 packets transmitted, 1 received, 0% packet loss, time 0ms
-"""
-        self.assertTrue(fake_result.startswith(comp_result))
-
-        self.assertEquals(controller.get_testbed_id(node1.guid), "mock")
-        self.assertEquals(controller.get_testbed_version(node1.guid), "0.1")
-        self.assertEquals(controller.get_factory_id(node1.guid), "Node")
-
-        controller.stop()
-        controller.shutdown()
-
-    def test_daemonized_all_integration(self):
-        exp_desc, desc, app, node1, node2, iface1, iface2 = self.make_test_experiment()
-        
-        desc.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        inst_root_dir = os.path.join(self.root_dir, "instance")
-        os.mkdir(inst_root_dir)
-        desc.set_attribute_value(DC.ROOT_DIRECTORY, inst_root_dir)
-        
-        xml = exp_desc.to_xml()
-        
-        access_config = proxy.AccessConfiguration()
-        access_config.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        access_config.set_attribute_value(DC.ROOT_DIRECTORY, self.root_dir)
-        access_config.set_attribute_value(DC.DEPLOYMENT_ENVIRONMENT_SETUP, 
-            "export PYTHONPATH=%r:%r:$PYTHONPATH "
-            "export NEPI_TESTBEDS='mock:mock mock2:mock2' " % (
-                os.path.dirname(os.path.dirname(mock.__file__)),
-                os.path.dirname(os.path.dirname(mock2.__file__)),))
-        controller = proxy.create_experiment_controller(xml, access_config)
-
-        controller.start()
-        while not controller.is_finished(app.guid):
-            time.sleep(0.5)
-        fake_result = controller.trace(app.guid, "fake")
-        comp_result = """PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
-
---- 10.0.0.2 ping statistics ---
-1 packets transmitted, 1 received, 0% packet loss, time 0ms
-"""
-        self.assertTrue(fake_result.startswith(comp_result))
-
-        self.assertEquals(controller.get_testbed_id(node1.guid), "mock")
-        self.assertEquals(controller.get_testbed_version(node1.guid), "0.1")
-        self.assertEquals(controller.get_factory_id(node1.guid), "Node")
-
-        traces_info = controller.traces_info()
-        expected_traces_info = dict({
-            1: dict({ # testbed guid
-                6: dict({ # element guid
-                    'fake': dict({ # trace_id
-                        'host': 'localhost', 
-                        'user': getpass.getuser(), 
-                        'filepath': '<test>'
-                        })
-                    })
-                })
-            })
-        self.assertEquals(traces_info, expected_traces_info)
-
-        controller.stop()
-        controller.shutdown()
-
-    def test_daemonized_all_integration_recovery(self):
-        exp_desc, desc, app, node1, node2, iface1, iface2 = self.make_test_experiment()
-        
-        desc.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        inst_root_dir = os.path.join(self.root_dir, "instance")
-        os.mkdir(inst_root_dir)
-        desc.set_attribute_value(DC.ROOT_DIRECTORY, inst_root_dir)
-        
-        xml = exp_desc.to_xml()
-        
-        access_config = proxy.AccessConfiguration()
-        access_config.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        access_config.set_attribute_value(DC.ROOT_DIRECTORY, self.root_dir)
-        access_config.set_attribute_value(DC.DEPLOYMENT_ENVIRONMENT_SETUP, 
-            "export PYTHONPATH=%r:%r:$PYTHONPATH "
-            "export NEPI_TESTBEDS='mock:mock mock2:mock2' " % (
-                os.path.dirname(os.path.dirname(mock.__file__)),
-                os.path.dirname(os.path.dirname(mock2.__file__)),))
-        controller = proxy.create_experiment_controller(xml, access_config)
-
-        controller.start()
-        while not controller.is_finished(app.guid):
-            time.sleep(0.5)
-        fake_result = controller.trace(app.guid, "fake")
-        comp_result = """PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
-
---- 10.0.0.2 ping statistics ---
-1 packets transmitted, 1 received, 0% packet loss, time 0ms
-"""
-        self.assertTrue(fake_result.startswith(comp_result))
-
-        self.assertEquals(controller.get_testbed_id(node1.guid), "mock")
-        self.assertEquals(controller.get_testbed_version(node1.guid), "0.1")
-        self.assertEquals(controller.get_factory_id(node1.guid), "Node")
-
-        # controller dies
-        del controller
-        
-        # recover
-        access_config.set_attribute_value(DC.RECOVER,True)
-        controller = proxy.create_experiment_controller(xml, access_config)
-        
-        # test recovery
-        self.assertTrue(controller.is_finished(app.guid))
-        fake_result = controller.trace(app.guid, "fake")
-        self.assertTrue(fake_result.startswith(comp_result))
-        
-        controller.stop()
-        controller.shutdown()
-
-    def test_reference_expressions(self):
-        exp_desc, desc, app, node1, node2, iface1, iface2 = self.make_test_experiment()
-        
-        iface1.set_attribute_value("label", "some")
-        addr = iface1.add_address()
-        addr.set_attribute_value("Address", "10.0.0.2")
-        iface2.set_attribute_value("test", "{#[some].addr[0].[Address]#}")
-        
-        xml = exp_desc.to_xml()
-        access_config = None
-        controller = proxy.create_experiment_controller(xml, access_config)
-        controller.start()
-        while not controller.is_finished(app.guid):
-            time.sleep(0.5)
-        fake_result = controller.trace(app.guid, "fake")
-        comp_result = """PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
-
---- 10.0.0.2 ping statistics ---
-1 packets transmitted, 1 received, 0% packet loss, time 0ms
-"""
-        
-        self.assertTrue(fake_result.startswith(comp_result))
-        
-        self.assertEqual(
-            controller._testbeds[desc.guid].get(iface2.guid, "test"),
-            addr.get_attribute_value("Address") )
-        
-        controller.stop()
-        controller.shutdown()
-
-    def test_testbed_reference_expressions(self):
-        exp_desc, desc, app, node1, node2, iface1, iface2 = self.make_test_experiment()
-        
-        iface1.set_attribute_value("label", "some")
-        addr = iface1.add_address()
-        addr.set_attribute_value("Address", "10.0.0.2")
-
-        desc2 = exp_desc.add_testbed_description(
-            FactoriesProvider("mock2") )
-        desc2.set_attribute_value(DC.DEPLOYMENT_HOST, "{#[some].addr[0].[Address]#}")
-        # DC.DEPLOYMENT_HOST should be ignored if DC.DEPLOYMENT_CONNECTION is not set
-        # But it should be resolved anyway
-        
-        xml = exp_desc.to_xml()
-        access_config = None
-        controller = proxy.create_experiment_controller(xml, access_config)
-        controller.start()
-        while not controller.is_finished(app.guid):
-            time.sleep(0.5)
-        fake_result = controller.trace(app.guid, "fake")
-        comp_result = """PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
-
---- 10.0.0.2 ping statistics ---
-1 packets transmitted, 1 received, 0% packet loss, time 0ms
-"""
-        self.assertTrue(fake_result.startswith(comp_result))
-
-        self.assertEqual(
-            controller._deployment_config[desc2.guid]
-                .get_attribute_value(DC.DEPLOYMENT_HOST),
-            addr.get_attribute_value("Address") )
-        
-        controller.stop()
-        controller.shutdown()
-
-    def test_ssh_daemonized_integration(self):
-        exp_desc, desc, app, node1, node2, iface1, iface2 = self.make_test_experiment()
-        env = test_util.test_environment()
-        
-        desc.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        inst_root_dir = os.path.join(self.root_dir, "instance")
-        os.mkdir(inst_root_dir)
-        desc.set_attribute_value(DC.ROOT_DIRECTORY, inst_root_dir)
-        xml = exp_desc.to_xml()
-        
-        access_config = proxy.AccessConfiguration()
-        access_config.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        access_config.set_attribute_value(DC.ROOT_DIRECTORY, self.root_dir)
-        access_config.set_attribute_value(DC.DEPLOYMENT_COMMUNICATION, DC.ACCESS_SSH)
-        access_config.set_attribute_value(DC.DEPLOYMENT_PORT, env.port)
-        access_config.set_attribute_value(DC.USE_AGENT, True)
-        access_config.set_attribute_value(DC.DEPLOYMENT_ENVIRONMENT_SETUP, 
-            "export PYTHONPATH=%r:%r:$PYTHONPATH "
-            "export NEPI_TESTBEDS='mock:mock mock2:mock2' " % (
-                os.path.dirname(os.path.dirname(mock.__file__)),
-                os.path.dirname(os.path.dirname(mock2.__file__)),))
-        controller = proxy.create_experiment_controller(xml, access_config)
-
-        try:
-            controller.start()
-            while not controller.is_finished(app.guid):
-                time.sleep(0.5)
-            fake_result = controller.trace(app.guid, "fake")
-            comp_result = """PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
-
---- 10.0.0.2 ping statistics ---
-1 packets transmitted, 1 received, 0% packet loss, time 0ms
-"""
-            self.assertTrue(fake_result.startswith(comp_result))
-        finally:
-            controller.stop()
-            controller.shutdown()
-
-    def ptest_experiment_suite(self):
-        exp_desc, desc, app, node1, node2, iface1, iface2 = self.make_test_experiment()
-       
-        xml = exp_desc.to_xml()
-
-        access_config = proxy.AccessConfiguration()
-        access_config.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-        access_config.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        access_config.set_attribute_value(DC.ROOT_DIRECTORY, self.root_dir)
-        access_config.set_attribute_value(DC.DEPLOYMENT_ENVIRONMENT_SETUP, 
-            "export PYTHONPATH=%r:%r:$PYTHONPATH "
-            "export NEPI_TESTBEDS='mock:mock mock2:mock2' " % (
-                os.path.dirname(os.path.dirname(mock.__file__)),
-                os.path.dirname(os.path.dirname(mock2.__file__)),))
-      
-        print self.root_dir
-        exp_suite = proxy.create_experiment_suite(xml, access_config, repetitions = 4)
-        exp_suite.start()
-        while not exp_suite.is_finished():
-            time.sleep(0.5)
-
-        for access_config in exp_suite.get_access_configurations():
-            access_config.set_attribute_value(DC.RECOVER, True)
-            controller = proxy.create_experiment_controller(None, access_config)
-
-            fake_result = controller.trace(app.guid, "fake")
-            comp_result = """PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
-
---- 10.0.0.2 ping statistics ---
-1 packets transmitted, 1 received, 0% packet loss, time 0ms
-"""
-            self.assertTrue(fake_result.startswith(comp_result))
-
-            self.assertEquals(controller.get_testbed_id(node1.guid), "mock")
-            self.assertEquals(controller.get_testbed_version(node1.guid), "0.1")
-            self.assertEquals(controller.get_factory_id(node1.guid), "Node")
-
-        exp_suite.shutdown()
-
-if __name__ == '__main__':
-    unittest.main()
-
diff --git a/test/design/box.py b/test/design/box.py
new file mode 100755 (executable)
index 0000000..f59c5be
--- /dev/null
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+from nepi.design.box import Box 
+
+import unittest
+
+class BoxDesignTestCase(unittest.TestCase):
+    def test_simple_design(self):
+        node1 = Box()
+        node2 = Box()
+
+        node1.label = "uno"
+        node2.label = "dos"
+
+        node1.tadd('nodo')
+        node2.tadd('mynodo')
+
+        self.assertEquals(node1.tags, set(['nodo']))
+        self.assertEquals(node2.tags, set(['mynodo']))
+       
+        node1.a.hola = "chau"
+        node2.a.hello = "bye"
+
+        self.assertEquals(node1.a.hola, "chau")
+        self.assertEquals(node2.a.hello, "bye")
+
+        node1.connect(node2)
+        
+        self.assertEquals(node1.connections, set([node2]))
+        self.assertEquals(node2.connections, set([node1]))
+        self.assertTrue(node1.is_connected(node2))
+        self.assertTrue(node2.is_connected(node1))
+
+        self.assertEquals(node1.c.dos.a.hello, "bye")
+        self.assertEquals(node2.c.uno.a.hola, "chau")
+       
+        node2.disconnect(node1)
+
+        self.assertEquals(node1.connections, set([]))
+        self.assertEquals(node2.connections, set([]))
+        self.assertFalse(node1.is_connected(node2))
+        self.assertFalse(node2.is_connected(node1))
+
+        self.assertRaises(AttributeError, node1.c.dos)
+        self.assertRaises(AttributeError, node2.c.uno)
+
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/test/execution/ec.py b/test/execution/ec.py
new file mode 100755 (executable)
index 0000000..6c44878
--- /dev/null
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+
+from nepi.execution.ec import ExperimentController, ECState 
+from nepi.execution.scheduler import TaskStatus
+
+import datetime
+import time
+import unittest
+
+class ExecuteControllersTestCase(unittest.TestCase):
+    def test_schedule_print(self):
+        def myfunc():
+            return 'hola!' 
+
+        ec = ExperimentController()
+    
+        tid = ec.schedule("0s", myfunc, track=True)
+        
+        while True:
+            task = ec.get_task(tid)
+            if task.status != TaskStatus.NEW:
+                break
+
+            time.sleep(1)
+
+        self.assertEquals('hola!', task.result)
+
+        ec.shutdown()
+
+    def test_schedule_date(self):
+        def get_time():
+            return datetime.datetime.now() 
+
+        ec = ExperimentController()
+
+        schedule_time = datetime.datetime.now()
+        
+        tid = ec.schedule("4s", get_time, track=True)
+
+        while True:
+            task = ec.get_task(tid)
+            if task.status != TaskStatus.NEW:
+                break
+
+            time.sleep(1)
+
+        execution_time = task.result
+        delta = execution_time - schedule_time
+        self.assertTrue(delta > datetime.timedelta(seconds=4))
+        self.assertTrue(delta < datetime.timedelta(seconds=5))
+
+        ec.shutdown()
+
+    def test_schedule_exception(self):
+        def raise_error():
+            raise RuntimeError, "the error"
+
+        ec = ExperimentController()
+        ec.schedule("2s", raise_error)
+
+        while ec.ecstate not in [ECState.FAILED, ECState.TERMINATED]:
+           time.sleep(1)
+        
+        self.assertEquals(ec.ecstate, ECState.FAILED)
+        ec.shutdown()
+
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/test/execution/resource.py b/test/execution/resource.py
new file mode 100755 (executable)
index 0000000..cb9423d
--- /dev/null
@@ -0,0 +1,199 @@
+#!/usr/bin/env python
+from nepi.execution.attribute import Attribute
+from nepi.execution.ec import ExperimentController 
+from nepi.execution.resource import ResourceManager, ResourceState, clsinit
+
+import time
+import unittest
+
+@clsinit
+class MyResource(ResourceManager):
+    _rtype = "MyResource"
+
+    @classmethod
+    def _register_attributes(cls):
+        cool_attr = Attribute("my_attr", "is a really nice attribute!")
+        cls._register_attribute(cool_attr)
+
+    def __init__(self, ec, guid):
+        super(MyResource, self).__init__(ec, guid)
+
+@clsinit
+class AnotherResource(ResourceManager):
+    _rtype = "AnotherResource"
+
+    def __init__(self, ec, guid):
+        super(AnotherResource, self).__init__(ec, guid)
+     
+class ResourceFactoryTestCase(unittest.TestCase):
+    def test_add_resource_factory(self):
+        from nepi.execution.resource import ResourceFactory
+
+        ResourceFactory.register_type(MyResource)
+        ResourceFactory.register_type(AnotherResource)
+
+        self.assertEquals(MyResource.rtype(), "MyResource")
+        self.assertEquals(len(MyResource._attributes), 1)
+
+        self.assertEquals(ResourceManager.rtype(), "Resource")
+        self.assertEquals(len(ResourceManager._attributes), 0)
+
+        self.assertEquals(AnotherResource.rtype(), "AnotherResource")
+        self.assertEquals(len(AnotherResource._attributes), 0)
+
+        self.assertEquals(len(ResourceFactory.resource_types()), 2)
+
+class Channel(ResourceManager):
+    _rtype = "Channel"
+
+    def __init__(self, ec, guid):
+        super(Channel, self).__init__(ec, guid)
+
+    def deploy(self):
+        time.sleep(1)
+        super(Channel, self).deploy()
+        self.logger.debug(" -------- DEPLOYED ------- ")
+       
+class Interface(ResourceManager):
+    _rtype = "Interface"
+
+    def __init__(self, ec, guid):
+        super(Interface, self).__init__(ec, guid)
+
+    def deploy(self):
+        node = self.get_connected(Node.rtype())[0]
+        chan = self.get_connected(Channel.rtype())[0]
+
+        if node.state < ResourceState.PROVISIONED:
+            self.ec.schedule("0.5s", self.deploy)
+        elif chan.state < ResourceState.READY:
+            self.ec.schedule("0.5s", self.deploy)
+        else:
+            time.sleep(2)
+            super(Interface, self).deploy()
+            self.logger.debug(" -------- DEPLOYED ------- ")
+
+class Node(ResourceManager):
+    _rtype = "Node"
+
+    def __init__(self, ec, guid):
+        super(Node, self).__init__(ec, guid)
+
+    def deploy(self):
+        if self.state == ResourceState.NEW:
+            self.discover()
+            self.provision()
+            self.logger.debug(" -------- PROVISIONED ------- ")
+            self.ec.schedule("3s", self.deploy)
+        elif self.state == ResourceState.PROVISIONED:
+            ifaces = self.get_connected(Interface.rtype())
+            for rm in ifaces:
+                if rm.state < ResourceState.READY:
+                    self.ec.schedule("0.5s", self.deploy)
+                    return 
+
+            super(Node, self).deploy()
+            self.logger.debug(" -------- DEPLOYED ------- ")
+
+class Application(ResourceManager):
+    _rtype = "Application"
+
+    def __init__(self, ec, guid):
+        super(Application, self).__init__(ec, guid)
+
+    def deploy(self):
+        node = self.get_connected(Node.rtype())[0]
+        if node.state < ResourceState.READY:
+            self.ec.schedule("0.5s", self.deploy)
+        else:
+            super(Application, self).deploy()
+            self.logger.debug(" -------- DEPLOYED ------- ")
+
+class ResourceManagerTestCase(unittest.TestCase):
+    def test_deploy_in_order(self):
+        """
+        Test scenario: 2 applications running one on 1 node each. 
+        Nodes are connected to Interfaces which are connected
+        through a channel between them.
+
+         - Application needs to wait until Node is ready to be ready
+         - Node needs to wait until Interface is ready to be ready
+         - Interface needs to wait until Node is provisioned to be ready
+         - Interface needs to wait until Channel is ready to be ready
+         - The channel doesn't wait for any other resource to be ready
+
+        """
+        from nepi.execution.resource import ResourceFactory
+        
+        ResourceFactory.register_type(Application)
+        ResourceFactory.register_type(Node)
+        ResourceFactory.register_type(Interface)
+        ResourceFactory.register_type(Channel)
+
+        ec = ExperimentController()
+
+        app1 = ec.register_resource("Application")
+        app2 = ec.register_resource("Application")
+        node1 = ec.register_resource("Node")
+        node2 = ec.register_resource("Node")
+        iface1 = ec.register_resource("Interface")
+        iface2 = ec.register_resource("Interface")
+        chan = ec.register_resource("Channel")
+
+        ec.register_connection(app1, node1)
+        ec.register_connection(app2, node2)
+        ec.register_connection(iface1, node1)
+        ec.register_connection(iface2, node2)
+        ec.register_connection(iface1, chan)
+        ec.register_connection(iface2, chan)
+
+        ec.deploy()
+
+        while not all([ ec.state(guid) == ResourceState.STARTED \
+                for guid in [app1, app2, node1, node2, iface1, iface2, chan]]) \
+                and not ec.finished:
+            time.sleep(0.5)
+
+        ec.shutdown()
+
+        rmapp1 = ec.get_resource(app1)
+        rmapp2 = ec.get_resource(app2)
+        rmnode1 = ec.get_resource(node1)
+        rmnode2 = ec.get_resource(node2)
+        rmiface1 = ec.get_resource(iface1)
+        rmiface2 = ec.get_resource(iface2)
+        rmchan = ec.get_resource(chan)
+
+        ## Validate deploy order
+        # - Application needs to wait until Node is ready to be ready
+        self.assertTrue(rmnode1.ready_time < rmapp1.ready_time)
+        self.assertTrue(rmnode2.ready_time < rmapp2.ready_time)
+
+         # - Node needs to wait until Interface is ready to be ready
+        self.assertTrue(rmnode1.ready_time > rmiface1.ready_time)
+        self.assertTrue(rmnode2.ready_time > rmiface2.ready_time)
+
+         # - Interface needs to wait until Node is provisioned to be ready
+        self.assertTrue(rmnode1.provision_time < rmiface1.ready_time)
+        self.assertTrue(rmnode2.provision_time < rmiface2.ready_time)
+
+         # - Interface needs to wait until Channel is ready to be ready
+        self.assertTrue(rmchan.ready_time < rmiface1.ready_time)
+        self.assertTrue(rmchan.ready_time < rmiface2.ready_time)
+
+    def test_start_with_condition(self):
+        # TODO!!!
+        pass
+    
+    def test_stop_with_condition(self):
+        # TODO!!!
+        pass
+
+    def test_set_with_condition(self):
+        # TODO!!!
+        pass
+
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/test/lib/mock/__init__.py b/test/lib/mock/__init__.py
deleted file mode 100644 (file)
index f3e1ac0..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID, TESTBED_VERSION
-from execute import TestbedController 
-
diff --git a/test/lib/mock/constants.py b/test/lib/mock/constants.py
deleted file mode 100644 (file)
index 54fe6da..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-# -*- coding: utf-8 -*-
-
-TESTBED_ID = "mock"
-TESTBED_VERSION = "0.1"
diff --git a/test/lib/mock/execute.py b/test/lib/mock/execute.py
deleted file mode 100644 (file)
index fc6f64a..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID, TESTBED_VERSION
-from nepi.core import testbed_impl
-
-class TestbedController(testbed_impl.TestbedController):
-    def __init__(self):
-        super(TestbedController, self).__init__(TESTBED_ID, TESTBED_VERSION)
-
-    def do_configure(self):
-        pass
-
-    def action(self, time, guid, action):
-        raise NotImplementedError
-
-    def trace(self, guid, trace_id, attribute='value'):
-        if attribute == 'value':
-            return """PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
-
---- 10.0.0.2 ping statistics ---
-1 packets transmitted, 1 received, 0% packet loss, time 0ms
-"""
-        elif attribute == 'path':
-            return '<test>'
-        else:
-            return None
-
-    def shutdown(self):
-           pass
-
diff --git a/test/lib/mock/metadata.py b/test/lib/mock/metadata.py
deleted file mode 100644 (file)
index 35b7e44..0000000
+++ /dev/null
@@ -1,234 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID, TESTBED_VERSION
-from nepi.core import metadata
-from nepi.core.attributes import Attribute
-from nepi.util import validation, tags
-from nepi.util.constants import ApplicationStatus as AS
-
-NODE = "Node"
-IFACE = "Interface"
-APP = "Application"
-
-### Connection functions ####
-
-def connect_cross(testbed_instance, guid, cross_data):
-    connected = True
-    testbed_instance.set(guid, "cross", True)
-
-### Creation functions ###
-
-def create_node(testbed_instance, guid):
-    testbed_instance.elements[guid] = NODE
-
-def create_iface(testbed_instance, guid):
-     testbed_instance.elements[guid] = IFACE
-
-def create_application(testbed_instance, guid):
-     testbed_instance.elements[guid] = APP
-
-### Start/Stop functions ###
-
-### Status functions ###
-
-def status_application(testbed_instance, guid):
-    return AS.STATUS_FINISHED
-
-### Factory information ###
-
-connector_types = dict({
-    "apps": dict({
-                "help": "Connector from node to applications", 
-                "name": "apps",
-                "max": -1, 
-                "min": 0
-            }),
-    "devs": dict({
-                "help": "Connector from node to network interfaces", 
-                "name": "devs",
-                "max": -1, 
-                "min": 0
-            }),
-    "node": dict({
-                "help": "Connector to a Node", 
-                "name": "node",
-                "max": 1, 
-                "min": 1
-            }),
-    "iface": dict({
-                "help": "Connector to a Interface", 
-                "name": "iface",
-                "max": 1, 
-                "min": 0
-            }),
-    "cross": dict({
-                "help": "Connector to an Interface in other testbed", 
-                "name": "cross",
-                "max": 1, 
-                "min": 0
-            }),
-   })
-
-connections = [
-    dict({
-        "from": (TESTBED_ID, NODE, "devs"),
-        "to":   (TESTBED_ID, IFACE, "node"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, IFACE, "iface"),
-        "to":   (TESTBED_ID, IFACE, "iface"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, NODE, "apps"),
-        "to":   (TESTBED_ID, APP, "node"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, IFACE, "cross"),
-        "to":   ("mock2", IFACE, "cross"),
-        "init_code": connect_cross,
-        "can_cross": True,
-    })]
-
-attributes = dict({
-    "fake": dict({
-                "name": "fake",
-                "help": "fake attribute",
-                "type": Attribute.BOOL,
-                "value": False,
-                "validation_function": validation.is_bool
-            }),
-    "test": dict({
-                "name": "test",
-                "help": "test attribute",
-                "type": Attribute.STRING,
-                "validation_function": validation.is_string
-            }),
-    "cross": dict({
-                "name": "cross",
-                "help": "Attribute that indicates if cross connection was performed",
-                "type": Attribute.BOOL,
-                "value": False,
-                "validation_function": validation.is_bool
-        }),
-    "maxAddresses": dict({
-                "name": "maxAddresses",
-                "help": "Attribute that indicates the maximum number of addresses for an interface",
-                "type": Attribute.INTEGER,
-                "value": 3,
-                "flags" : Attribute.DesignReadOnly |\
-                    Attribute.ExecInvisible |\
-                    Attribute.Metadata,
-                "validation_function": validation.is_integer
-       })
-    })
-
-traces = dict({
-    "fake": dict({
-                "name": "fake",
-                "help": "fake trace"
-              }),
-    })
-
-factories_order = [ NODE, IFACE, APP ]
-
-factories_info = dict({
-    NODE: dict({
-            "help": "Fake node",
-            "category": "topology",
-            "create_function": create_node,
-            "start_function": None,
-            "stop_function": None,
-            "status_function": None,
-            "box_attributes": ["fake","test"],
-            "connector_types": ["devs", "apps"],
-            "tags": [tags.MOBILE, tags.NODE, tags.ALLOW_ROUTES],
-       }),
-    IFACE: dict({
-            "help": "Fake iface",
-            "category": "devices",
-            "create_function": create_iface,
-            "start_function": None,
-            "stop_function": None,
-            "status_function": None,
-            "allow_addresses": True,
-            "factory_attributes": ["fake", "maxAddresses"],
-            "box_attributes": ["fake", "test", "cross"],
-            "connector_types": ["node", "iface", "cross"],
-            "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-       }),
-    APP: dict({
-            "help": "Fake application",
-            "category": "applications",
-            "create_function": create_application,
-            "start_function": None,
-            "stop_function": None,
-            "status_function": status_application,
-            "box_attributes": ["fake", "test"],
-            "connector_types": ["node"],
-            "traces": ["fake"],
-            "tags": [tags.APPLICATION],
-        }),
-})
-
-testbed_attributes = dict({
-        "fake": dict({
-                "name": "fake",
-                "help": "fake attribute",
-                "type": Attribute.BOOL,
-                "value": False,
-                "range": None,
-                "allowed": None,
-                "validation_function": validation.is_bool
-            }),
-        "test": dict({
-                "name": "test",
-                "help": "test attribute",
-                "type": Attribute.STRING,
-                "validation_function": validation.is_string
-            }),
-    })
-
-class MetadataInfo(metadata.MetadataInfo):
-    @property
-    def connector_types(self):
-        return connector_types
-
-    @property
-    def connections(self):
-        return connections
-
-    @property
-    def attributes(self):
-        return attributes
-
-    @property
-    def traces(self):
-        return traces
-
-    @property
-    def create_order(self):
-        return factories_order
-
-    @property
-    def configure_order(self):
-        return factories_order
-
-    @property
-    def factories_info(self):
-        return factories_info
-
-    @property
-    def testbed_attributes(self):
-        return testbed_attributes
-
-    @property
-    def testbed_id(self):
-        return TESTBED_ID
-
-    @property
-    def testbed_version(self):
-        return TESTBED_VERSION
-
diff --git a/test/lib/mock2/__init__.py b/test/lib/mock2/__init__.py
deleted file mode 100644 (file)
index f3e1ac0..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID, TESTBED_VERSION
-from execute import TestbedController 
-
diff --git a/test/lib/mock2/constants.py b/test/lib/mock2/constants.py
deleted file mode 100644 (file)
index dae7bed..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-# -*- coding: utf-8 -*-
-
-TESTBED_ID = "mock2"
-TESTBED_VERSION = "0.1"
-
diff --git a/test/lib/mock2/execute.py b/test/lib/mock2/execute.py
deleted file mode 100644 (file)
index fc6f64a..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID, TESTBED_VERSION
-from nepi.core import testbed_impl
-
-class TestbedController(testbed_impl.TestbedController):
-    def __init__(self):
-        super(TestbedController, self).__init__(TESTBED_ID, TESTBED_VERSION)
-
-    def do_configure(self):
-        pass
-
-    def action(self, time, guid, action):
-        raise NotImplementedError
-
-    def trace(self, guid, trace_id, attribute='value'):
-        if attribute == 'value':
-            return """PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
-
---- 10.0.0.2 ping statistics ---
-1 packets transmitted, 1 received, 0% packet loss, time 0ms
-"""
-        elif attribute == 'path':
-            return '<test>'
-        else:
-            return None
-
-    def shutdown(self):
-           pass
-
diff --git a/test/lib/mock2/metadata.py b/test/lib/mock2/metadata.py
deleted file mode 100644 (file)
index a258a4a..0000000
+++ /dev/null
@@ -1,224 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from constants import TESTBED_ID, TESTBED_VERSION
-from nepi.core import metadata
-from nepi.core.attributes import Attribute
-from nepi.util import tags, validation
-from nepi.util.constants import ApplicationStatus as AS
-
-NODE = "Node"
-IFACE = "Interface"
-APP = "Application"
-
-### Connection functions ####
-
-def connect_cross(testbed_instance, guid, cross_data):
-    connected = cross_data["cross"]
-    testbed_instance.set(guid, "cross", connected)
-
-### Creation functions ###
-
-def create_node(testbed_instance, guid):
-    testbed_instance.elements[guid] = NODE
-
-def create_iface(testbed_instance, guid):
-     testbed_instance.elements[guid] = IFACE
-
-def create_application(testbed_instance, guid):
-     testbed_instance.elements[guid] = APP
-
-### Start/Stop functions ###
-
-### Status functions ###
-
-def status_application(testbed_instance, guid):
-    return AS.STATUS_FINISHED
-
-### Factory information ###
-
-connector_types = dict({
-    "apps": dict({
-                "help": "Connector from node to applications", 
-                "name": "apps",
-                "max": -1, 
-                "min": 0
-            }),
-    "devs": dict({
-                "help": "Connector from node to network interfaces", 
-                "name": "devs",
-                "max": -1, 
-                "min": 0
-            }),
-    "node": dict({
-                "help": "Connector to a Node", 
-                "name": "node",
-                "max": 1, 
-                "min": 1
-            }),
-    "iface": dict({
-                "help": "Connector to a Interface", 
-                "name": "iface",
-                "max": 1, 
-                "min": 0
-            }),
-    "cross": dict({
-                "help": "Connector to an Interface in other testbed", 
-                "name": "cross",
-                "max": 1, 
-                "min": 0
-            }),
-   })
-
-connections = [
-    dict({
-        "from": (TESTBED_ID, NODE, "devs"),
-        "to":   (TESTBED_ID, IFACE, "node"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, IFACE, "iface"),
-        "to":   (TESTBED_ID, IFACE, "iface"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, NODE, "apps"),
-        "to":   (TESTBED_ID, APP, "node"),
-        "can_cross": False
-    }),
-    dict({
-        "from": (TESTBED_ID, IFACE, "cross"),
-        "to":   ("mock", IFACE, "cross"),
-        "compl_code": connect_cross,
-        "can_cross": True,
-    })]
-
-attributes = dict({
-    "fake": dict({
-                "name": "fake",
-                "help": "fake attribute",
-                "type": Attribute.BOOL,
-                "value": False,
-                "validation_function": validation.is_bool
-            }),
-    "test": dict({
-                "name": "test",
-                "help": "test attribute",
-                "type": Attribute.STRING,
-                "validation_function": validation.is_string
-            }),
-    "cross": dict({
-                "name": "cross",
-                "help": "Attribute that indicates if cross connection was performed",
-                "type": Attribute.BOOL,
-                "value": False,
-                "validation_function": validation.is_bool
-            })
-    })
-
-traces = dict({
-    "fake": dict({
-                "name": "fake",
-                "help": "fake trace"
-              }),
-    })
-
-factories_order = [ NODE, IFACE, APP ]
-
-factories_info = dict({
-    NODE: dict({
-            "help": "Fake node",
-            "category": "topology",
-            "create_function": create_node,
-            "start_function": None,
-            "stop_function": None,
-            "status_function": None,
-            "box_attributes": ["fake","test"],
-            "connector_types": ["devs", "apps"],
-            "tags": [tags.NODE, tags.ALLOW_ROUTES],
-       }),
-    IFACE: dict({
-            "help": "Fake iface",
-            "category": "devices",
-            "create_function": create_iface,
-            "start_function": None,
-            "stop_function": None,
-            "status_function": None,
-            "allow_addresses": True,
-            "factory_attributes": ["fake"],
-            "box_attributes": ["fake", "test", "cross"],
-            "connector_types": ["node", "iface", "cross"],
-            "tags": [tags.INTERFACE, tags.ALLOW_ADDRESSES],
-       }),
-    APP: dict({
-            "help": "Fake application",
-            "category": "applications",
-            "create_function": create_application,
-            "start_function": None,
-            "stop_function": None,
-            "status_function": status_application,
-            "box_attributes": ["fake", "test"],
-            "connector_types": ["node"],
-            "traces": ["fake"],
-            "tags": [tags.APPLICATION],
-        }),
-})
-
-testbed_attributes = dict({
-        "fake": dict({
-                "name": "fake",
-                "help": "fake attribute",
-                "type": Attribute.BOOL,
-                "value": False,
-                "range": None,
-                "allowed": None,
-                "validation_function": validation.is_bool
-            }),
-        "test": dict({
-                "name": "test",
-                "help": "test attribute",
-                "type": Attribute.STRING,
-                "validation_function": validation.is_string
-            }),
-    })
-
-class MetadataInfo(metadata.MetadataInfo):
-    @property
-    def connector_types(self):
-        return connector_types
-
-    @property
-    def connections(self):
-        return connections
-
-    @property
-    def attributes(self):
-        return attributes
-
-    @property
-    def traces(self):
-        return traces
-
-    @property
-    def create_order(self):
-        return factories_order
-
-    @property
-    def configure_order(self):
-        return factories_order
-
-    @property
-    def factories_info(self):
-        return factories_info
-
-    @property
-    def testbed_attributes(self):
-        return testbed_attributes
-
-    @property
-    def testbed_id(self):
-        return TESTBED_ID
-
-    @property
-    def testbed_version(self):
-        return TESTBED_VERSION
-
diff --git a/test/resources/linux/application.py b/test/resources/linux/application.py
new file mode 100644 (file)
index 0000000..4f9f90e
--- /dev/null
@@ -0,0 +1,264 @@
+#!/usr/bin/env python
+from nepi.execution.ec import ExperimentController 
+from nepi.execution.resource import ResourceState, ResourceAction
+from nepi.execution.trace import TraceAttr
+from nepi.resources.linux.node import LinuxNode
+from nepi.resources.linux.application import LinuxApplication
+
+from test_utils import skipIfNotAlive
+
+import os
+import time
+import tempfile
+import unittest
+
+class LinuxApplicationTestCase(unittest.TestCase):
+    def setUp(self):
+        self.fedora_host = 'nepi2.pl.sophia.inria.fr'
+        self.fedora_user = 'inria_nepi'
+
+        self.ubuntu_host = 'roseval.pl.sophia.inria.fr'
+        self.ubuntu_user = 'alina'
+        
+        self.target = 'nepi5.pl.sophia.inria.fr'
+
+    @skipIfNotAlive
+    def t_stdout(self, host, user):
+        from nepi.execution.resource import ResourceFactory
+        
+        ResourceFactory.register_type(LinuxNode)
+        ResourceFactory.register_type(LinuxApplication)
+
+        ec = ExperimentController()
+        
+        node = ec.register_resource("LinuxNode")
+        ec.set(node, "hostname", host)
+        ec.set(node, "username", user)
+        ec.set(node, "cleanHome", True)
+        ec.set(node, "cleanProcesses", True)
+
+        app = ec.register_resource("LinuxApplication")
+        cmd = "echo 'HOLA'"
+        ec.set(app, "command", cmd)
+        ec.register_connection(app, node)
+
+        ec.deploy()
+
+        ec.wait_finished([app])
+
+        self.assertTrue(ec.state(node) == ResourceState.STARTED)
+        self.assertTrue(ec.state(app) == ResourceState.FINISHED)
+
+        stdout = ec.trace(app, 'stdout')
+        self.assertTrue(stdout.strip() == "HOLA")
+
+        ec.shutdown()
+
+    @skipIfNotAlive
+    def t_ping(self, host, user):
+        from nepi.execution.resource import ResourceFactory
+        
+        ResourceFactory.register_type(LinuxNode)
+        ResourceFactory.register_type(LinuxApplication)
+
+        ec = ExperimentController()
+        
+        node = ec.register_resource("LinuxNode")
+        ec.set(node, "hostname", host)
+        ec.set(node, "username", user)
+        ec.set(node, "cleanHome", True)
+        ec.set(node, "cleanProcesses", True)
+
+        app = ec.register_resource("LinuxApplication")
+        cmd = "ping -c5 %s" % self.target 
+        ec.set(app, "command", cmd)
+        
+        ec.register_connection(app, node)
+
+        ec.deploy()
+
+        ec.wait_finished([app])
+
+        self.assertTrue(ec.state(node) == ResourceState.STARTED)
+        self.assertTrue(ec.state(app) == ResourceState.FINISHED)
+
+        stdout = ec.trace(app, 'stdout')
+        size = ec.trace(app, 'stdout', attr = TraceAttr.SIZE)
+        self.assertEquals(len(stdout), size)
+        
+        block = ec.trace(app, 'stdout', attr = TraceAttr.STREAM, block = 5, offset = 1)
+        self.assertEquals(block, stdout[5:10])
+
+        path = ec.trace(app, 'stdout', attr = TraceAttr.PATH)
+        rm = ec.get_resource(app)
+        p = os.path.join(rm.app_home, 'stdout')
+        self.assertEquals(path, p)
+
+        ec.shutdown()
+
+    @skipIfNotAlive
+    def t_concurrency(self, host, user):
+        from nepi.execution.resource import ResourceFactory
+        
+        ResourceFactory.register_type(LinuxNode)
+        ResourceFactory.register_type(LinuxApplication)
+
+        ec = ExperimentController()
+        
+        node = ec.register_resource("LinuxNode")
+        ec.set(node, "hostname", host)
+        ec.set(node, "username", user)
+        ec.set(node, "cleanHome", True)
+        ec.set(node, "cleanProcesses", True)
+
+        apps = list()
+        for i in xrange(50):
+            app = ec.register_resource("LinuxApplication")
+            cmd = "ping -c5 %s" % self.target 
+            ec.set(app, "command", cmd)
+            ec.register_connection(app, node)
+            apps.append(app)
+
+        ec.deploy()
+
+        ec.wait_finished(apps)
+
+        self.assertTrue(ec.state(node) == ResourceState.STARTED)
+        self.assertTrue(
+               all([ec.state(guid) == ResourceState.FINISHED \
+                for guid in apps])
+                )
+
+        for app in apps:
+            stdout = ec.trace(app, 'stdout')
+            size = ec.trace(app, 'stdout', attr = TraceAttr.SIZE)
+            self.assertEquals(len(stdout), size)
+            
+            block = ec.trace(app, 'stdout', attr = TraceAttr.STREAM, block = 5, offset = 1)
+            self.assertEquals(block, stdout[5:10])
+
+            path = ec.trace(app, 'stdout', attr = TraceAttr.PATH)
+            rm = ec.get_resource(app)
+            p = os.path.join(rm.app_home, 'stdout')
+            self.assertEquals(path, p)
+
+        ec.shutdown()
+
+    @skipIfNotAlive
+    def t_condition(self, host, user, depends):
+        from nepi.execution.resource import ResourceFactory
+        
+        ResourceFactory.register_type(LinuxNode)
+        ResourceFactory.register_type(LinuxApplication)
+
+        ec = ExperimentController()
+        
+        node = ec.register_resource("LinuxNode")
+        ec.set(node, "hostname", host)
+        ec.set(node, "username", user)
+        ec.set(node, "cleanHome", True)
+        ec.set(node, "cleanProcesses", True)
+
+        server = ec.register_resource("LinuxApplication")
+        cmd = "echo 'HOLA' | nc -l 3333"
+        ec.set(server, "command", cmd)
+        ec.set(server, "depends", depends)
+        ec.register_connection(server, node)
+
+        client = ec.register_resource("LinuxApplication")
+        cmd = "nc 127.0.0.1 3333"
+        ec.set(client, "command", cmd)
+        ec.register_connection(client, node)
+
+        ec.register_condition(client, ResourceAction.START, server, ResourceState.STARTED)
+
+        apps = [client, server]
+        
+        ec.deploy()
+
+        ec.wait_finished(apps)
+
+        self.assertTrue(ec.state(node) == ResourceState.STARTED)
+        self.assertTrue(ec.state(server) == ResourceState.FINISHED)
+        self.assertTrue(ec.state(client) == ResourceState.FINISHED)
+
+        stdout = ec.trace(client, 'stdout')
+        self.assertTrue(stdout.strip() == "HOLA")
+
+        ec.shutdown()
+
+    @skipIfNotAlive
+    def t_http_sources(self, host, user):
+        from nepi.execution.resource import ResourceFactory
+        
+        ResourceFactory.register_type(LinuxNode)
+        ResourceFactory.register_type(LinuxApplication)
+
+        ec = ExperimentController()
+        
+        node = ec.register_resource("LinuxNode")
+        ec.set(node, "hostname", host)
+        ec.set(node, "username", user)
+        ec.set(node, "cleanHome", True)
+        ec.set(node, "cleanProcesses", True)
+
+        sources = "http://nepi.inria.fr/attachment/wiki/WikiStart/pybindgen-r794.tar.gz " \
+            "http://nepi.inria.fr/attachment/wiki/WikiStart/nepi_integration_framework.pdf"
+
+        app = ec.register_resource("LinuxApplication")
+        ec.set(app, "sources", sources)
+
+        ec.register_connection(app, node)
+
+        ec.deploy()
+
+        ec.wait_finished([app])
+
+        self.assertTrue(ec.state(node) == ResourceState.STARTED)
+        self.assertTrue(ec.state(app) == ResourceState.FINISHED)
+
+        err = ec.trace(app, 'http_sources_err')
+        self.assertTrue(err == "")
+        
+        out = ec.trace(app, 'http_sources_out')
+        self.assertTrue(out.find("pybindgen-r794.tar.gz") > -1)
+        self.assertTrue(out.find("nepi_integration_framework.pdf") > -1)
+
+        ec.shutdown()
+
+    def test_stdout_fedora(self):
+        self.t_stdout(self.fedora_host, self.fedora_user)
+
+    def test_stdout_ubuntu(self):
+        self.t_stdout(self.ubuntu_host, self.ubuntu_user)
+
+    def test_ping_fedora(self):
+        self.t_ping(self.fedora_host, self.fedora_user)
+
+    def test_ping_ubuntu(self):
+        self.t_ping(self.ubuntu_host, self.ubuntu_user)
+
+    def test_concurrency_fedora(self):
+        self.t_concurrency(self.fedora_host, self.fedora_user)
+
+    def test_concurrency_ubuntu(self):
+        self.t_concurrency(self.ubuntu_host, self.ubuntu_user)
+
+    def test_condition_fedora(self):
+        self.t_condition(self.fedora_host, self.fedora_user, "nc")
+
+    def test_condition_ubuntu(self):
+        self.t_condition(self.ubuntu_host, self.ubuntu_user, "netcat")
+
+    def test_http_sources_fedora(self):
+        self.t_http_sources(self.fedora_host, self.fedora_user)
+
+    def test_http_sources_ubuntu(self):
+        self.t_http_sources(self.ubuntu_host, self.ubuntu_user)
+
+
+    # TODO: test compilation, sources, dependencies, etc!!!
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/test/resources/linux/interface.py b/test/resources/linux/interface.py
new file mode 100644 (file)
index 0000000..44c293a
--- /dev/null
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+from nepi.execution.ec import ExperimentController 
+from nepi.execution.resource import ResourceState
+from nepi.resources.linux.node import LinuxNode
+from nepi.resources.linux.interface import LinuxInterface
+from nepi.resources.linux.channel import LinuxChannel
+from nepi.util.sshfuncs import RUNNING, FINISHED
+
+from test_utils import skipIfNotAlive
+
+import os
+import time
+import tempfile
+import unittest
+
+class LinuxInterfaceTestCase(unittest.TestCase):
+    def setUp(self):
+        self.fedora_host = 'nepi2.pl.sophia.inria.fr'
+        self.fedora_user = 'inria_nepi'
+
+        self.ubuntu_host = 'roseval.pl.sophia.inria.fr'
+        self.ubuntu_user = 'alina'
+
+    @skipIfNotAlive
+    def t_deploy(self, host, user):
+        from nepi.execution.resource import ResourceFactory
+        
+        ResourceFactory.register_type(LinuxNode)
+        ResourceFactory.register_type(LinuxInterface)
+        ResourceFactory.register_type(LinuxChannel)
+
+        ec = ExperimentController()
+        
+        node = ec.register_resource("LinuxNode")
+        ec.set(node, "hostname", host)
+        ec.set(node, "username", user)
+
+        iface = ec.register_resource("LinuxInterface")
+        chan = ec.register_resource("LinuxChannel")
+
+        ec.register_connection(iface, node)
+        ec.register_connection(iface, chan)
+
+        ec.deploy()
+
+        while not all([ ec.state(guid) == ResourceState.STARTED \
+                for guid in [node, iface]]) and not ec.finished:
+            time.sleep(0.5)
+
+        self.assertTrue(ec.state(node) == ResourceState.STARTED)
+        self.assertTrue(ec.state(iface) == ResourceState.STARTED)
+        self.assertTrue(ec.get(iface, "deviceName") == "eth0")
+
+        ec.shutdown()
+
+    def test_deploy_fedora(self):
+        self.t_deploy(self.fedora_host, self.fedora_user)
+
+    def test_deploy_ubuntu(self):
+        self.t_deploy(self.ubuntu_host, self.ubuntu_user)
+
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/test/resources/linux/node.py b/test/resources/linux/node.py
new file mode 100644 (file)
index 0000000..a09154b
--- /dev/null
@@ -0,0 +1,172 @@
+#!/usr/bin/env python
+from nepi.resources.linux.node import LinuxNode
+from nepi.util.sshfuncs import RUNNING, FINISHED
+
+from test_utils import skipIfNotAlive, skipInteractive, create_node
+
+import os
+import time
+import tempfile
+import unittest
+
+class LinuxNodeTestCase(unittest.TestCase):
+    def setUp(self):
+        self.fedora_host = 'nepi2.pl.sophia.inria.fr'
+        self.fedora_user = 'inria_nepi'
+
+        self.ubuntu_host = 'roseval.pl.sophia.inria.fr'
+        self.ubuntu_user = 'alina'
+        
+        self.target = 'nepi5.pl.sophia.inria.fr'
+
+    @skipIfNotAlive
+    def t_xterm(self, host, user):
+        node, ec = create_node(host, user)
+
+        node.install_packages('xterm')
+
+        (out, err), proc = node.execute('xterm', forward_x11 = True)
+        
+        self.assertEquals(out, "")
+
+        (out, err), proc = node.remove_packages('xterm')
+        
+        self.assertEquals(out, "")
+
+    @skipIfNotAlive
+    def t_execute(self, host, user):
+        node, ec = create_node(host, user)
+
+        command = "ping -qc3 %s" % self.target
+        
+        (out, err), proc = node.execute(command)
+
+        expected = """3 packets transmitted, 3 received, 0% packet loss"""
+
+        self.assertTrue(out.find(expected) > 0)
+
+    @skipIfNotAlive
+    def t_run(self, host, user):
+        node, ec = create_node(host, user)
+        
+        app_home = os.path.join(node.exp_home, "my-app")
+        node.mkdir(app_home, clean = True)
+        
+        command = "ping %s" % self.target
+        node.run(command, app_home)
+        pid, ppid = node.checkpid(app_home)
+
+        status = node.status(pid, ppid)
+        self.assertTrue(status, RUNNING)
+
+        node.kill(pid, ppid)
+        status = node.status(pid, ppid)
+        self.assertTrue(status, FINISHED)
+        
+        (out, err), proc = node.check_output(app_home, "stdout")
+
+        expected = """64 bytes from"""
+
+        self.assertTrue(out.find(expected) > 0)
+
+        node.rmdir(app_home)
+
+    @skipIfNotAlive
+    def t_install(self, host, user):
+        node, ec = create_node(host, user)
+
+        (out, err), proc = node.install_packages('gcc')
+        self.assertEquals(out, "")
+
+        (out, err), proc = node.remove_packages('gcc')
+        
+        self.assertEquals(out, "")
+
+
+    @skipIfNotAlive
+    def t_compile(self, host, user):
+        node, ec = create_node(host, user)
+
+        app_home = os.path.join(node.exp_home, "my-app")
+        node.mkdir(app_home, clean = True)
+
+        prog = """#include <stdio.h>
+
+int
+main (void)
+{
+    printf ("Hello, world!\\n");
+    return 0;
+}
+"""
+        # upload the test program
+        dst = os.path.join(app_home, "hello.c")
+        node.upload(prog, dst, text = True)
+
+        # install gcc
+        node.install_packages('gcc')
+
+        # compile the program using gcc
+        command = "cd %s; gcc -Wall hello.c -o hello" % app_home
+        (out, err), proc = node.execute(command)
+
+        # execute the program and get the output from stdout
+        command = "%s/hello" % app_home 
+        (out, err), proc = node.execute(command)
+
+        self.assertEquals(out, "Hello, world!\n")
+
+        # execute the program and get the output from a file
+        command = "%(home)s/hello > %(home)s/hello.out" % {
+                'home': app_home}
+        (out, err), proc = node.execute(command)
+
+        # retrieve the output file 
+        src = os.path.join(app_home, "hello.out")
+        f = tempfile.NamedTemporaryFile(delete=False)
+        dst = f.name
+        node.download(src, dst)
+        f.close()
+
+        node.remove_packages('gcc')
+        node.rmdir(app_home)
+
+        f = open(dst, "r")
+        out = f.read()
+        f.close()
+        
+        self.assertEquals(out, "Hello, world!\n")
+
+    def test_execute_fedora(self):
+        self.t_execute(self.fedora_host, self.fedora_user)
+
+    def test_execute_ubuntu(self):
+        self.t_execute(self.ubuntu_host, self.ubuntu_user)
+
+    def test_run_fedora(self):
+        self.t_run(self.fedora_host, self.fedora_user)
+
+    def test_run_ubuntu(self):
+        self.t_run(self.ubuntu_host, self.ubuntu_user)
+
+    def test_intall_fedora(self):
+        self.t_install(self.fedora_host, self.fedora_user)
+
+    def test_install_ubuntu(self):
+        self.t_install(self.ubuntu_host, self.ubuntu_user)
+
+    def test_compile_fedora(self):
+        self.t_compile(self.fedora_host, self.fedora_user)
+
+    def test_compile_ubuntu(self):
+        self.t_compile(self.ubuntu_host, self.ubuntu_user)
+    
+    @skipInteractive
+    def test_xterm_ubuntu(self):
+        """ Interactive test. Should not run automatically """
+        self.t_xterm(self.ubuntu_host, self.ubuntu_user)
+
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/test/resources/linux/test_utils.py b/test/resources/linux/test_utils.py
new file mode 100644 (file)
index 0000000..d915707
--- /dev/null
@@ -0,0 +1,48 @@
+from nepi.resources.linux.node import LinuxNode
+
+import os
+
+class DummyEC(object):
+    @property
+    def exp_id(self):
+        return "nepi-1"
+
+def create_node(hostname, username):
+    ec = DummyEC()
+    node = LinuxNode(ec, 1)
+    node.set("hostname", hostname)
+    node.set("username", username)
+
+    # If we don't return the reference to the EC
+    # it will be released by the garbage collector since 
+    # the resources only save a weak reference to it.
+    return node, ec
+
+def skipIfNotAlive(func):
+    name = func.__name__
+    def wrapped(*args, **kwargs):
+        node, ec = create_node(args[1], args[2])
+
+        if not node.is_alive():
+            print "*** WARNING: Skipping test %s: Node %s is not alive\n" % (
+                name, node.get("hostname"))
+            return
+
+        return func(*args, **kwargs)
+    
+    return wrapped
+
+def skipInteractive(func):
+    name = func.__name__
+    def wrapped(*args, **kwargs):
+        mode = os.environ.get("NEPI_INTERACTIVE", False)
+        mode = mode and  mode.lower() in ['true', 'yes']
+        if not mode:
+            print "*** WARNING: Skipping test %s: Interactive mode off \n" % name
+            return
+
+        return func(*args, **kwargs)
+    
+    return wrapped
+
+
diff --git a/test/resources/ns3/ns3wrapper.py b/test/resources/ns3/ns3wrapper.py
new file mode 100644 (file)
index 0000000..815c0f1
--- /dev/null
@@ -0,0 +1,204 @@
+#!/usr/bin/env python
+# Test based on ns-3 csma/examples/csma-ping.cc file
+#
+# Network topology
+#
+#       n0    n1   n2   n3
+#       |     |    |    |
+#       -----------------
+#
+#  node n0 sends IGMP traffic to node n3
+
+
+from nepi.resources.ns3.ns3wrapper import NS3Wrapper
+
+import os.path
+import time
+import tempfile
+import unittest
+
+class NS3WrapperTest(unittest.TestCase):
+    def setUp(self):
+        pass
+
+    def test_csma_ping(self):
+        wrapper = NS3Wrapper()
+
+        ### create 4  nodes
+        # NodeContainer c;
+        c = wrapper.create("NodeContainer")
+
+        # c.Create (4);
+        wrapper.invoke(c, "Create", 4)
+
+        ### connect the nodes to a shared channel
+        # CsmaHelper csma;
+        csma = wrapper.create("CsmaHelper")
+
+        # csma.SetChannelAttribute ("DataRate", DataRateValue (DataRate (5000000)));
+        dr = wrapper.create("DataRate", 5000000)
+        drv = wrapper.create("DataRateValue", dr)
+        wrapper.invoke(csma, "SetChannelAttribute", "DataRate", drv)
+
+        # csma.SetChannelAttribute ("Delay", TimeValue (MilliSeconds (2)));
+        ms = wrapper.create("MilliSeconds", 2)
+        delay = wrapper.create("TimeValue", ms)
+        wrapper.invoke(csma, "SetChannelAttribute", "Delay", delay)
+
+        # csma.SetDeviceAttribute ("EncapsulationMode", StringValue ("Llc"));
+        encap = wrapper.create("StringValue", "Llc")
+        wrapper.invoke(csma, "SetDeviceAttribute", "EncapsulationMode", encap)
+
+        # NetDeviceContainer devs = csma.Install (c);
+        devs = wrapper.invoke(csma, "Install", c)
+
+        ### add IP stack to all nodes
+        # InternetStackHelper ipStack;
+        ipStack = wrapper.create("InternetStackHelper")
+        
+        # ipStack.Install (c);
+        wrapper.invoke(ipStack, "Install", c)
+
+        ### assign ip addresses
+        #Ipv4AddressHelper ip;
+        ip = wrapper.create("Ipv4AddressHelper")
+
+        # ip.SetBase ("192.168.1.0", "255.255.255.0");
+        ip4 = wrapper.create("Ipv4Address", "192.168.1.0")
+        mask4 = wrapper.create("Ipv4Mask", "255.255.255.0")
+        wrapper.invoke(ip, "SetBase", ip4, mask4)
+
+        # Ipv4InterfaceContainer addresses = ip.Assign (devs);
+        addresses = wrapper.invoke(ip, "Assign", devs)
+
+        ### Create source
+        config = wrapper.singleton("Config")
+        
+        # Config::SetDefault ("ns3::Ipv4RawSocketImpl::Protocol", StringValue ("2"));
+        proto = wrapper.create("StringValue", "2")
+        wrapper.invoke(config, "SetDefault", "ns3::Ipv4RawSocketImpl::Protocol", proto)
+
+        # InetSocketAddress dst = InetSocketAddress (addresses.GetAddress (3));
+        addr3 = wrapper.invoke(addresses, "GetAddress", 3)
+        dst = wrapper.create("InetSocketAddress", addr3)
+
+        # OnOffHelper onoff = OnOffHelper ("ns3::Ipv4RawSocketFactory", dst);
+        onoff = wrapper.create("OnOffHelper", "ns3::Ipv4RawSocketFactory", dst)
+
+        # onoff.SetAttribute ("OnTime", RandomVariableValue (ConstantVariable (1.0)));
+        cv1 = wrapper.create("ConstantVariable", 1.0)
+        rand1 = wrapper.create("RandomVariableValue", cv1)
+        wrapper.invoke(onoff, "SetAttribute", "OnTime", rand1)
+
+        # onoff.SetAttribute ("OffTime", RandomVariableValue (ConstantVariable (0.0)));
+        cv2 = wrapper.create("ConstantVariable", 0.0)
+        rand2 = wrapper.create("RandomVariableValue", cv2)
+        wrapper.invoke(onoff, "SetAttribute", "OffTime", rand2)
+
+        # onoff.SetAttribute ("DataRate", DataRateValue (DataRate (15000)));
+        dr2 = wrapper.create("DataRate", 15000)
+        drv2 = wrapper.create("DataRateValue", dr2)
+        wrapper.invoke(onoff, "SetAttribute", "DataRate", drv2)
+
+        # onoff.SetAttribute ("PacketSize", UintegerValue (1200));
+        uiv = wrapper.create("UintegerValue", 1200)
+        wrapper.invoke(onoff, "SetAttribute", "PacketSize", uiv)
+
+        # ApplicationContainer apps = onoff.Install (c.Get (0));
+        n1 = wrapper.invoke(c, "Get", 0)
+        apps = wrapper.invoke(onoff, "Install", n1)
+        
+        # apps.Start (Seconds (1.0));
+        s1 = wrapper.create("Seconds", 1.0)
+        wrapper.invoke(apps, "Start", s1)
+        
+        # apps.Stop (Seconds (10.0));
+        s2 = wrapper.create("Seconds", 10.0)
+        wrapper.invoke(apps, "Stop", s2)
+
+        ### create sink
+        # PacketSinkHelper sink = PacketSinkHelper ("ns3::Ipv4RawSocketFactory", dst);
+        sink = wrapper.create("PacketSinkHelper", "ns3::Ipv4RawSocketFactory", dst)
+        
+        # apps = sink.Install (c.Get (3));
+        n3 = wrapper.invoke(c, "Get", 3)
+        apps = wrapper.invoke (sink, "Install", n3)
+        
+        # apps.Start (Seconds (0.0));
+        s3 = wrapper.create ("Seconds", 0.0)
+        wrapper.invoke (apps, "Start", s3)
+        
+        # apps.Stop (Seconds (11.0));
+        s4 = wrapper.create ("Seconds", 11.0)
+        wrapper.invoke (apps, "Stop", s4)
+
+        ### create pinger
+        #V4PingHelper ping = V4PingHelper (addresses.GetAddress (2));
+        addr2 = wrapper.invoke(addresses, "GetAddress", 2)
+        ping = wrapper.create("V4PingHelper", addr2)
+        
+        #NodeContainer pingers;
+        pingers = wrapper.create("NodeContainer")
+        
+        #pingers.Add (c.Get (0));
+        n0 = wrapper.invoke(c, "Get", 0)
+        wrapper.invoke(pingers, "Add", n0)
+        
+        #pingers.Add (c.Get (1));
+        n1 = wrapper.invoke(c, "Get", 1)
+        wrapper.invoke(pingers, "Add", n1)
+        
+        #pingers.Add (c.Get (3));
+        n3 = wrapper.invoke(c, "Get", 3)
+        wrapper.invoke(pingers, "Add", n3)
+        
+        #apps = ping.Install (pingers);
+        apps = wrapper.invoke(ping, "Install", pingers)
+        
+        #apps.Start (Seconds (2.0));
+        s5 = wrapper.create ("Seconds", 2.0)
+        wrapper.invoke (apps, "Start", s5)
+        
+        #apps.Stop (Seconds (5.0));
+        s6 = wrapper.create ("Seconds", 5.0)
+        wrapper.invoke (apps, "Stop", s6)
+
+        def SinkRx(packet, address):
+            print packet
+
+        def PingRtt(context, rtt):
+            print context, rtt
+
+        ### configure tracing
+        #csma.EnablePcapAll ("csma-ping", false);
+        wrapper.invoke(csma, "EnablePcapAll", "csma-ping", False)
+       
+        # No binding for callback
+        #Config::ConnectWithoutContext ("/NodeList/3/ApplicationList/0/$ns3::PacketSink/Rx", 
+        # MakeCallback (&SinkRx));
+        #cb = wrapper.create("MakeCallback", SinkRx)
+        #wrapper.invoke(config, "ConnectWithoutContext", 
+        #        "/NodeList/3/ApplicationList/0/$ns3::PacketSink/Rx", cb)
+
+        # Config::Connect ("/NodeList/*/ApplicationList/*/$ns3::V4Ping/Rtt", 
+        # MakeCallback (&PingRtt));
+        #cb2 = wrapper.create("MakeCallback", PingRtt)
+        #wrapper.invoke(config, "ConnectWithoutContext", 
+        #        "/NodeList/*/ApplicationList/*/$ns3::V4Ping/Rtt", 
+        #        cb2)
+
+        # Packet::EnablePrinting ();
+        packet = wrapper.singleton("Packet")
+        wrapper.invoke(packet, "EnablePrinting")
+
+        ### run Simulation
+        # Simulator::Run ();
+        simulator = wrapper.singleton("Simulator")
+        wrapper.invoke(simulator, "Run")
+
+        # Simulator::Destroy ();
+        wrapper.invoke(simulator, "Destroy")
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/test/resources/omf/omf_vlc_exp.py b/test/resources/omf/omf_vlc_exp.py
new file mode 100755 (executable)
index 0000000..d9ab6c5
--- /dev/null
@@ -0,0 +1,262 @@
+#!/usr/bin/env python
+from nepi.execution.resource import ResourceFactory, ResourceManager, ResourceAction, ResourceState
+from nepi.execution.ec import ExperimentController
+
+from nepi.resources.omf.omf_node import OMFNode
+from nepi.resources.omf.omf_application import OMFApplication
+from nepi.resources.omf.omf_interface import OMFWifiInterface
+from nepi.resources.omf.omf_channel import OMFChannel
+from nepi.resources.omf.omf_api import OMFAPIFactory
+
+from nepi.util import guid
+from nepi.util.timefuncs import *
+
+import time
+import unittest
+import logging
+
+logging.basicConfig()
+
+
+class DummyEC(ExperimentController):
+    pass
+
+class DummyRM(ResourceManager):
+    pass
+
+
+class OMFResourceFactoryTestCase(unittest.TestCase):
+
+    def test_creation_phase(self):
+        ResourceFactory.register_type(OMFNode)
+        ResourceFactory.register_type(OMFWifiInterface)
+        ResourceFactory.register_type(OMFChannel)
+        ResourceFactory.register_type(OMFApplication)
+
+        self.assertEquals(OMFNode.rtype(), "OMFNode")
+        self.assertEquals(len(OMFNode._attributes), 7)
+
+        self.assertEquals(OMFWifiInterface.rtype(), "OMFWifiInterface")
+        self.assertEquals(len(OMFWifiInterface._attributes), 9)
+
+        self.assertEquals(OMFChannel.rtype(), "OMFChannel")
+        self.assertEquals(len(OMFChannel._attributes), 5)
+
+        self.assertEquals(OMFApplication.rtype(), "OMFApplication")
+        self.assertEquals(len(OMFApplication._attributes), 8)
+
+        self.assertEquals(len(ResourceFactory.resource_types()), 4)
+
+
+class OMFVLCTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.ec = DummyEC()
+        ResourceFactory.register_type(OMFNode)
+        ResourceFactory.register_type(OMFWifiInterface)
+        ResourceFactory.register_type(OMFChannel)
+        ResourceFactory.register_type(OMFApplication)
+
+    def tearDown(self):
+        self.ec.shutdown()
+
+    def test_creation_and_configuration_node(self):
+
+        node1 = self.ec.register_resource("OMFNode")
+        self.ec.set(node1, 'hostname', 'omf.plexus.wlab17')
+        self.ec.set(node1, 'xmppSlice', "nepi")
+        self.ec.set(node1, 'xmppHost', "xmpp-plexus.onelab.eu")
+        self.ec.set(node1, 'xmppPort', "5222")
+        self.ec.set(node1, 'xmppPassword', "1234")
+
+        self.assertEquals(self.ec.get(node1, 'hostname'), 'omf.plexus.wlab17')
+        self.assertEquals(self.ec.get(node1, 'xmppSlice'), 'nepi')
+        self.assertEquals(self.ec.get(node1, 'xmppHost'), 'xmpp-plexus.onelab.eu')
+        self.assertEquals(self.ec.get(node1, 'xmppPort'), '5222')
+        self.assertEquals(self.ec.get(node1, 'xmppPassword'), '1234')
+
+    def test_creation_and_configuration_interface(self):
+
+        iface1 = self.ec.register_resource("OMFWifiInterface")
+        self.ec.set(iface1, 'alias', "w0")
+        self.ec.set(iface1, 'mode', "adhoc")
+        self.ec.set(iface1, 'type', "g")
+        self.ec.set(iface1, 'essid', "vlcexp")
+        self.ec.set(iface1, 'ip', "10.0.0.17")
+        self.ec.set(iface1, 'xmppSlice', "nepi")
+        self.ec.set(iface1, 'xmppHost', "xmpp-plexus.onelab.eu")
+        self.ec.set(iface1, 'xmppPort', "5222")
+        self.ec.set(iface1, 'xmppPassword', "1234")
+
+        self.assertEquals(self.ec.get(iface1, 'alias'), 'w0')
+        self.assertEquals(self.ec.get(iface1, 'mode'), 'adhoc')
+        self.assertEquals(self.ec.get(iface1, 'type'), 'g')
+        self.assertEquals(self.ec.get(iface1, 'essid'), 'vlcexp')
+        self.assertEquals(self.ec.get(iface1, 'ip'), '10.0.0.17')
+        self.assertEquals(self.ec.get(iface1, 'xmppSlice'), 'nepi')
+        self.assertEquals(self.ec.get(iface1, 'xmppHost'), 'xmpp-plexus.onelab.eu')
+        self.assertEquals(self.ec.get(iface1, 'xmppPort'), '5222')
+        self.assertEquals(self.ec.get(iface1, 'xmppPassword'), '1234')
+
+    def test_creation_and_configuration_channel(self):
+
+        channel = self.ec.register_resource("OMFChannel")
+        self.ec.set(channel, 'channel', "6")
+        self.ec.set(channel, 'xmppSlice', "nepi")
+        self.ec.set(channel, 'xmppHost', "xmpp-plexus.onelab.eu")
+        self.ec.set(channel, 'xmppPort', "5222")
+        self.ec.set(channel, 'xmppPassword', "1234")
+
+        self.assertEquals(self.ec.get(channel, 'channel'), '6')
+        self.assertEquals(self.ec.get(channel, 'xmppSlice'), 'nepi')
+        self.assertEquals(self.ec.get(channel, 'xmppHost'), 'xmpp-plexus.onelab.eu')
+        self.assertEquals(self.ec.get(channel, 'xmppPort'), '5222')
+        self.assertEquals(self.ec.get(channel, 'xmppPassword'), '1234')
+
+    def test_creation_and_configuration_application(self):
+
+        app1 = self.ec.register_resource("OMFApplication")
+        self.ec.set(app1, 'appid', 'Vlc#1')
+        self.ec.set(app1, 'path', "/opt/vlc-1.1.13/cvlc")
+        self.ec.set(app1, 'args', "/opt/10-by-p0d.avi --sout '#rtp{dst=10.0.0.37,port=1234,mux=ts}'")
+        self.ec.set(app1, 'env', "DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority")
+        self.ec.set(app1, 'xmppSlice', "nepi")
+        self.ec.set(app1, 'xmppHost', "xmpp-plexus.onelab.eu")
+        self.ec.set(app1, 'xmppPort', "5222")
+        self.ec.set(app1, 'xmppPassword', "1234")
+
+        self.assertEquals(self.ec.get(app1, 'appid'), 'Vlc#1')
+        self.assertEquals(self.ec.get(app1, 'path'), '/opt/vlc-1.1.13/cvlc')
+        self.assertEquals(self.ec.get(app1, 'args'), "/opt/10-by-p0d.avi --sout '#rtp{dst=10.0.0.37,port=1234,mux=ts}'")
+        self.assertEquals(self.ec.get(app1, 'env'), 'DISPLAY=localhost:10.0 XAUTHORITY=/root/.Xauthority')
+        self.assertEquals(self.ec.get(app1, 'xmppSlice'), 'nepi')
+        self.assertEquals(self.ec.get(app1, 'xmppHost'), 'xmpp-plexus.onelab.eu')
+        self.assertEquals(self.ec.get(app1, 'xmppPort'), '5222')
+        self.assertEquals(self.ec.get(app1, 'xmppPassword'), '1234')
+
+    def test_connection(self):
+
+        node1 = self.ec.register_resource("OMFNode")
+        iface1 = self.ec.register_resource("OMFWifiInterface")
+        channel = self.ec.register_resource("OMFChannel")
+        app1 = self.ec.register_resource("OMFApplication")
+        app2 = self.ec.register_resource("OMFApplication")
+
+        self.ec.register_connection(app1, node1)
+        self.ec.register_connection(app2, node1)
+        self.ec.register_connection(node1, iface1)
+        self.ec.register_connection(iface1, channel)
+
+        self.assertEquals(len(self.ec.get_resource(node1).connections), 3)
+        self.assertEquals(len(self.ec.get_resource(iface1).connections), 2)
+        self.assertEquals(len(self.ec.get_resource(channel).connections), 1)
+        self.assertEquals(len(self.ec.get_resource(app1).connections), 1)
+        self.assertEquals(len(self.ec.get_resource(app2).connections), 1)
+
+    def test_condition(self):
+
+        node1 = self.ec.register_resource("OMFNode")
+        iface1 = self.ec.register_resource("OMFWifiInterface")
+        channel = self.ec.register_resource("OMFChannel")
+        app1 = self.ec.register_resource("OMFApplication")
+        app2 = self.ec.register_resource("OMFApplication")
+
+        self.ec.register_connection(app1, node1)
+        self.ec.register_connection(app2, node1)
+        self.ec.register_connection(node1, iface1)
+        self.ec.register_connection(iface1, channel)
+
+        self.ec.register_condition(app2, ResourceAction.START, app1, ResourceState.STARTED , "4s")
+
+        self.assertEquals(len(self.ec.get_resource(app2).conditions), 1)
+
+    def test_deploy(self):
+        node1 = self.ec.register_resource("OMFNode")
+        self.ec.set(node1, 'hostname', 'omf.plexus.wlab17')
+        self.ec.set(node1, 'xmppSlice', "nepi")
+        self.ec.set(node1, 'xmppHost', "xmpp-plexus.onelab.eu")
+        self.ec.set(node1, 'xmppPort', "5222")
+        self.ec.set(node1, 'xmppPassword', "1234")
+        
+        iface1 = self.ec.register_resource("OMFWifiInterface")
+        self.ec.set(iface1, 'alias', "w0")
+        self.ec.set(iface1, 'mode', "adhoc")
+        self.ec.set(iface1, 'type', "g")
+        self.ec.set(iface1, 'essid', "vlcexp")
+        self.ec.set(iface1, 'ip', "10.0.0.17")
+        self.ec.set(iface1, 'xmppSlice', "nepi")
+        self.ec.set(iface1, 'xmppHost', "xmpp-plexus.onelab.eu")
+        self.ec.set(iface1, 'xmppPort', "5222")
+        self.ec.set(iface1, 'xmppPassword', "1234")
+        
+        channel = self.ec.register_resource("OMFChannel")
+        self.ec.set(channel, 'channel', "6")
+        self.ec.set(channel, 'xmppSlice', "nepi")
+        self.ec.set(channel, 'xmppHost', "xmpp-plexus.onelab.eu")
+        self.ec.set(channel, 'xmppPort', "5222")
+        self.ec.set(channel, 'xmppPassword', "1234")
+        
+        app1 = self.ec.register_resource("OMFApplication")
+        self.ec.set(app1, 'xmppSlice', "nepi")
+        self.ec.set(app1, 'xmppHost', "xmpp-plexus.onelab.eu")
+        self.ec.set(app1, 'xmppPort', "5222")
+        self.ec.set(app1, 'xmppPassword', "1234")
+
+        app2 = self.ec.register_resource("OMFApplication")
+        self.ec.set(app2, 'xmppSlice', "nepi")
+        self.ec.set(app2, 'xmppHost', "xmpp-plexus.onelab.eu")
+        self.ec.set(app2, 'xmppPort', "5222")
+        self.ec.set(app2, 'xmppPassword', "1234")
+
+        app3 = self.ec.register_resource("OMFApplication")
+        self.ec.set(app3, 'xmppSlice', "nepi")
+        self.ec.set(app3, 'xmppHost', "xmpp-plexus.onelab.eu")
+        self.ec.set(app3, 'xmppPort', "5222")
+        self.ec.set(app3, 'xmppPassword', "1234")
+
+        app4 = self.ec.register_resource("OMFApplication")
+        self.ec.set(app4, 'xmppSlice', "nepi")
+        self.ec.set(app4, 'xmppHost', "xmpp-plexus.onelab.eu")
+        self.ec.set(app4, 'xmppPort', "5222")
+        self.ec.set(app4, 'xmppPassword', "1234")
+
+        app5 = self.ec.register_resource("OMFApplication")
+        self.ec.set(app5, 'xmppSlice', "nepi")
+        self.ec.set(app5, 'xmppHost', "xmpp-plexus.onelab.eu")
+        self.ec.set(app5, 'xmppPort', "5222")
+        self.ec.set(app5, 'xmppPassword', "1234")
+
+        self.ec.register_connection(app1, node1)
+        self.ec.register_connection(app2, node1)
+        self.ec.register_connection(app3, node1)
+        self.ec.register_connection(app4, node1)
+        self.ec.register_connection(app5, node1)
+        self.ec.register_connection(node1, iface1)
+        self.ec.register_connection(iface1, channel)
+
+        self.ec.register_condition(app2, ResourceAction.START, app1, ResourceState.STARTED , "3s")
+        self.ec.register_condition(app3, ResourceAction.START, app2, ResourceState.STARTED , "2s")
+        self.ec.register_condition(app4, ResourceAction.START, app3, ResourceState.STARTED , "3s")
+        self.ec.register_condition(app5, ResourceAction.START, [app3, app2], ResourceState.STARTED , "2s")
+        self.ec.register_condition(app5, ResourceAction.START, app1, ResourceState.STARTED , "1m20s")
+
+        self.ec.deploy()
+        time.sleep(150)
+
+        self.assertEquals(round(strfdiff(self.ec.get_resource(app2).start_time, self.ec.get_resource(app1).start_time),1), 3.0)
+        self.assertEquals(round(strfdiff(self.ec.get_resource(app3).start_time, self.ec.get_resource(app2).start_time),1), 2.0)
+        self.assertEquals(round(strfdiff(self.ec.get_resource(app4).start_time, self.ec.get_resource(app3).start_time),1), 3.0)
+        self.assertEquals(round(strfdiff(self.ec.get_resource(app5).start_time, self.ec.get_resource(app3).start_time),1), 2.0)
+        self.assertEquals(round(strfdiff(self.ec.get_resource(app5).start_time, self.ec.get_resource(app1).start_time),1), 7.0)
+
+        # Timing precision is only 1/10 s, so the exact (unrounded) comparison fails: 7.03 != 7.0
+        #self.assertEquals(strfdiff(self.ec.get_resource(app5).start_time, self.ec.get_resource(app1).start_time), 7)
+        # In order to release everything
+        time.sleep(5)
+
+
+if __name__ == '__main__':
+    unittest.main()
+
+
+
diff --git a/test/testbeds/netns/design.py b/test/testbeds/netns/design.py
deleted file mode 100755 (executable)
index b545e6f..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-import os
-import shutil
-import test_util
-import unittest
-import uuid
-
-class NetnsDesignTestCase(unittest.TestCase):
-    def test_design_if(self):
-        testbed_id = "netns"
-        exp_desc = ExperimentDescription()
-        provider = FactoriesProvider(testbed_id)
-        
-        tstbd_desc = exp_desc.add_testbed_description(provider)
-        tstbd_desc.set_attribute_value("enableDebug", True)
-        node1 = tstbd_desc.create("Node")
-        node2 = tstbd_desc.create("Node")
-        iface1 = tstbd_desc.create("NodeInterface")
-        iface1.set_attribute_value("up", True)
-        node1.connector("devs").connect(iface1.connector("node"))
-        ip1 = iface1.add_address()
-        ip1.set_attribute_value("Address", "10.0.0.1")
-        iface2 = tstbd_desc.create("NodeInterface")
-        iface2.set_attribute_value("up", True)
-        node2.connector("devs").connect(iface2.connector("node"))
-        ip2 = iface2.add_address()
-        ip2.set_attribute_value("Address", "10.0.0.2")
-        switch = tstbd_desc.create("Switch")
-        switch.set_attribute_value("up", True)
-        iface1.connector("switch").connect(switch.connector("devs"))
-        iface2.connector("switch").connect(switch.connector("devs"))
-        app = tstbd_desc.create("Application")
-        app.set_attribute_value("command", "ping -qc10 10.0.0.2")
-        app.connector("node").connect(node1.connector("apps"))
-        
-        xml = exp_desc.to_xml()
-        exp_desc2 = ExperimentDescription()
-        exp_desc2.from_xml(xml)
-        xml2 = exp_desc2.to_xml()
-        self.assertTrue(xml == xml2)
-        
-if __name__ == '__main__':
-    unittest.main()
diff --git a/test/testbeds/netns/execute.py b/test/testbeds/netns/execute.py
deleted file mode 100755 (executable)
index 5e361f1..0000000
+++ /dev/null
@@ -1,220 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import getpass
-from nepi.util.constants import ApplicationStatus as AS
-from nepi.testbeds import netns
-import os
-import shutil
-import tempfile
-import test_util
-import time
-import unittest
-
-class NetnsExecuteTestCase(unittest.TestCase):
-    def setUp(self):
-        self.root_dir = tempfile.mkdtemp()
-
-    @test_util.skipUnless(os.getuid() == 0, "Test requires root privileges")
-    def test_run_ping_if(self):
-        user = getpass.getuser()
-        instance = netns.TestbedController()
-        instance.defer_configure("homeDirectory", self.root_dir)
-        instance.defer_create(2, "Node")
-        instance.defer_create(3, "Node")
-        instance.defer_create(4, "NodeInterface")
-        instance.defer_create_set(4, "up", True)
-        instance.defer_connect(2, "devs", 4, "node")
-        instance.defer_add_address(4, "10.0.0.1", 24, None)
-        instance.defer_create(5, "NodeInterface")
-        instance.defer_create_set(5, "up", True)
-        instance.defer_connect(3, "devs", 5, "node")
-        instance.defer_add_address(5, "10.0.0.2", 24, None)
-        instance.defer_create(6, "Switch")
-        instance.defer_create_set(6, "up", True)
-        instance.defer_connect(4, "switch", 6, "devs")
-        instance.defer_connect(5, "switch", 6, "devs")
-        instance.defer_create(7, "Application")
-        instance.defer_create_set(7, "command", "ping -qc1 10.0.0.2")
-        instance.defer_create_set(7, "user", user)
-        instance.defer_add_trace(7, "stdout")
-        instance.defer_connect(7, "node", 2, "apps")
-
-        instance.do_setup()
-        instance.do_create()
-        instance.do_connect_init()
-        instance.do_connect_compl()
-        instance.do_preconfigure()
-        instance.do_configure()
-        instance.do_prestart()
-        instance.start()
-
-        while instance.status(7) != AS.STATUS_FINISHED:
-            time.sleep(0.5)
-        ping_result = instance.trace(7, "stdout")
-        comp_result = """PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
-
---- 10.0.0.2 ping statistics ---
-1 packets transmitted, 1 received, 0% packet loss, time 0ms
-"""
-        self.assertTrue(ping_result.startswith(comp_result))
-        instance.stop()
-        instance.shutdown()
-
-    @test_util.skipUnless(os.getuid() == 0, "Test requires root privileges")
-    def test_run_ping_p2pif(self):
-        user = getpass.getuser()
-        instance = netns.TestbedController()
-        instance.defer_configure("homeDirectory", self.root_dir)
-        instance.defer_create(2, "Node")
-        instance.defer_create(3, "Node")
-        instance.defer_create(4, "P2PNodeInterface")
-        instance.defer_create_set(4, "up", True)
-        instance.defer_connect(2, "devs", 4, "node")
-        instance.defer_add_address(4, "10.0.0.1", 24, None)
-        instance.defer_create(5, "P2PNodeInterface")
-        instance.defer_create_set(5, "up", True)
-        instance.defer_connect(3, "devs", 5, "node")
-        instance.defer_add_address(5, "10.0.0.2", 24, None)
-        instance.defer_connect(4, "p2p", 5, "p2p")
-        instance.defer_create(6, "Application")
-        instance.defer_create_set(6, "command", "ping -qc1 10.0.0.2")
-        instance.defer_create_set(6, "user", user)
-        instance.defer_add_trace(6, "stdout")
-        instance.defer_connect(6, "node", 2, "apps")
-
-        instance.do_setup()
-        instance.do_create()
-        instance.do_connect_init()
-        instance.do_connect_compl()
-        instance.do_preconfigure()
-        instance.do_configure()
-        instance.do_prestart()
-        instance.start()
-
-        while instance.status(6) != AS.STATUS_FINISHED:
-            time.sleep(0.5)
-        ping_result = instance.trace(6, "stdout")
-        comp_result = """PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
-
---- 10.0.0.2 ping statistics ---
-1 packets transmitted, 1 received, 0% packet loss, time 0ms
-"""
-        self.assertTrue(ping_result.startswith(comp_result))
-        instance.stop()
-        instance.shutdown()
-
-    @test_util.skipUnless(os.getuid() == 0, "Test requires root privileges")
-    def test_run_ping_routing(self):
-        user = getpass.getuser()
-        instance = netns.TestbedController()
-        instance.defer_configure("homeDirectory", self.root_dir)
-        #instance.defer_configure("enableDebug", True)
-        instance.defer_create(2, "Node")
-        instance.defer_create(3, "Node")
-        instance.defer_create(4, "Node")
-        instance.defer_create(5, "NodeInterface")
-        instance.defer_create_set(5, "up", True)
-        instance.defer_connect(2, "devs", 5, "node")
-        instance.defer_add_address(5, "10.0.0.1", 24, None)
-        instance.defer_create(6, "NodeInterface")
-        instance.defer_create_set(6, "up", True)
-        instance.defer_connect(3, "devs", 6, "node")
-        instance.defer_add_address(6, "10.0.0.2", 24, None)
-        instance.defer_create(7, "NodeInterface")
-        instance.defer_create_set(7, "up", True)
-        instance.defer_connect(3, "devs", 7, "node")
-        instance.defer_add_address(7, "10.0.1.1", 24, None)
-        instance.defer_create(8, "NodeInterface")
-        instance.defer_create_set(8, "up", True)
-        instance.defer_connect(4, "devs", 8, "node")
-        instance.defer_add_address(8, "10.0.1.2", 24, None)
-        instance.defer_create(9, "Switch")
-        instance.defer_create_set(9, "up", True)
-        instance.defer_connect(5, "switch", 9, "devs")
-        instance.defer_connect(6, "switch", 9, "devs")
-        instance.defer_create(10, "Switch")
-        instance.defer_create_set(10, "up", True)
-        instance.defer_connect(7, "switch", 10, "devs")
-        instance.defer_connect(8, "switch", 10, "devs")
-        instance.defer_create(11, "Application")
-        instance.defer_create_set(11, "command", "ping -qc1 10.0.1.2")
-        instance.defer_create_set(11, "user", user)
-        instance.defer_add_trace(11, "stdout")
-        instance.defer_connect(11, "node", 2, "apps")
-
-        instance.defer_add_route(2, "10.0.1.0", 24, "10.0.0.2")
-        instance.defer_add_route(4, "10.0.0.0", 24, "10.0.1.1")
-
-        instance.do_setup()
-        instance.do_create()
-        instance.do_connect_init()
-        instance.do_connect_compl()
-        instance.do_preconfigure()
-        instance.do_configure()
-        instance.do_prestart()
-        instance.start()
-
-        while instance.status(11) != AS.STATUS_FINISHED:
-            time.sleep(0.5)
-        ping_result = instance.trace(11, "stdout")
-        comp_result = """PING 10.0.1.2 (10.0.1.2) 56(84) bytes of data.
-
---- 10.0.1.2 ping statistics ---
-1 packets transmitted, 1 received, 0% packet loss, time 0ms
-"""
-        self.assertTrue(ping_result.startswith(comp_result))
-        instance.stop()
-        instance.shutdown()
-
-    @test_util.skipUnless(os.getuid() == 0, "Test requires root privileges")
-    def test_node_pcap_trace(self):
-        user = getpass.getuser()
-        instance = netns.TestbedController()
-        instance.defer_configure("homeDirectory", self.root_dir)
-        instance.defer_create(2, "Node")
-        instance.defer_add_trace(2, "pcap")
-        instance.defer_create(3, "Node")
-        instance.defer_create(4, "P2PNodeInterface")
-        instance.defer_create_set(4, "up", True)
-        instance.defer_connect(2, "devs", 4, "node")
-        instance.defer_add_address(4, "10.0.0.1", 24, None)
-        instance.defer_create(5, "P2PNodeInterface")
-        instance.defer_create_set(5, "up", True)
-        instance.defer_connect(3, "devs", 5, "node")
-        instance.defer_add_address(5, "10.0.0.2", 24, None)
-        instance.defer_connect(4, "p2p", 5, "p2p")
-        instance.defer_create(6, "Application")
-        instance.defer_add_trace(6, "stdout")
-        instance.defer_create_set(6, "command", "ping -qc5 10.0.0.2")
-        instance.defer_create_set(6, "user", user)
-        instance.defer_connect(6, "node", 2, "apps")
-
-        instance.do_setup()
-        instance.do_create()
-        instance.do_connect_init()
-        instance.do_connect_compl()
-        instance.do_preconfigure()
-        instance.do_configure()
-        instance.do_prestart()
-        instance.start()
-
-        while instance.status(6) != AS.STATUS_FINISHED:
-            time.sleep(0.5)
-        
-        pcap_result = instance.trace(2, "pcap")
-        self.assertEquals(len(pcap_result), 1024)
-        instance.stop()
-        instance.shutdown()
-
-    def tearDown(self):
-        try:
-            shutil.rmtree(self.root_dir)
-        except:
-            # retry
-            time.sleep(0.1)
-            shutil.rmtree(self.root_dir)
-
-if __name__ == '__main__':
-    unittest.main()
-
diff --git a/test/testbeds/netns/integration.py b/test/testbeds/netns/integration.py
deleted file mode 100755 (executable)
index 6da9567..0000000
+++ /dev/null
@@ -1,190 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import getpass
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-from nepi.util import proxy
-from nepi.util.constants import DeploymentConfiguration as DC
-import os
-import shutil
-import tempfile
-import test_util
-import time
-import unittest
-
-class NetnsIntegrationTestCase(unittest.TestCase):
-    def setUp(self):
-        self.root_dir = tempfile.mkdtemp()
-
-    def _test_switched(self, controller_access_config = None,
-            testbed_access_config = None):
-        testbed_id = "netns"
-        exp_desc = ExperimentDescription()
-        user = getpass.getuser()
-        netns_provider = FactoriesProvider(testbed_id)
-        
-        netns_desc = exp_desc.add_testbed_description(netns_provider)
-        netns_desc.set_attribute_value("homeDirectory", self.root_dir)
-        #netns_desc.set_attribute_value("enableDebug", True)
-        node1 = netns_desc.create("Node")
-        node2 = netns_desc.create("Node")
-        iface1 = netns_desc.create("NodeInterface")
-        iface1.set_attribute_value("up", True)
-        node1.connector("devs").connect(iface1.connector("node"))
-        ip1 = iface1.add_address()
-        ip1.set_attribute_value("Address", "10.0.0.1")
-        iface2 = netns_desc.create("NodeInterface")
-        iface2.set_attribute_value("up", True)
-        node2.connector("devs").connect(iface2.connector("node"))
-        ip2 = iface2.add_address()
-        ip2.set_attribute_value("Address", "10.0.0.2")
-        switch = netns_desc.create("Switch")
-        switch.set_attribute_value("up", True)
-        iface1.connector("switch").connect(switch.connector("devs"))
-        iface2.connector("switch").connect(switch.connector("devs"))
-        app = netns_desc.create("Application")
-        app.set_attribute_value("command", "ping -qc1 10.0.0.2")
-        app.set_attribute_value("user", user)
-        app.connector("node").connect(node1.connector("apps"))
-        app.enable_trace("stdout")
-
-        if testbed_access_config:
-            for attr in testbed_access_config.attributes:
-                if attr.value:
-                    netns_desc.set_attribute_value(attr.name, attr.value)
-
-        xml = exp_desc.to_xml()
-
-        controller = proxy.create_experiment_controller(xml, 
-                controller_access_config)
-        
-        try:
-            controller.start()
-            while not controller.is_finished(app.guid):
-                time.sleep(0.5)
-            ping_result = controller.trace(app.guid, "stdout")
-            comp_result = """PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
-
---- 10.0.0.2 ping statistics ---
-1 packets transmitted, 1 received, 0% packet loss, time 0ms
-"""
-            self.assertTrue(ping_result.startswith(comp_result))
-        finally:
-            controller.stop()
-            controller.shutdown()
-
-    @test_util.skipUnless(os.getuid() == 0, "Test requires root privileges")
-    def test_switched(self):
-        self._test_switched()
-
-    @test_util.skipUnless(os.getuid() == 0, "Test requires root privileges")
-    def test_daemonized_controller(self):
-        access_config = proxy.AccessConfiguration()
-        access_config.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        access_config.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-        access_config.set_attribute_value(DC.ROOT_DIRECTORY, self.root_dir)
-        
-        self._test_switched(controller_access_config = access_config)
-
-    @test_util.skipUnless(os.getuid() == 0, "Test requires root privileges")
-    def test_daemonized_tbd(self):
-        access_config = proxy.AccessConfiguration()
-        access_config.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        access_config.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-        inst_root_dir = os.path.join(self.root_dir, "instance")
-        os.mkdir(inst_root_dir)
-        access_config.set_attribute_value(DC.ROOT_DIRECTORY, inst_root_dir)
-        
-        self._test_switched(testbed_access_config = access_config)
-
-    @test_util.skipUnless(os.getuid() == 0, "Test requires root privileges")
-    def test_daemonized_all(self):
-        controller_access_config = proxy.AccessConfiguration()
-        controller_access_config.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        controller_access_config.set_attribute_value(DC.ROOT_DIRECTORY, self.root_dir)
-        controller_access_config.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-
-        testbed_access_config = proxy.AccessConfiguration()
-        testbed_access_config.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        testbed_access_config.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-        inst_root_dir = os.path.join(self.root_dir, "instance")
-        os.mkdir(inst_root_dir)
-        testbed_access_config.set_attribute_value(DC.ROOT_DIRECTORY, inst_root_dir)
-        
-        self._test_switched(
-                controller_access_config = controller_access_config,
-                testbed_access_config = testbed_access_config)
-
-    @test_util.skipUnless(os.getuid() == 0, "Test requires root privileges")
-    def test_ssh_daemonized_tbd(self):
-        env = test_util.test_environment()
-
-        testbed_access_config = proxy.AccessConfiguration()
-        testbed_access_config.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        testbed_access_config.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-        testbed_access_config.set_attribute_value(DC.DEPLOYMENT_COMMUNICATION, DC.ACCESS_SSH)
-        testbed_access_config.set_attribute_value(DC.DEPLOYMENT_PORT, env.port)
-        testbed_access_config.set_attribute_value(DC.USE_AGENT, True)
-        inst_root_dir = os.path.join(self.root_dir, "instance")
-        os.mkdir(inst_root_dir)
-        testbed_access_config.set_attribute_value(DC.ROOT_DIRECTORY, inst_root_dir)
-        
-        self._test_switched(
-                testbed_access_config = testbed_access_config)
-
-    def DISABLED_test_sudo_daemonized_tbd(self):
-        env = test_util.test_environment()
-
-        testbed_access_config = proxy.AccessConfiguration()
-        testbed_access_config.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        testbed_access_config.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-        testbed_access_config.set_attribute_value(DC.DEPLOYMENT_COMMUNICATION, DC.ACCESS_SSH)
-        testbed_access_config.set_attribute_value(DC.DEPLOYMENT_PORT, env.port)
-        testbed_access_config.set_attribute_value(DC.USE_AGENT, True)
-        testbed_access_config.set_attribute_value(DC.USE_SUDO, True)
-        inst_root_dir = os.path.join(self.root_dir, "instance")
-        os.mkdir(inst_root_dir)
-        testbed_access_config.set_attribute_value(DC.ROOT_DIRECTORY, inst_root_dir)
-        
-        self._test_switched(
-                testbed_access_config = testbed_access_config)
-
-    @test_util.skipUnless(os.getuid() == 0, "Test requires root privileges")
-    def test_ssh_daemonized_all(self):
-        env = test_util.test_environment()
-
-        controller_access_config = proxy.AccessConfiguration()
-        controller_access_config.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        controller_access_config.set_attribute_value(DC.ROOT_DIRECTORY, self.root_dir)
-        controller_access_config.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-        controller_access_config.set_attribute_value(DC.DEPLOYMENT_COMMUNICATION, DC.ACCESS_SSH)
-        controller_access_config.set_attribute_value(DC.DEPLOYMENT_PORT, env.port)
-        controller_access_config.set_attribute_value(DC.USE_AGENT, True)
-
-        testbed_access_config = proxy.AccessConfiguration()
-        testbed_access_config.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        testbed_access_config.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-        # BUG! IT DOESN'T WORK WITH 2 LEVELS OF SSH!
-        #testbed_access_config.set_attribute_value(DC.DEPLOYMENT_COMMUNICATION, DC.ACCESS_SSH)
-        #testbed_access_config.set_attribute_value(DC.DEPLOYMENT_PORT, env.port)
-        #testbed_access_config.set_attribute_value(DC.USE_AGENT, True)
-        inst_root_dir = os.path.join(self.root_dir, "instance")
-        os.mkdir(inst_root_dir)
-        testbed_access_config.set_attribute_value(DC.ROOT_DIRECTORY, inst_root_dir)
-        
-        self._test_switched(
-                controller_access_config = controller_access_config,
-                testbed_access_config = testbed_access_config)
-
-    def tearDown(self):
-        try:
-            shutil.rmtree(self.root_dir)
-        except:
-            # retry
-            time.sleep(0.1)
-            shutil.rmtree(self.root_dir)
-
-if __name__ == '__main__':
-    unittest.main()
-
diff --git a/test/testbeds/ns3/design.py b/test/testbeds/ns3/design.py
deleted file mode 100755 (executable)
index 8d034cd..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-import os
-import shutil
-import test_util
-import unittest
-import uuid
-
-class Ns3DesignTestCase(unittest.TestCase):
-    def test_design_if(self):
-        testbed_id = "ns3"
-        exp_desc = ExperimentDescription()
-        provider = FactoriesProvider(testbed_id)
-        tstbd_desc = exp_desc.add_testbed_description(provider)
-
-        node1 = tstbd_desc.create("ns3::Node")
-        ipv41 = tstbd_desc.create("ns3::Ipv4L3Protocol")
-        arp1  = tstbd_desc.create("ns3::ArpL3Protocol")
-        icmp1 = tstbd_desc.create("ns3::Icmpv4L4Protocol")
-        udp1 = tstbd_desc.create("ns3::UdpL4Protocol")
-        node1.connector("protos").connect(ipv41.connector("node"))
-        node1.connector("protos").connect(arp1.connector("node"))
-        node1.connector("protos").connect(icmp1.connector("node"))
-        node1.connector("protos").connect(udp1.connector("node"))
-        iface1 = tstbd_desc.create("ns3::PointToPointNetDevice")
-        queue1 = tstbd_desc.create("ns3::DropTailQueue")
-        node1.connector("devs").connect(iface1.connector("node"))
-        iface1.connector("queue").connect(queue1.connector("dev"))
-        trace1 = iface1.enable_trace("P2PPcapTrace")
-        ip1 = iface1.add_address()
-        ip1.set_attribute_value("Address", "10.0.0.1")
-
-        node2 = tstbd_desc.create("ns3::Node")
-        ipv42 = tstbd_desc.create("ns3::Ipv4L3Protocol")
-        arp2  = tstbd_desc.create("ns3::ArpL3Protocol")
-        icmp2 = tstbd_desc.create("ns3::Icmpv4L4Protocol")
-        udp2 = tstbd_desc.create("ns3::UdpL4Protocol")
-        node2.connector("protos").connect(ipv42.connector("node"))
-        node2.connector("protos").connect(arp2.connector("node"))
-        node2.connector("protos").connect(icmp2.connector("node"))
-        node2.connector("protos").connect(udp2.connector("node"))
-        iface2 = tstbd_desc.create("ns3::PointToPointNetDevice")
-        queue2 = tstbd_desc.create("ns3::DropTailQueue")
-        node2.connector("devs").connect(iface2.connector("node"))
-        iface2.connector("queue").connect(queue2.connector("dev"))
-        trace2 = iface2.enable_trace("P2PPcapTrace")
-        ip2 = iface2.add_address()
-        ip2.set_attribute_value("Address", "10.0.0.2")
-
-        chan = tstbd_desc.create("ns3::PointToPointChannel")
-        iface1.connector("chan").connect(chan.connector("dev2"))
-        iface2.connector("chan").connect(chan.connector("dev2"))
-
-        app = tstbd_desc.create("ns3::V4Ping")
-        app.set_attribute_value("Remote", "10.0.0.2")
-        app.set_attribute_value("StartTime", "0s")
-        app.set_attribute_value("StopTime", "20s")
-        app.set_attribute_value("Verbose", False)
-        app.connector("node").connect(node1.connector("apps"))
-
-        xml = exp_desc.to_xml()
-        exp_desc2 = ExperimentDescription()
-        exp_desc2.from_xml(xml)
-        xml2 = exp_desc2.to_xml()
-        self.assertTrue(xml == xml2)
-        
-if __name__ == '__main__':
-    unittest.main()
-
diff --git a/test/testbeds/ns3/execute.py b/test/testbeds/ns3/execute.py
deleted file mode 100755 (executable)
index 9a6e236..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-#!/usr/bin/env python
-
-from nepi.util.constants import ApplicationStatus as AS
-from nepi.testbeds import ns3
-import os
-import shutil
-import tempfile
-import test_util
-import time
-import unittest
-
-class Ns3ExecuteTestCase(unittest.TestCase):
-    def setUp(self):
-        self.root_dir = tempfile.mkdtemp()
-
-    @test_util.skipUnless(test_util.ns3_usable(), 
-           "Test requires working ns-3 bindings")
-    def test_run_ping_if(self):
-        instance = ns3.TestbedController()
-        instance.defer_configure("homeDirectory", self.root_dir)
-        instance.defer_create(2, "ns3::Node")
-        instance.defer_create(3, "ns3::Ipv4L3Protocol")
-        instance.defer_create(4, "ns3::ArpL3Protocol")
-        instance.defer_create(5, "ns3::Icmpv4L4Protocol")
-        instance.defer_create(6, "ns3::UdpL4Protocol")
-        instance.defer_connect(2, "protos", 3, "node")
-        instance.defer_connect(2, "protos", 4, "node")
-        instance.defer_connect(2, "protos", 5, "node")
-        instance.defer_connect(2, "protos", 6, "node")
-        instance.defer_create(7, "ns3::PointToPointNetDevice")
-        instance.defer_create(8, "ns3::DropTailQueue")
-        instance.defer_connect(2, "devs", 7, "node")
-        instance.defer_connect(7, "queue", 8, "dev")
-        instance.defer_add_trace(7, "P2PAsciiTrace")
-        instance.defer_add_address(7, "10.0.0.1", 24, None)
-
-        instance.defer_create(9, "ns3::Node")
-        instance.defer_create(10, "ns3::Ipv4L3Protocol")
-        instance.defer_create(11, "ns3::ArpL3Protocol")
-        instance.defer_create(12, "ns3::Icmpv4L4Protocol")
-        instance.defer_create(13, "ns3::UdpL4Protocol")
-        instance.defer_connect(9, "protos", 10, "node")
-        instance.defer_connect(9, "protos", 11, "node")
-        instance.defer_connect(9, "protos", 12, "node")
-        instance.defer_connect(9, "protos", 13, "node")
-        instance.defer_create(14, "ns3::PointToPointNetDevice")
-        instance.defer_create(15, "ns3::DropTailQueue")
-        instance.defer_connect(9, "devs", 14, "node")
-        instance.defer_connect(14, "queue", 15, "dev")
-        instance.defer_add_trace(14, "P2PAsciiTrace")
-        instance.defer_add_address(14, "10.0.0.2", 24, None)
-
-        instance.defer_create(16, "ns3::PointToPointChannel")
-        instance.defer_connect(7, "chan", 16, "dev2")
-        instance.defer_connect(14, "chan", 16, "dev2")
-
-        instance.defer_create(17, "ns3::V4Ping")
-        instance.defer_create_set(17, "Remote", "10.0.0.2")
-        instance.defer_create_set(17, "StartTime", "0s")
-        instance.defer_create_set(17, "StopTime", "10s")
-        instance.defer_add_trace(17, "Rtt")
-        instance.defer_connect(17, "node", 2, "apps")
-
-        instance.do_setup()
-        instance.do_create()
-        instance.do_connect_init()
-        instance.do_connect_compl()
-        instance.do_preconfigure()
-        instance.do_configure()
-        instance.do_prestart()
-        instance.start()
-
-        while instance.status(17) != AS.STATUS_FINISHED:
-            time.sleep(0.1)
-        ping_result = instance.trace(14, "P2PAsciiTrace")
-        ping_rtt = instance.trace(17, "Rtt")
-        comp_result = "- 9.021 /NodeList/1/DeviceList/0/$ns3::PointToPointNetDevice/TxQueue/Dequeue ns3::PppHeader (Point-to-Point Protocol: IP (0x0021)) ns3::Ipv4Header (tos 0x0 DSCP Default ECN Not-ECT ttl 64 id 9 protocol 1 offset (bytes) 0 flags [none] length: 84 10.0.0.2 > 10.0.0.1) ns3::Icmpv4Header (type=0, code=0) ns3::Icmpv4Echo (identifier=0, sequence=9)"
-        comp_rtt_result = """+41992186.0ns\t+41992186.0ns
-+1041992186.0ns\t+41992186.0ns
-+2041992186.0ns\t+41992186.0ns
-+3041992186.0ns\t+41992186.0ns
-+4041992186.0ns\t+41992186.0ns
-+5041992186.0ns\t+41992186.0ns
-+6041992186.0ns\t+41992186.0ns
-+7041992186.0ns\t+41992186.0ns
-+8041992186.0ns\t+41992186.0ns
-+9041992186.0ns\t+41992186.0ns
-"""
-
-        self.assertNotEqual(ping_result.find(comp_result), -1)
-        self.assertEqual(ping_rtt.strip(), comp_rtt_result.strip())
-        instance.stop()
-        instance.shutdown()
-
-    def tearDown(self):
-        try:
-            shutil.rmtree(self.root_dir)
-        except:
-            # retry
-            time.sleep(0.1)
-            shutil.rmtree(self.root_dir)
-
-if __name__ == '__main__':
-    unittest.main()
-
diff --git a/test/testbeds/ns3/execute2.py b/test/testbeds/ns3/execute2.py
deleted file mode 100755 (executable)
index f193af8..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from nepi.util.constants import ApplicationStatus as AS
-from nepi.testbeds import ns3
-import os
-import shutil
-import tempfile
-import test_util
-import time
-import unittest
-
-# The reason execute tests are run in separate scripts for ns3, is that the 
-# same ns3 Simulator will be loaded only once per process, resulting in a 
-# dirty state of the Simulator after the first test is executed.
-# As it is not possible to reset the state of the Simulator to the original
-# one, different tests should be executed in different processes (different 
-# unittest instance)
-class Ns3ExecuteTestCase(unittest.TestCase):
-    def setUp(self):
-        self.root_dir = tempfile.mkdtemp()
-
-    @test_util.skipUnless(test_util.ns3_usable(),
-            "Test requires working ns-3 bindings")
-    def test_run_ping_routing(self):
-        instance = ns3.TestbedController()
-        instance.defer_configure("homeDirectory", self.root_dir)
-        instance.defer_create(2, "ns3::Node")
-        instance.defer_create(3, "ns3::Ipv4L3Protocol")
-        instance.defer_create(4, "ns3::ArpL3Protocol")
-        instance.defer_create(5, "ns3::Icmpv4L4Protocol")
-        instance.defer_create(6, "ns3::UdpL4Protocol")
-        instance.defer_connect(2, "protos", 3, "node")
-        instance.defer_connect(2, "protos", 4, "node")
-        instance.defer_connect(2, "protos", 5, "node")
-        instance.defer_connect(2, "protos", 6, "node")
-        instance.defer_create(7, "ns3::PointToPointNetDevice")
-        instance.defer_create(8, "ns3::DropTailQueue")
-        instance.defer_connect(2, "devs", 7, "node")
-        instance.defer_connect(7, "queue", 8, "dev")
-        instance.defer_add_trace(7, "P2PAsciiTrace")
-        instance.defer_add_address(7, "10.0.0.1", 24, None)
-
-        instance.defer_create(9, "ns3::Node")
-        instance.defer_create(10, "ns3::Ipv4L3Protocol")
-        instance.defer_create(11, "ns3::ArpL3Protocol")
-        instance.defer_create(12, "ns3::Icmpv4L4Protocol")
-        instance.defer_create(13, "ns3::UdpL4Protocol")
-        instance.defer_connect(9, "protos", 10, "node")
-        instance.defer_connect(9, "protos", 11, "node")
-        instance.defer_connect(9, "protos", 12, "node")
-        instance.defer_connect(9, "protos", 13, "node")
-        instance.defer_create(14, "ns3::PointToPointNetDevice")
-        instance.defer_create(15, "ns3::DropTailQueue")
-        instance.defer_connect(9, "devs", 14, "node")
-        instance.defer_connect(14, "queue", 15, "dev")
-        instance.defer_add_trace(14, "P2PAsciiTrace")
-        instance.defer_add_address(14, "10.0.0.2", 24, None)
-
-        instance.defer_create(16, "ns3::PointToPointChannel")
-        instance.defer_connect(7, "chan", 16, "dev2")
-        instance.defer_connect(14, "chan", 16, "dev2")
-
-        instance.defer_create(17, "ns3::PointToPointNetDevice")
-        instance.defer_create(18, "ns3::DropTailQueue")
-        instance.defer_connect(9, "devs", 17, "node")
-        instance.defer_connect(17, "queue", 18, "dev")
-        instance.defer_add_trace(17, "P2PAsciiTrace")
-        instance.defer_add_address(17, "10.0.1.1", 24, None)
-
-        instance.defer_create(19, "ns3::Node")
-        instance.defer_create(20, "ns3::Ipv4L3Protocol")
-        instance.defer_create(21, "ns3::ArpL3Protocol")
-        instance.defer_create(22, "ns3::Icmpv4L4Protocol")
-        instance.defer_create(23, "ns3::UdpL4Protocol")
-        instance.defer_connect(19, "protos", 20, "node")
-        instance.defer_connect(19, "protos", 21, "node")
-        instance.defer_connect(19, "protos", 22, "node")
-        instance.defer_connect(19, "protos", 23, "node")
-        instance.defer_create(24, "ns3::PointToPointNetDevice")
-        instance.defer_create(25, "ns3::DropTailQueue")
-        instance.defer_connect(19, "devs", 24, "node")
-        instance.defer_connect(24, "queue", 25, "dev")
-        instance.defer_add_trace(24, "P2PAsciiTrace")
-        instance.defer_add_address(24, "10.0.1.2", 24, None)
-
-        instance.defer_create(26, "ns3::PointToPointChannel")
-        instance.defer_connect(17, "chan", 26, "dev2")
-        instance.defer_connect(24, "chan", 26, "dev2")
-
-        instance.defer_create(27, "ns3::V4Ping")
-        instance.defer_create_set(27, "Remote", "10.0.1.2")
-        instance.defer_create_set(27, "StartTime", "0s")
-        instance.defer_create_set(27, "StopTime", "10s")
-        instance.defer_connect(27, "node", 2, "apps")
-
-        instance.defer_add_route(2, "10.0.1.0", 24, "10.0.0.2")
-        instance.defer_add_route(19, "10.0.0.0", 24, "10.0.1.1")
-
-        instance.do_setup()
-        instance.do_create()
-        instance.do_connect_init()
-        instance.do_connect_compl()
-        instance.do_preconfigure()
-        instance.do_configure()
-        instance.do_prestart()
-        instance.start()
-
-        while instance.status(27) != AS.STATUS_FINISHED:
-            time.sleep(0.1)
-        ping_result = instance.trace(24, "P2PAsciiTrace")
-        comp_result = "- 9.04199 /NodeList/2/DeviceList/0/$ns3::PointToPointNetDevice/TxQueue/Dequeue ns3::PppHeader (Point-to-Point Protocol: IP (0x0021)) ns3::Ipv4Header (tos 0x0 DSCP Default ECN Not-ECT ttl 64 id 9 protocol 1 offset (bytes) 0 flags [none] length: 84 10.0.1.2 > 10.0.0.1) ns3::Icmpv4Header (type=0, code=0) ns3::Icmpv4Echo (identifier=0, sequence=9)"
-
-        self.assertNotEqual(ping_result.find(comp_result), -1)
-        instance.stop()
-        instance.shutdown()
-
-    def tearDown(self):
-        try:
-            shutil.rmtree(self.root_dir)
-        except:
-            # retry
-            time.sleep(0.1)
-            shutil.rmtree(self.root_dir)
-
-if __name__ == '__main__':
-    unittest.main()
-
diff --git a/test/testbeds/ns3/integration.py b/test/testbeds/ns3/integration.py
deleted file mode 100755 (executable)
index e2c24c7..0000000
+++ /dev/null
@@ -1,265 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import getpass
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-from nepi.util import proxy
-from nepi.util.constants import DeploymentConfiguration as DC
-import os
-import re
-import shutil
-import tempfile
-import test_util
-import time
-import unittest
-
-class Ns3IntegrationTestCase(unittest.TestCase):
-    def setUp(self):
-        self.root_dir = tempfile.mkdtemp()
-
-    def _test_fd_net_device(self, daemonize_testbed,
-            controller_access_configuration):
-        testbed_id = "ns3"
-        exp_desc = ExperimentDescription()
-        ns3_provider = FactoriesProvider(testbed_id)
-        ns3_desc1 = exp_desc.add_testbed_description(ns3_provider)
-        root_dir1 = os.path.join(self.root_dir, "1")
-        ns3_desc1.set_attribute_value("homeDirectory", root_dir1)
-        ns3_desc1.set_attribute_value("SimulatorImplementationType",
-                "ns3::RealtimeSimulatorImpl")
-        ns3_desc1.set_attribute_value("ChecksumEnabled", True)
-        ns3_desc2 = exp_desc.add_testbed_description(ns3_provider)
-        root_dir2 = os.path.join(self.root_dir, "2")
-        ns3_desc2.set_attribute_value("homeDirectory", root_dir2)
-        ns3_desc2.set_attribute_value("SimulatorImplementationType",
-                "ns3::RealtimeSimulatorImpl")
-        ns3_desc2.set_attribute_value("ChecksumEnabled", True)
-
-        node1 = ns3_desc1.create("ns3::Node")
-        ipv41 = ns3_desc1.create("ns3::Ipv4L3Protocol")
-        arp1  = ns3_desc1.create("ns3::ArpL3Protocol")
-        icmp1 = ns3_desc1.create("ns3::Icmpv4L4Protocol")
-        node1.connector("protos").connect(ipv41.connector("node"))
-        node1.connector("protos").connect(arp1.connector("node"))
-        node1.connector("protos").connect(icmp1.connector("node"))
-        iface1 = ns3_desc1.create("ns3::FdNetDevice")
-        node1.connector("devs").connect(iface1.connector("node"))
-        ip1 = iface1.add_address()
-        ip1.set_attribute_value("Address", "10.0.0.1")
-        tc1 = ns3_desc1.create("ns3::Nepi::TunChannel")
-        tc1.connector("fd->").connect(iface1.connector("->fd"))
-
-        node2 = ns3_desc2.create("ns3::Node")
-        ipv42 = ns3_desc2.create("ns3::Ipv4L3Protocol")
-        arp2  = ns3_desc2.create("ns3::ArpL3Protocol")
-        icmp2 = ns3_desc2.create("ns3::Icmpv4L4Protocol")
-        node2.connector("protos").connect(ipv42.connector("node"))
-        node2.connector("protos").connect(arp2.connector("node"))
-        node2.connector("protos").connect(icmp2.connector("node"))
-        iface2 = ns3_desc2.create("ns3::FdNetDevice")
-        iface2.enable_trace("FdAsciiTrace")
-        node2.connector("devs").connect(iface2.connector("node"))
-        ip2 = iface2.add_address()
-        ip2.set_attribute_value("Address", "10.0.0.2")
-        tc2 = ns3_desc2.create("ns3::Nepi::TunChannel")
-        tc2.connector("fd->").connect(iface2.connector("->fd"))
-
-        tc2.connector("udp").connect(tc1.connector("udp"))
-
-        app = ns3_desc1.create("ns3::V4Ping")
-        app.set_attribute_value("Remote", "10.0.0.2")
-        app.set_attribute_value("StartTime", "0s")
-        app.set_attribute_value("StopTime", "2s")
-        app.connector("node").connect(node1.connector("apps"))
-
-        if daemonize_testbed:
-            ns3_desc1.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-            inst_root_dir = os.path.join(root_dir1, "instance")
-            os.makedirs(inst_root_dir)
-            ns3_desc1.set_attribute_value(DC.ROOT_DIRECTORY, inst_root_dir)
-            #ns3_desc1.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-
-            ns3_desc2.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-            inst_root_dir = os.path.join(root_dir2, "instance")
-            os.makedirs(inst_root_dir)
-            ns3_desc2.set_attribute_value(DC.ROOT_DIRECTORY, inst_root_dir)
-            #ns3_desc2.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-
-        xml = exp_desc.to_xml()
-        
-        if controller_access_configuration:
-            controller = ExperimentController(xml, self.root_dir)
-        else:
-            controller = proxy.create_experiment_controller(xml, 
-                controller_access_configuration)
-        
-        try:
-            controller.start()
-            while not controller.is_finished(app.guid):
-                time.sleep(0.5)
-            ping_result = controller.trace(iface2.guid, "FdAsciiTrace")
-            ping_exp_result = r"""r [-+0-9.e]+ /NodeList/0/DeviceList/0/\$ns3::FdNetDevice/Rx Payload \(size=42\)
-r [-+0-9.e]+ /NodeList/0/DeviceList/0/\$ns3::FdNetDevice/Rx Payload \(size=98\)
-r [-+0-9.e]+ /NodeList/0/DeviceList/0/\$ns3::FdNetDevice/Rx Payload \(size=42\)
-r [-+0-9.e]+ /NodeList/0/DeviceList/0/\$ns3::FdNetDevice/Rx Payload \(size=98\)
-"""
-            if not re.match(ping_exp_result, ping_result):
-                self.fail("Unexpected trace: %s" % (ping_result,))
-
-        finally:
-            controller.stop()
-            controller.shutdown()
-    def _test_if(self, daemonize_testbed, controller_access_configuration):
-        exp_desc = ExperimentDescription()
-        testbed_id = "ns3"
-        ns3_provider = FactoriesProvider(testbed_id)
-        ns3_desc = exp_desc.add_testbed_description(ns3_provider)
-        ns3_desc.set_attribute_value("homeDirectory", self.root_dir)
-
-        node1 = ns3_desc.create("ns3::Node")
-        ipv41 = ns3_desc.create("ns3::Ipv4L3Protocol")
-        arp1  = ns3_desc.create("ns3::ArpL3Protocol")
-        icmp1 = ns3_desc.create("ns3::Icmpv4L4Protocol")
-        node1.connector("protos").connect(ipv41.connector("node"))
-        node1.connector("protos").connect(arp1.connector("node"))
-        node1.connector("protos").connect(icmp1.connector("node"))
-        iface1 = ns3_desc.create("ns3::PointToPointNetDevice")
-        queue1 = ns3_desc.create("ns3::DropTailQueue")
-        node1.connector("devs").connect(iface1.connector("node"))
-        iface1.connector("queue").connect(queue1.connector("dev"))
-        trace1 = iface1.enable_trace("P2PAsciiTrace")
-        ip1 = iface1.add_address()
-        ip1.set_attribute_value("Address", "10.0.0.1")
-
-        node2 = ns3_desc.create("ns3::Node")
-        ipv42 = ns3_desc.create("ns3::Ipv4L3Protocol")
-        arp2  = ns3_desc.create("ns3::ArpL3Protocol")
-        icmp2 = ns3_desc.create("ns3::Icmpv4L4Protocol")
-        node2.connector("protos").connect(ipv42.connector("node"))
-        node2.connector("protos").connect(arp2.connector("node"))
-        node2.connector("protos").connect(icmp2.connector("node"))
-        iface2 = ns3_desc.create("ns3::PointToPointNetDevice")
-        queue2 = ns3_desc.create("ns3::DropTailQueue")
-        node2.connector("devs").connect(iface2.connector("node"))
-        iface2.connector("queue").connect(queue2.connector("dev"))
-        trace2 = iface2.enable_trace("P2PAsciiTrace")
-        ip2 = iface2.add_address()
-        ip2.set_attribute_value("Address", "10.0.0.2")
-
-        chan = ns3_desc.create("ns3::PointToPointChannel")
-        iface1.connector("chan").connect(chan.connector("dev2"))
-        iface2.connector("chan").connect(chan.connector("dev2"))
-
-        app = ns3_desc.create("ns3::V4Ping")
-        app.set_attribute_value("Remote", "10.0.0.2")
-        app.set_attribute_value("StartTime", "0s")
-        app.set_attribute_value("StopTime", "20s")
-        app.connector("node").connect(node1.connector("apps"))
-
-        if daemonize_testbed:
-            ns3_desc.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-            inst_root_dir = os.path.join(self.root_dir, "instance")
-            os.mkdir(inst_root_dir)
-            ns3_desc.set_attribute_value(DC.ROOT_DIRECTORY, inst_root_dir)
-            #ns3_desc.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-
-        xml = exp_desc.to_xml()
-        
-        if controller_access_configuration:
-            controller = ExperimentController(xml, self.root_dir)
-        else:
-            controller = proxy.create_experiment_controller(xml, 
-                controller_access_configuration)
-        
-        try:
-            controller.start()
-            while not controller.is_finished(app.guid):
-                time.sleep(0.5)
-            ping_result = controller.trace(iface2.guid, "P2PAsciiTrace")
-
-            comp_result = "- 19.021 /NodeList/1/DeviceList/0/$ns3::PointToPointNetDevice/TxQueue/Dequeue ns3::PppHeader (Point-to-Point Protocol: IP (0x0021)) ns3::Ipv4Header (tos 0x0 DSCP Default ECN Not-ECT ttl 64 id 19 protocol 1 offset (bytes) 0 flags [none] length: 84 10.0.0.2 > 10.0.0.1) ns3::Icmpv4Header (type=0, code=0) ns3::Icmpv4Echo (identifier=0, sequence=19)"
-            
-            if ping_result.find(comp_result) == -1:
-                self.fail("Unexpected trace: %s" % (ping_result,))
-        finally:
-            controller.stop()
-            controller.shutdown()
-
-    @test_util.skipUnless(test_util.ns3_usable(), 
-           "Test requires working ns-3 bindings")
-    def test_all_daemonized_fd_net_device(self):
-        access_config = proxy.AccessConfiguration()
-        access_config.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        access_config.set_attribute_value(DC.ROOT_DIRECTORY, self.root_dir)
-        #access_config.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-
-        self._test_fd_net_device(
-            daemonize_testbed = True,
-            controller_access_configuration = access_config)
-
-    @test_util.skipUnless(test_util.ns3_usable(), 
-           "Test requires working ns-3 bindings")
-    def test_all_ssh_daemonized_fd_net_device(self):
-        env = test_util.test_environment()
-
-        access_config = proxy.AccessConfiguration()
-        access_config.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        access_config.set_attribute_value(DC.ROOT_DIRECTORY, self.root_dir)
-        #access_config.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-        access_config.set_attribute_value(DC.DEPLOYMENT_COMMUNICATION, DC.ACCESS_SSH)
-        access_config.set_attribute_value(DC.DEPLOYMENT_PORT, env.port)
-        access_config.set_attribute_value(DC.USE_AGENT, True)
-        
-        self._test_fd_net_device(
-            daemonize_testbed = True,
-            controller_access_configuration = access_config)
-
-    @test_util.skipUnless(test_util.ns3_usable(), 
-           "Test requires working ns-3 bindings")
-    def test_local_if(self):
-        self._test_if(
-            daemonize_testbed = False,
-            controller_access_configuration = None)
-
-    @test_util.skipUnless(test_util.ns3_usable(), 
-           "Test requires working ns-3 bindings")
-    def test_all_daemonized_if(self):
-        access_config = proxy.AccessConfiguration()
-        access_config.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        access_config.set_attribute_value(DC.ROOT_DIRECTORY, self.root_dir)
-        #access_config.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-
-        self._test_if(
-            daemonize_testbed = True,
-            controller_access_configuration = access_config)
-
-    @test_util.skipUnless(test_util.ns3_usable(), 
-           "Test requires working ns-3 bindings")
-    def test_all_ssh_daemonized_if(self):
-        env = test_util.test_environment()
-
-        access_config = proxy.AccessConfiguration()
-        access_config.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        access_config.set_attribute_value(DC.ROOT_DIRECTORY, self.root_dir)
-        #access_config.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-        access_config.set_attribute_value(DC.DEPLOYMENT_COMMUNICATION, DC.ACCESS_SSH)
-        access_config.set_attribute_value(DC.DEPLOYMENT_PORT, env.port)
-        access_config.set_attribute_value(DC.USE_AGENT, True)
-        
-        self._test_if(
-            daemonize_testbed = True,
-            controller_access_configuration = access_config)
-
-    def tearDown(self):
-        try:
-            shutil.rmtree(self.root_dir)
-        except:
-            # retry
-            time.sleep(0.1)
-            shutil.rmtree(self.root_dir)
-
-if __name__ == '__main__':
-    unittest.main()
-
diff --git a/test/testbeds/planetlab/design.py b/test/testbeds/planetlab/design.py
deleted file mode 100755 (executable)
index 24777ca..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-import unittest
-
-class PlanetlabDesignTestCase(unittest.TestCase):
-    def make_test_design(self):
-        testbed_id = "planetlab"
-        exp_desc = ExperimentDescription()
-        provider = FactoriesProvider(testbed_id)
-        tstbd_desc = exp_desc.add_testbed_description(provider)
-        tstbd_desc.set_attribute_value("slice", "inria_nepi")
-        node1 = tstbd_desc.create("Node")
-        node2 = tstbd_desc.create("Node")
-        iface1 = tstbd_desc.create("NodeInterface")
-        node1.connector("devs").connect(iface1.connector("node"))
-        iface2 = tstbd_desc.create("NodeInterface")
-        node2.connector("devs").connect(iface2.connector("node"))
-        switch = tstbd_desc.create("Internet")
-        iface1.connector("inet").connect(switch.connector("devs"))
-        iface2.connector("inet").connect(switch.connector("devs"))
-        app = tstbd_desc.create("Application")
-        app.set_attribute_value("command", "ping -qc10 10.0.0.2")
-        app.connector("node").connect(node1.connector("apps"))
-        
-        return exp_desc, tstbd_desc, node1, node2, iface1, iface2, app
-        
-    def test_design_simple(self):
-        exp_desc, tstbd_desc, node1, node2, iface1, iface2, app = self.make_test_design()
-
-        xml = exp_desc.to_xml()
-        exp_desc2 = ExperimentDescription()
-        exp_desc2.from_xml(xml)
-        xml2 = exp_desc2.to_xml()
-        self.assertTrue(xml == xml2)
-
-    def test_design_constrained(self):
-        exp_desc, tstbd_desc, node1, node2, iface1, iface2, app = self.make_test_design()
-        
-        node1.set_attribute_value("hostname", "onelab*.inria.fr")
-        node2.set_attribute_value("hostname", "onelab*.inria.fr")
-
-        xml = exp_desc.to_xml()
-        exp_desc2 = ExperimentDescription()
-        exp_desc2.from_xml(xml)
-        xml2 = exp_desc2.to_xml()
-        self.assertTrue(xml == xml2)
-
-    def test_design_constrained2(self):
-        exp_desc, tstbd_desc, node1, node2, iface1, iface2, app = self.make_test_design()
-        
-        node1.set_attribute_value("minReliability", 90.0)
-        node1.set_attribute_value("operatingSystem", "f12")
-        node2.set_attribute_value("minReliability", 50.0)
-        node2.set_attribute_value("architecture", "x86_64")
-
-        xml = exp_desc.to_xml()
-        exp_desc2 = ExperimentDescription()
-        exp_desc2.from_xml(xml)
-        xml2 = exp_desc2.to_xml()
-        self.assertTrue(xml == xml2)
-        
-    def test_design_emulation(self):
-        exp_desc, tstbd_desc, node1, node2, iface1, iface2, app = self.make_test_design()
-        
-        netpipe1 = tstbd_desc.create("NetPipe")
-        netpipe1.set_attribute_value("mode","CLIENT")
-        netpipe1.set_attribute_value("portList","80,443")
-        netpipe1.set_attribute_value("bwIn",1.0)
-        netpipe1.set_attribute_value("bwOut",128.0/1024.0)
-        netpipe1.set_attribute_value("delayIn",12)
-        netpipe1.set_attribute_value("delayOut",92)
-        netpipe1.set_attribute_value("plrIn",0.05)
-        netpipe1.set_attribute_value("plrOut",0.15)
-        node1.connector("pipes").connect(netpipe1.connector("node"))
-
-        xml = exp_desc.to_xml()
-        exp_desc2 = ExperimentDescription()
-        exp_desc2.from_xml(xml)
-        xml2 = exp_desc2.to_xml()
-        self.assertTrue(xml == xml2)
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/test/testbeds/planetlab/execute.py b/test/testbeds/planetlab/execute.py
deleted file mode 100755 (executable)
index 557425f..0000000
+++ /dev/null
@@ -1,671 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import getpass
-from nepi.util.constants import ApplicationStatus as AS
-from nepi.testbeds import planetlab
-import os
-import re
-import shutil
-import sys
-import tempfile
-import test_util
-import time
-import unittest
-
-class PlanetLabExecuteTestCase(unittest.TestCase):
-    testbed_id = "planetlab"
-    slicename = "inria_nepi"
-    plchost = "nepiplc.pl.sophia.inria.fr"
-    
-    host1 = "nepi1.pl.sophia.inria.fr"
-    host2 = "nepi2.pl.sophia.inria.fr"
-    
-    port_base = 2000 + (os.getpid() % 1000) * 13
-    
-    PLR50_PY = os.path.join(
-        os.path.dirname(planetlab.__file__), 
-        'scripts',
-        'plr50.py')
-    PLR50_C = os.path.join(
-        os.path.dirname(planetlab.__file__), 
-        'scripts',
-        'plr50.c')
-    TOS_PY = os.path.join(
-        os.path.dirname(planetlab.__file__), 
-        'scripts',
-        'tosqueue.py')
-    CLS_PY = os.path.join(
-        os.path.dirname(planetlab.__file__), 
-        'scripts',
-        'classqueue.py')
-    
-    def setUp(self):
-        self.root_dir = tempfile.mkdtemp()
-        self.__class__.port_base = self.port_base + 100
-        
-    def tearDown(self):
-        try:
-            shutil.rmtree(self.root_dir)
-        except:
-            # retry
-            time.sleep(0.1)
-            shutil.rmtree(self.root_dir)
-
-    def make_instance(self):
-        testbed_id = self.testbed_id
-        slicename = self.slicename
-        plchost = self.plchost
-        
-        instance = planetlab.TestbedController()
-        pl_ssh_key = os.environ.get(
-            "PL_SSH_KEY",
-            "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'],) )
-        slicename = os.environ.get(
-            "PL_SLICE",
-            slicename)
-        pl_user, pl_pwd = test_util.pl_auth()
-        
-        instance.defer_configure("homeDirectory", self.root_dir)
-        instance.defer_configure("slice", slicename)
-        instance.defer_configure("sliceSSHKey", pl_ssh_key)
-        instance.defer_configure("authUser", pl_user)
-        instance.defer_configure("authPass", pl_pwd)
-        instance.defer_configure("plcHost", plchost)
-        instance.defer_configure("tapPortBase", self.port_base)
-        instance.defer_configure("p2pDeployment", False) # it's interactive, we don't want it in tests
-        instance.defer_configure("cleanProc", True)
-        
-        # Hack, but we need vsys_vnet
-        instance.do_setup()
-        vnet = instance.vsys_vnet
-        self.net_prefix = vnet.rsplit('.',1)[0]
-        
-        return instance
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_simple(self):
-        instance = self.make_instance()
-        
-        instance.defer_create(2, "Node")
-        instance.defer_create_set(2, "hostname", self.host1)
-        instance.defer_create(3, "Node")
-        instance.defer_create_set(3, "hostname", self.host2)
-        instance.defer_create(4, "NodeInterface")
-        instance.defer_connect(2, "devs", 4, "node")
-        instance.defer_create(5, "NodeInterface")
-        instance.defer_connect(3, "devs", 5, "node")
-        instance.defer_create(6, "Internet")
-        instance.defer_connect(4, "inet", 6, "devs")
-        instance.defer_connect(5, "inet", 6, "devs")
-        instance.defer_create(7, "Application")
-        instance.defer_create_set(7, "command", "ping -qc1 {#[GUID-5].addr[0].[Address]#}")
-        instance.defer_add_trace(7, "stdout")
-        instance.defer_add_trace(7, "stderr")
-        instance.defer_connect(7, "node", 2, "apps")
-
-        comp_result = r"""PING .* \(.*\) \d*\(\d*\) bytes of data.
-
---- .* ping statistics ---
-1 packets transmitted, 1 received, 0% packet loss, time \d*ms.*
-"""
-
-        try:
-            instance.do_setup()
-            instance.do_create()
-            instance.do_connect_init()
-            instance.do_connect_compl()
-            instance.do_preconfigure()
-            
-            # Manually replace netref
-            instance.set(7, "command",
-                instance.get(7, "command")
-                    .replace("{#[GUID-5].addr[0].[Address]#}", 
-                        instance.get_address(5, 0, "Address") )
-            )
-
-            instance.do_configure()
-            
-            instance.do_prestart()
-            instance.start()
-            while instance.status(7) != AS.STATUS_FINISHED:
-                time.sleep(0.5)
-            ping_result = instance.trace(7, "stdout") or ""
-            instance.stop()
-        finally:
-            try:
-                instance.shutdown()
-            except:
-                pass
-
-        # asserts at the end, to make sure there's proper cleanup
-        self.assertTrue(re.match(comp_result, ping_result, re.MULTILINE),
-            "Unexpected trace:\n" + ping_result)
-        
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_depends(self):
-        instance = self.make_instance()
-        
-        instance.defer_create(2, "Node")
-        instance.defer_create_set(2, "hostname", self.host1)
-        instance.defer_create(3, "NodeInterface")
-        instance.defer_connect(2, "devs", 3, "node")
-        instance.defer_create(4, "Internet")
-        instance.defer_connect(3, "inet", 4, "devs")
-        instance.defer_create(5, "Application")
-        instance.defer_create_set(5, "command", "gfortran --version")
-        instance.defer_create_set(5, "depends", "gcc-gfortran")
-        instance.defer_add_trace(5, "stdout")
-        instance.defer_add_trace(5, "stderr")
-        instance.defer_connect(5, "node", 2, "apps")
-
-        try:
-            instance.do_setup()
-            instance.do_create()
-            instance.do_connect_init()
-            instance.do_connect_compl()
-            instance.do_preconfigure()
-            instance.do_configure()
-            
-            instance.do_prestart()
-            instance.start()
-            while instance.status(5) != AS.STATUS_FINISHED:
-                time.sleep(0.5)
-            ping_result = instance.trace(5, "stdout") or ""
-            comp_result = r".*GNU Fortran \(GCC\).*"
-            instance.stop()
-        finally:
-            try:
-                instance.shutdown()
-            except:
-                pass
-
-        # asserts at the end, to make sure there's proper cleanup
-        self.assertTrue(re.match(comp_result, ping_result, re.MULTILINE),
-            "Unexpected trace:\n" + ping_result)
-        
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_build(self):
-        instance = self.make_instance()
-        
-        instance.defer_create(2, "Node")
-        instance.defer_create_set(2, "hostname", self.host1)
-        instance.defer_create(3, "NodeInterface")
-        instance.defer_connect(2, "devs", 3, "node")
-        instance.defer_create(4, "Internet")
-        instance.defer_connect(3, "inet", 4, "devs")
-        instance.defer_create(10, "Application")
-        instance.defer_create_set(10, "command", "./consts")
-        instance.defer_create_set(10, "buildDepends", "gcc")
-        instance.defer_create_set(10, "build", "gcc ${SOURCES}/consts.c -o consts")
-        instance.defer_create_set(10, "install", "cp consts ${SOURCES}/consts")
-        instance.defer_create_set(10, "sources", os.path.join(os.path.dirname(planetlab.__file__),'scripts','consts.c'))
-        instance.defer_add_trace(10, "stdout")
-        instance.defer_add_trace(10, "stderr")
-        instance.defer_connect(10, "node", 2, "apps")
-
-        comp_result = \
-r""".*ETH_P_ALL = 0x[0-9a-fA-F]{8}
-ETH_P_IP = 0x[0-9a-fA-F]{8}
-TUNGETIFF = 0x[0-9a-fA-F]{8}
-TUNSETIFF = 0x[0-9a-fA-F]{8}
-IFF_NO_PI = 0x[0-9a-fA-F]{8}
-IFF_TAP = 0x[0-9a-fA-F]{8}
-IFF_TUN = 0x[0-9a-fA-F]{8}
-IFF_VNET_HDR = 0x[0-9a-fA-F]{8}
-TUN_PKT_STRIP = 0x[0-9a-fA-F]{8}
-IFHWADDRLEN = 0x[0-9a-fA-F]{8}
-IFNAMSIZ = 0x[0-9a-fA-F]{8}
-IFREQ_SZ = 0x[0-9a-fA-F]{8}
-FIONREAD = 0x[0-9a-fA-F]{8}.*
-"""
-
-        try:
-            instance.do_setup()
-            instance.do_create()
-            instance.do_connect_init()
-            instance.do_connect_compl()
-            instance.do_preconfigure()
-            instance.do_configure()
-            
-            instance.do_prestart()
-            instance.start()
-            while instance.status(10) != AS.STATUS_FINISHED:
-                time.sleep(0.5)
-            ping_result = instance.trace(10, "stdout") or ""
-            instance.stop()
-        finally:
-            try:
-                instance.shutdown()
-            except:
-                pass
-
-        # asserts at the end, to make sure there's proper cleanup
-        self.assertTrue(re.match(comp_result, ping_result, re.MULTILINE),
-            "Unexpected trace:\n" + ping_result)
-        
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_simple_vsys(self):
-        instance = self.make_instance()
-        
-        instance.defer_create(2, "Node")
-        instance.defer_create_set(2, "hostname", self.host1)
-        instance.defer_create(3, "NodeInterface")
-        instance.defer_connect(2, "devs", 3, "node")
-        instance.defer_create(4, "Internet")
-        instance.defer_connect(3, "inet", 4, "devs")
-        instance.defer_create(5, "TunInterface")
-        instance.defer_add_address(5, self.net_prefix+".2", 24, False)
-        instance.defer_connect(2, "devs", 5, "node")
-        instance.defer_create(6, "Application")
-        instance.defer_create_set(6, "command", """
-set -e
-netconfig help > /dev/null
-test -e /vsys/vif_up.in > /dev/null
-test -e /vsys/vif_up.out > /dev/null
-test -e /vsys/fd_tuntap.control > /dev/null
-echo 'OKIDOKI'
-""")
-        instance.defer_create_set(6, "sudo", True) # only sudo has access to /vsys
-        instance.defer_add_trace(6, "stdout")
-        instance.defer_add_trace(6, "stderr")
-        instance.defer_connect(6, "node", 2, "apps")
-
-        try:
-            instance.do_setup()
-            instance.do_create()
-            instance.do_connect_init()
-            instance.do_connect_compl()
-            instance.do_preconfigure()
-            instance.do_configure()
-            
-            instance.do_prestart()
-            instance.start()
-            while instance.status(6) != AS.STATUS_FINISHED:
-                time.sleep(0.5)
-            test_result = (instance.trace(6, "stdout") or "").strip()
-            comp_result = "OKIDOKI"
-            instance.stop()
-        finally:
-            try:
-                instance.shutdown()
-            except:
-                pass
-
-        # asserts at the end, to make sure there's proper cleanup
-        self.assertEqual(comp_result, test_result)
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_emulation(self):
-        instance = self.make_instance()
-        
-        instance.defer_create(2, "Node")
-        instance.defer_create_set(2, "hostname", self.host1)
-        instance.defer_create(3, "NodeInterface")
-        instance.defer_connect(2, "devs", 3, "node")
-        instance.defer_create(4, "Internet")
-        instance.defer_connect(3, "inet", 4, "devs")
-        instance.defer_create(7, "NetPipe")
-        instance.defer_create_set(7, "mode", "CLIENT")
-        instance.defer_create_set(7, "portList", "80")
-        instance.defer_create_set(7, "bwOut", 12.0/1024.0) # 12kbps
-        instance.defer_create_set(7, "bwIn", 64.0/1024.0) # 64kbps
-        instance.defer_create_set(7, "plrOut", 0.01) # 1% plr outbound - high loss
-        instance.defer_create_set(7, "plrIn", 0.001) # 0.1% plr inbound - regular loss
-        instance.defer_create_set(7, "delayOut", int(1500 * 8 / (12.0/1024.0) / 1000)) # tx delay at 12kbps in ms
-        instance.defer_create_set(7, "delayIn", int(1500 * 8 / (64.0/1024.0) / 1000)) # rx delay at 64kbps in ms
-        instance.defer_add_trace(7, "netpipeStats")
-        instance.defer_connect(2, "pipes", 7, "node")
-        instance.defer_create(8, "Application")
-        instance.defer_create_set(8, "command", "time wget -q -O /dev/null http://www.google.com/") # Fetch ~10kb
-        instance.defer_add_trace(8, "stdout")
-        instance.defer_add_trace(8, "stderr")
-        instance.defer_connect(8, "node", 2, "apps")
-
-        try:
-            instance.do_setup()
-            instance.do_create()
-            instance.do_connect_init()
-            instance.do_connect_compl()
-            instance.do_preconfigure()
-            instance.do_configure()
-            
-            instance.do_prestart()
-            instance.start()
-            while instance.status(8) != AS.STATUS_FINISHED:
-                time.sleep(0.5)
-            test_result = (instance.trace(8, "stderr") or "").strip()
-            comp_result = r".*real\s*(?P<min>[0-9]+)m(?P<sec>[0-9]+[.][0-9]+)s.*"
-            netpipe_stats = instance.trace(7, "netpipeStats")
-            
-            instance.stop()
-        finally:
-            try:
-                instance.shutdown()
-            except:
-                pass
-
-        # asserts at the end, to make sure there's proper cleanup
-        match = re.match(comp_result, test_result, re.MULTILINE)
-        self.assertTrue(match, "Unexpected output: %s" % (test_result,))
-        
-        minutes = int(match.group("min"))
-        seconds = float(match.group("sec"))
-        self.assertTrue((minutes * 60 + seconds) > 1.0, "Emulation not effective: %s" % (test_result,))
-
-        self.assertTrue(netpipe_stats, "Unavailable netpipe stats")
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def _pingtest(self, TunClass, ConnectionProto, Cipher, Filter1=None, Filter2=None, Filter1args=None, Filter2args=None, PLREX=None, flood=False):
-        instance = self.make_instance()
-        
-        instance.defer_create(2, "Node")
-        instance.defer_create_set(2, "hostname", self.host1)
-        instance.defer_create(3, "Node")
-        instance.defer_create_set(3, "hostname", self.host2)
-        instance.defer_create(4, "NodeInterface")
-        instance.defer_connect(2, "devs", 4, "node")
-        instance.defer_create(5, "Internet")
-        instance.defer_connect(4, "inet", 5, "devs")
-        instance.defer_create(6, "NodeInterface")
-        instance.defer_connect(3, "devs", 6, "node")
-        instance.defer_connect(6, "inet", 5, "devs")
-        instance.defer_create(7, TunClass)
-        instance.defer_create_set(7, "tun_cipher", Cipher)
-        instance.defer_add_trace(7, "packets")
-        instance.defer_add_address(7, self.net_prefix+".2", 24, False)
-        if flood:
-            instance.defer_create_set(7, "bwlimit", 128)
-        instance.defer_connect(2, "devs", 7, "node")
-        instance.defer_create(8, TunClass)
-        instance.defer_create_set(8, "tun_cipher", Cipher)
-        instance.defer_add_trace(8, "packets")
-        instance.defer_add_address(8, self.net_prefix+".3", 24, False)
-        instance.defer_connect(3, "devs", 8, "node")
-        instance.defer_create(9, "Application")
-        if flood:
-            instance.defer_create_set(9, "command", "sudo -S ping -s 1000 -l 1000 -qfc1000 {#[GUID-8].addr[0].[Address]#} ; sleep 20 ; ping -qc10 {#[GUID-8].addr[0].[Address]#}")
-        else:
-            instance.defer_create_set(9, "command", "ping -qc10 {#[GUID-8].addr[0].[Address]#}")
-        instance.defer_add_trace(9, "stdout")
-        instance.defer_add_trace(9, "stderr")
-        instance.defer_connect(9, "node", 2, "apps")
-        
-        if Filter1:
-            instance.defer_create(10, "TunFilter")
-            instance.defer_create_set(10, "module", Filter1)
-            if Filter1args:
-                instance.defer_create_set(10, "args", Filter1args)
-            instance.defer_connect(7, "fd->", 10, "->fd")
-            
-        if Filter2:
-            instance.defer_create(11, "TunFilter")
-            instance.defer_create_set(11, "module", Filter2)
-            if Filter2args:
-                instance.defer_create_set(11, "args", Filter2args)
-            instance.defer_connect(8, "fd->", 11, "->fd")
-
-        if PLREX is None:
-            if Filter1 and Filter2:
-                plr = "[5-9][0-9]"
-            elif Filter1 or Filter2:
-                plr = "[3-9][0-9]"
-            else:
-                plr = "0"
-        else:
-            plr = PLREX
-       
-        instance.defer_connect(
-            (10 if Filter1 else 7), ConnectionProto, 
-            (11 if Filter2 else 8), ConnectionProto)
-
-        comp_result = r"""PING .* \(.*\) \d*\(\d*\) bytes of data.
-
---- .* ping statistics ---
-10 packets transmitted, [0-9]+ received,.* %s%% packet loss, time \d*ms.*
-""" % (plr,)
-        if flood:
-            comp_result = ".*" + comp_result
-
-        try:
-            instance.do_setup()
-            instance.do_create()
-            instance.do_connect_init()
-            instance.do_connect_compl()
-            instance.do_preconfigure()
-            
-            # Manually replace netref
-            instance.set(9, "command",
-                instance.get(9, "command")
-                    .replace("{#[GUID-8].addr[0].[Address]#}", 
-                        instance.get_address(8, 0, "Address") )
-            )
-            
-            instance.do_configure()
-            
-            instance.do_prestart()
-            instance.start()
-            while instance.status(9) != AS.STATUS_FINISHED:
-                time.sleep(0.5)
-            ping_result = instance.trace(9, "stdout") or ""
-            packets1 = instance.trace(7, "packets") or ""
-            packets2 = instance.trace(8, "packets") or ""
-            instance.stop()
-        finally:
-            try:
-                instance.shutdown()
-            except:
-                pass
-
-        # asserts at the end, to make sure there's proper cleanup
-        self.assertTrue(re.match(comp_result, ping_result, re.MULTILINE|re.DOTALL),
-            "Unexpected trace:\n%s\nPackets @ source:\n%s\nPackets @ target:\n%s" % (
-                ping_result,
-                packets1,
-                packets2))
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_tun_ping(self):
-        self._pingtest("TunInterface", "tcp", "AES")
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_tun_ping_udp(self):
-        self._pingtest("TunInterface", "udp", "AES")
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_tun_ping_gre(self):
-        self._pingtest("TunInterface", "gre", "PLAIN")
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_tun_ping_flood(self):
-        self._pingtest("TunInterface", "tcp", "PLAIN", flood = True)
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_tun_ping_flood_udp(self):
-        self._pingtest("TunInterface", "udp", "PLAIN", flood = True)
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_tap_ping(self):
-        self._pingtest("TapInterface", "tcp", "AES")
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_tap_ping_udp(self):
-        self._pingtest("TapInterface", "udp", "AES")
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_tap_ping_gre(self):
-        self._pingtest("TapInterface", "gre", "PLAIN")
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_tap_ping_udp_loss1_py(self):
-        self._pingtest("TapInterface", "udp", "AES", self.PLR50_PY, None, "plr=50")
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_tap_ping_udp_loss2_py(self):
-        self._pingtest("TapInterface", "udp", "AES", self.PLR50_PY, self.PLR50_PY, "plr=40", "plr=40")
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_tap_ping_udp_loss1_c(self):
-        self._pingtest("TapInterface", "udp", "AES", self.PLR50_C, None, "plr=50")
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_tap_ping_udp_loss2_c(self):
-        self._pingtest("TapInterface", "udp", "AES", self.PLR50_C, self.PLR50_C, "plr=40", "plr=40")
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_tun_ping_udp_tos(self):
-        self._pingtest("TunInterface", "udp", "AES", self.TOS_PY, self.TOS_PY, "size=1000", "size=1000", "0")
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_tun_ping_udp_class(self):
-        self._pingtest("TunInterface", "udp", "AES", self.CLS_PY, self.CLS_PY, "size=10", "size=10", "0")
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_nepi_depends(self):
-        instance = self.make_instance()
-        
-        instance.defer_create(2, "Node")
-        instance.defer_create_set(2, "hostname", self.host1)
-        instance.defer_create(3, "NodeInterface")
-        instance.defer_connect(2, "devs", 3, "node")
-        instance.defer_create(4, "Internet")
-        instance.defer_connect(3, "inet", 4, "devs")
-        instance.defer_create(5, "NepiDependency")
-        instance.defer_connect(5, "node", 2, "deps")
-        instance.defer_create(12, "Application")
-        instance.defer_connect(12, "node", 2, "apps")
-        instance.defer_create_set(12, "command", "python -c 'import nepi'")
-        instance.defer_add_trace(12, "stderr")
-
-        try:
-            instance.do_setup()
-            instance.do_create()
-            instance.do_connect_init()
-            instance.do_connect_compl()
-            instance.do_preconfigure()
-            instance.do_configure()
-            
-            instance.do_prestart()
-            instance.start()
-            while instance.status(12) != AS.STATUS_FINISHED:
-                time.sleep(0.5)
-            ping_result = (instance.trace(12, "stderr") or "").strip()
-            instance.stop()
-        finally:
-            try:
-                instance.shutdown()
-            except:
-                pass
-        
-        # asserts at the end, to make sure there's proper cleanup
-        self.assertEqual(ping_result, "")
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, 
-        "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    @test_util.skipUnless(os.environ.get('NEPI_FULL_TESTS','').lower() in ('1','yes','true','on'),
-        "Test is expensive, requires NEPI_FULL_TESTS=yes")
-    def test_ns3_depends(self):
-        instance = self.make_instance()
-        
-        instance.defer_create(2, "Node")
-        instance.defer_create_set(2, "hostname", self.host1)
-        instance.defer_create(3, "NodeInterface")
-        instance.defer_connect(2, "devs", 3, "node")
-        instance.defer_create(4, "Internet")
-        instance.defer_connect(3, "inet", 4, "devs")
-        instance.defer_create(5, "NepiDependency")
-        instance.defer_connect(5, "node", 2, "deps")
-        instance.defer_create(6, "NS3Dependency")
-        instance.defer_connect(6, "node", 2, "deps")
-        instance.defer_create(12, "Application")
-        instance.defer_connect(12, "node", 2, "apps")
-        instance.defer_create_set(12, "command", "python -c 'import nepi.testbeds.ns3.execute ; tb = nepi.testbeds.ns3.execute.TestbedController() ; mod = tb._configure_ns3_module()'")
-        instance.defer_add_trace(12, "stderr")
-
-        try:
-            instance.do_setup()
-            instance.do_create()
-            instance.do_connect_init()
-            instance.do_connect_compl()
-            instance.do_preconfigure()
-            instance.do_configure()
-            
-            instance.do_prestart()
-            instance.start()
-            while instance.status(12) != AS.STATUS_FINISHED:
-                time.sleep(0.5)
-            ping_result = (instance.trace(12, "stderr") or "").strip()
-            instance.stop()
-        finally:
-            try:
-                instance.shutdown()
-            except:
-                pass
-        
-        # asserts at the end, to make sure there's proper cleanup
-        self.assertEqual(ping_result, "")
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_discovery(self):
-        instance = self.make_instance()
-        
-        instance.defer_create(2, "Node")
-        instance.defer_create_set(2, "operatingSystem", "f12")
-        instance.defer_create(3, "Node")
-        instance.defer_create_set(3, "operatingSystem", "f12")
-        instance.defer_create(4, "NodeInterface")
-        instance.defer_connect(2, "devs", 4, "node")
-        instance.defer_create(5, "NodeInterface")
-        instance.defer_connect(3, "devs", 5, "node")
-        instance.defer_create(6, "Internet")
-        instance.defer_connect(4, "inet", 6, "devs")
-        instance.defer_connect(5, "inet", 6, "devs")
-        instance.defer_create(7, "Application")
-        instance.defer_create_set(7, "command", "ping -qc1 {#[GUID-5].addr[0].[Address]#}")
-        instance.defer_add_trace(7, "stdout")
-        instance.defer_add_trace(7, "stderr")
-        instance.defer_connect(7, "node", 2, "apps")
-
-        comp_result = r"""PING .* \(.*\) \d*\(\d*\) bytes of data.
-
---- .* ping statistics ---
-1 packets transmitted, 1 received, 0% packet loss, time \d*ms.*
-"""
-
-        try:
-            instance.do_setup()
-            instance.do_create()
-            instance.do_connect_init()
-            instance.do_connect_compl()
-            instance.do_preconfigure()
-            
-            # Manually replace netref
-            instance.set(7, "command",
-                instance.get(7, "command")
-                    .replace("{#[GUID-5].addr[0].[Address]#}", 
-                        instance.get_address(5, 0, "Address") )
-            )
-
-            instance.do_configure()
-            
-            instance.do_prestart()
-            instance.start()
-            while instance.status(7) != AS.STATUS_FINISHED:
-                time.sleep(0.5)
-            ping_result = instance.trace(7, "stdout") or ""
-            instance.stop()
-        finally:
-            try:
-                instance.shutdown()
-            except:
-                pass
-
-        # asserts at the end, to make sure there's proper cleanup
-        self.assertTrue(re.match(comp_result, ping_result, re.MULTILINE),
-            "Unexpected trace:\n" + ping_result)
-        
-        
-
-if __name__ == '__main__':
-    unittest.main()
-
diff --git a/test/testbeds/planetlab/integration.py b/test/testbeds/planetlab/integration.py
deleted file mode 100755 (executable)
index 71d9e2b..0000000
+++ /dev/null
@@ -1,423 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import getpass
-import logging
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-from nepi.util import proxy
-from nepi.util.constants import DeploymentConfiguration as DC
-import os
-import re
-import shutil
-import sys
-import tempfile
-import test_util
-import time
-import unittest
-
-class PlanetLabIntegrationTestCase(unittest.TestCase):
-    testbed_id = "planetlab"
-    slicename = "inria_nepi"
-    slicehrn = "nepi.inria.nepi"
-    plchost = "nepiplc.pl.sophia.inria.fr"
-    
-    host1 = "nepi1.pl.sophia.inria.fr"
-    host2 = "nepi2.pl.sophia.inria.fr"
-    host3 = "nepi3.pl.sophia.inria.fr"
-    host4 = "nepi5.pl.sophia.inria.fr"
-
-    port_base = 2000 + (os.getpid() % 1000) * 13
-    
-    def setUp(self):
-        self.root_dir = tempfile.mkdtemp()
-        self.__class__.port_base = self.port_base + 100
-
-    def tearDown(self):
-        try:
-            shutil.rmtree(self.root_dir)
-        except:
-            # retry
-            time.sleep(0.1)
-            shutil.rmtree(self.root_dir)
-
-    def make_experiment_desc(self, use_sfa = False):
-        testbed_id = self.testbed_id
-        slicename = self.slicename
-        plchost = self.plchost
-        pl_ssh_key = os.environ.get(
-            "PL_SSH_KEY",
-            "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'],) )
-        pl_user, pl_pwd = test_util.pl_auth()
-
-        exp_desc = ExperimentDescription()
-        pl_provider = FactoriesProvider(testbed_id)
-        pl_desc = exp_desc.add_testbed_description(pl_provider)
-        pl_desc.set_attribute_value("homeDirectory", self.root_dir)
-        pl_desc.set_attribute_value("slice", slicename)
-        pl_desc.set_attribute_value("sliceSSHKey", pl_ssh_key)
-        pl_desc.set_attribute_value("authUser", pl_user)
-        pl_desc.set_attribute_value("authPass", pl_pwd)
-        pl_desc.set_attribute_value("plcHost", plchost)
-        pl_desc.set_attribute_value("tapPortBase", self.port_base)
-        pl_desc.set_attribute_value("p2pDeployment", False) # it's interactive, we don't want it in tests
-        pl_desc.set_attribute_value("cleanProc", True)
-        pl_desc.set_attribute_value("plLogLevel", "DEBUG")
-        if use_sfa:
-            pl_desc.set_attribute_value("sfa", True)
-            pl_desc.set_attribute_value("sliceHrn", self.slicehrn)
-        
-        return pl_desc, exp_desc
-    
-    def _test_simple(self, daemonize_testbed, controller_access_configuration,
-            environ = None, use_sfa = False):
-        pl, exp = self.make_experiment_desc(use_sfa)
-        
-        node1 = pl.create("Node")
-        node2 = pl.create("Node")
-        node1.set_attribute_value("hostname", self.host1)
-        node2.set_attribute_value("hostname", self.host2)
-        iface1 = pl.create("NodeInterface")
-        iface2 = pl.create("NodeInterface")
-        iface2.set_attribute_value("label", "node2iface")
-        inet = pl.create("Internet")
-        node1.connector("devs").connect(iface1.connector("node"))
-        node2.connector("devs").connect(iface2.connector("node"))
-        iface1.connector("inet").connect(inet.connector("devs"))
-        iface2.connector("inet").connect(inet.connector("devs"))
-        app = pl.create("Application")
-        app.set_attribute_value("command", "ping -qc1 {#[node2iface].addr[0].[Address]#}")
-        app.enable_trace("stdout")
-        app.connector("node").connect(node1.connector("apps"))
-
-        if daemonize_testbed:
-            pl.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-            inst_root_dir = os.path.join(self.root_dir, "instance")
-            os.mkdir(inst_root_dir)
-            pl.set_attribute_value(DC.ROOT_DIRECTORY, inst_root_dir)
-            pl.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-
-            if environ:
-                pl.set_attribute_value(DC.DEPLOYMENT_ENVIRONMENT_SETUP, environ)
-
-        xml = exp.to_xml()
-
-        if controller_access_configuration:
-            controller = proxy.create_experiment_controller(xml, 
-                controller_access_configuration)
-        else:
-            controller = ExperimentController(xml, self.root_dir)
-        
-        try:
-            controller.start()
-            while not controller.is_finished(app.guid):
-                time.sleep(0.5)
-            ping_result = controller.trace(app.guid, "stdout")
-            comp_result = r"""PING .* \(.*\) \d*\(\d*\) bytes of data.
-
---- .* ping statistics ---
-1 packets transmitted, 1 received, 0% packet loss, time \d*ms.*
-"""
-            self.assertTrue(re.match(comp_result, ping_result, re.MULTILINE),
-                "Unexpected trace:\n" + ping_result)
-        
-        finally:
-            try:
-                controller.stop()
-            except:
-                import traceback
-                traceback.print_exc()
-            try:
-                controller.shutdown()
-            except:
-                import traceback
-                traceback.print_exc()
-
-    def _test_spanning_deployment(self, use_sfa = False):
-        pl, exp = self.make_experiment_desc(use_sfa)
-
-        pl.set_attribute_value("p2pDeployment", True) # we do want it here - even if interactive
-        
-        from nepi.testbeds import planetlab as plpackage
-        
-        nodes = [ pl.create("Node") for i in xrange(4) ]
-        ifaces = [ pl.create("NodeInterface") for node in nodes ]
-        inet = pl.create("Internet")
-        for node, iface in zip(nodes,ifaces):
-            node.connector("devs").connect(iface.connector("node"))
-            iface.connector("inet").connect(inet.connector("devs"))
-        
-        apps = []
-        for node in nodes:
-            app = pl.create("Application")
-            app.set_attribute_value("command", "./consts")
-            app.set_attribute_value("buildDepends", "gcc")
-            app.set_attribute_value("build", "gcc ${SOURCES}/consts.c -o consts")
-            app.set_attribute_value("install", "cp consts ${SOURCES}/consts")
-            app.set_attribute_value("sources", os.path.join(
-                os.path.dirname(plpackage.__file__),'scripts','consts.c'))
-            app.enable_trace("stdout")
-            app.enable_trace("stderr")
-            app.enable_trace("buildlog")
-            node.connector("apps").connect(app.connector("node"))
-            apps.append(app)
-
-        comp_result = \
-r""".*ETH_P_ALL = 0x[0-9a-fA-F]{8}
-ETH_P_IP = 0x[0-9a-fA-F]{8}
-TUNGETIFF = 0x[0-9a-fA-F]{8}
-TUNSETIFF = 0x[0-9a-fA-F]{8}
-IFF_NO_PI = 0x[0-9a-fA-F]{8}
-IFF_TAP = 0x[0-9a-fA-F]{8}
-IFF_TUN = 0x[0-9a-fA-F]{8}
-IFF_VNET_HDR = 0x[0-9a-fA-F]{8}
-TUN_PKT_STRIP = 0x[0-9a-fA-F]{8}
-IFHWADDRLEN = 0x[0-9a-fA-F]{8}
-IFNAMSIZ = 0x[0-9a-fA-F]{8}
-IFREQ_SZ = 0x[0-9a-fA-F]{8}
-FIONREAD = 0x[0-9a-fA-F]{8}.*
-"""
-
-        comp_build = r".*(Identity added|gcc).*"
-
-        xml = exp.to_xml()
-
-        controller = ExperimentController(xml, self.root_dir)
-        try:
-            controller.start()
-            while not all(controller.is_finished(app.guid) for app in apps):
-                time.sleep(0.5)
-            
-            for app in apps:
-                app_result = controller.trace(app.guid, "stdout") or ""
-                self.assertTrue(re.match(comp_result, app_result, re.MULTILINE),
-                    "Unexpected trace:\n" + app_result)
-
-                build_result = controller.trace(app.guid, "buildlog") or ""
-                self.assertTrue(re.match(comp_build, build_result, re.MULTILINE | re.DOTALL),
-                    "Unexpected trace:\n" + build_result)
-        
-        finally:
-            try:
-                controller.stop()
-            except:
-                import traceback
-                traceback.print_exc()
-            try:
-                controller.shutdown()
-            except:
-                import traceback
-                traceback.print_exc()
-
-    def _test_recover(self, daemonize_testbed, controller_access_configuration, 
-            environ = None, use_sfa = False):
-        pl, exp = self.make_experiment_desc(use_sfa)
-        
-        pl.set_attribute_value(DC.RECOVERY_POLICY, DC.POLICY_RECOVER)
-        
-        node1 = pl.create("Node")
-        node2 = pl.create("Node")
-        node1.set_attribute_value("hostname", self.host1)
-        node2.set_attribute_value("hostname", self.host2)
-        
-        iface1 = pl.create("NodeInterface")
-        iface2 = pl.create("NodeInterface")
-        inet = pl.create("Internet")
-        node1.connector("devs").connect(iface1.connector("node"))
-        node2.connector("devs").connect(iface2.connector("node"))
-        iface1.connector("inet").connect(inet.connector("devs"))
-        iface2.connector("inet").connect(inet.connector("devs"))
-        
-        tap1 = pl.create("TapInterface")
-        tap2 = pl.create("TapInterface")
-        node1.connector("devs").connect(tap1.connector("node"))
-        node2.connector("devs").connect(tap2.connector("node"))
-        tap1.connector("udp").connect(tap2.connector("udp"))
-        
-        tap1ip = tap1.add_address()
-        tap1ip.set_attribute_value("Address", "192.168.2.2")
-        tap1ip.set_attribute_value("NetPrefix", 24)
-        tap1ip.set_attribute_value("Broadcast", False)
-
-        tap2ip = tap2.add_address()
-        tap2ip.set_attribute_value("Address", "192.168.2.3")
-        tap2ip.set_attribute_value("NetPrefix", 24)
-        tap2ip.set_attribute_value("Broadcast", False)
-        
-        app = pl.create("Application")
-        app.set_attribute_value("command", "ping -qc10 192.168.2.3")
-        app.enable_trace("stdout")
-        app.connector("node").connect(node1.connector("apps"))
-
-        if daemonize_testbed:
-            pl.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-            inst_root_dir = os.path.join(self.root_dir, "instance")
-            os.mkdir(inst_root_dir)
-            pl.set_attribute_value(DC.ROOT_DIRECTORY, inst_root_dir)
-            pl.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-
-            if environ:
-                pl.set_attribute_value(DC.DEPLOYMENT_ENVIRONMENT_SETUP, environ)
-
-        xml = exp.to_xml()
-
-        if controller_access_configuration:
-            controller = proxy.create_experiment_controller(xml, 
-                controller_access_configuration)
-        else:
-            controller = ExperimentController(xml, self.root_dir)
-        
-        try:
-            controller.start()
-            
-            # purposedly break connection
-            controller = None
-            
-            # recover
-            if controller_access_configuration:
-                controller_access_configuration.set_attribute_value(
-                    DC.RECOVER, True)
-                controller = proxy.create_experiment_controller(None, 
-                    controller_access_configuration)
-            else:
-                controller = ExperimentController(None, self.root_dir)
-                controller.recover()
-            
-            while not controller.is_finished(app.guid):
-                time.sleep(0.5)
-            ping_result = controller.trace(app.guid, "stdout")
-            comp_result = r"""PING .* \(.*\) \d*\(\d*\) bytes of data.
-
---- .* ping statistics ---
-10 packets transmitted, 10 received, 0% packet loss, time \d*ms.*
-"""
-            self.assertTrue(re.match(comp_result, ping_result, re.MULTILINE),
-                "Unexpected trace:\n" + ping_result)
-        
-        finally:
-            if controller is not None:
-                try:
-                    controller.stop()
-                except:
-                    import traceback
-                    traceback.print_exc()
-                try:
-                    controller.shutdown()
-                except:
-                    import traceback
-                    traceback.print_exc()
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_simple(self):
-        self._test_simple(
-            daemonize_testbed = False,
-            controller_access_configuration = None)
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_simple_sfa(self):
-        self._test_simple(
-            daemonize_testbed = False,
-            controller_access_configuration = None,
-            use_sfa = True)
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    @test_util.skipUnless(os.environ.get('NEPI_FULL_TESTS','').lower() in ('1','yes','true','on'),
-        "Test is interactive, requires NEPI_FULL_TESTS=yes")
-    def test_spanning_deployment(self):
-        self._test_spanning_deployment()
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    @test_util.skipUnless(os.environ.get('NEPI_FULL_TESTS','').lower() in ('1','yes','true','on'),
-        "Test is interactive, requires NEPI_FULL_TESTS=yes")
-    def test_spanning_deployment_sfa(self):
-        self._test_spanning_deployment(use_sfa = True)
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_simple_daemonized(self):
-        access_config = proxy.AccessConfiguration({
-            DC.DEPLOYMENT_MODE : DC.MODE_DAEMON,
-            DC.ROOT_DIRECTORY : self.root_dir,
-            DC.LOG_LEVEL : DC.DEBUG_LEVEL,
-        })
-
-        self._test_simple(
-            daemonize_testbed = False,
-            controller_access_configuration = access_config)
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_simple_daemonized_sfa(self):
-        access_config = proxy.AccessConfiguration({
-            DC.DEPLOYMENT_MODE : DC.MODE_DAEMON,
-            DC.ROOT_DIRECTORY : self.root_dir,
-            DC.LOG_LEVEL : DC.DEBUG_LEVEL,
-        })
-
-        self._test_simple(
-            daemonize_testbed = False,
-            controller_access_configuration = access_config,
-            use_sfa = True)
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_z_simple_ssh(self): # _z_ cause we want it last - it messes up the process :(
-        # Recreate environment
-        environ = ' ; '.join( map("export %s=%r".__mod__, os.environ.iteritems()) )
-
-        env = test_util.test_environment()
-
-        access_config = proxy.AccessConfiguration({
-            DC.DEPLOYMENT_MODE : DC.MODE_DAEMON,
-            DC.ROOT_DIRECTORY : self.root_dir,
-            DC.LOG_LEVEL : DC.DEBUG_LEVEL,
-            DC.DEPLOYMENT_COMMUNICATION : DC.ACCESS_SSH,
-            DC.DEPLOYMENT_PORT : env.port,
-            DC.USE_AGENT : True,
-            DC.DEPLOYMENT_ENVIRONMENT_SETUP : environ,
-        })
-
-        self._test_simple(
-            daemonize_testbed = False,
-            controller_access_configuration = access_config,
-            environ = environ)
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_recover(self):
-        self._test_recover(
-            daemonize_testbed = False,
-            controller_access_configuration = None)
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_recover_sfa(self):
-        self._test_recover(
-            daemonize_testbed = False,
-            controller_access_configuration = None,
-            use_sfa = True)
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_recover_daemonized(self):
-        access_config = proxy.AccessConfiguration({
-            DC.DEPLOYMENT_MODE : DC.MODE_DAEMON,
-            DC.ROOT_DIRECTORY : self.root_dir,
-            DC.LOG_LEVEL : DC.DEBUG_LEVEL,
-        })
-
-        self._test_recover(
-            daemonize_testbed = False,
-            controller_access_configuration = access_config)
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_recover_daemonized_sfa(self):
-        access_config = proxy.AccessConfiguration({
-            DC.DEPLOYMENT_MODE : DC.MODE_DAEMON,
-            DC.ROOT_DIRECTORY : self.root_dir,
-            DC.LOG_LEVEL : DC.DEBUG_LEVEL,
-        })
-
-        self._test_recover(
-            daemonize_testbed = False,
-            controller_access_configuration = access_config,
-            use_sfa = True)
-
-
-if __name__ == '__main__':
-    unittest.main()
-
diff --git a/test/testbeds/planetlab/integration_cross.py b/test/testbeds/planetlab/integration_cross.py
deleted file mode 100755 (executable)
index 30143d0..0000000
+++ /dev/null
@@ -1,238 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import getpass
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-from nepi.util import proxy
-from nepi.util.constants import DeploymentConfiguration as DC, ATTR_NEPI_TESTBED_ENVIRONMENT_SETUP
-import os
-import re
-import shutil
-import tempfile
-import test_util
-import time
-import unittest
-
-class PlanetLabMultiIntegrationTestCase(unittest.TestCase):
-    testbed_id = "planetlab"
-    
-    slicename1 = "inria_nepi"
-    plchost1 = "nepiplc.pl.sophia.inria.fr"
-
-    slicename2 = "inria_nepi12"
-    plchost2 = "www.planet-lab.eu"
-    
-    host1pl1 = "nepi1.pl.sophia.inria.fr"
-    host2pl1 = "nepi2.pl.sophia.inria.fr"
-
-    host1pl2 = "planetlab1.utt.fr"
-    host2pl2 = "planetlab2.utt.fr"
-
-    port_base = 2000 + (os.getpid() % 1000) * 13
-    
-    def setUp(self):
-        self.root_dir = tempfile.mkdtemp()
-        self.__class__.port_base = self.port_base + 100
-
-    def tearDown(self):
-        try:
-            shutil.rmtree(self.root_dir)
-        except:
-            # retry
-            time.sleep(0.1)
-            shutil.rmtree(self.root_dir)
-
-    def make_experiment_desc(self):
-        testbed_id = self.testbed_id
-        
-        slicename1 = self.slicename1
-        plchost1 = self.plchost1
-        
-        slicename2 = self.slicename2
-        plchost2 = self.plchost2
-        
-        pl_ssh_key = os.environ.get(
-            "PL_SSH_KEY",
-            "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'],) )
-        pl_user, pl_pwd = test_util.pl_auth()
-
-        exp_desc = ExperimentDescription()
-        pl_provider = FactoriesProvider(testbed_id)
-        pl_desc = exp_desc.add_testbed_description(pl_provider)
-        pl_desc.set_attribute_value("homeDirectory", self.root_dir)
-        pl_desc.set_attribute_value("slice", slicename1)
-        pl_desc.set_attribute_value("sliceSSHKey", pl_ssh_key)
-        pl_desc.set_attribute_value("authUser", pl_user)
-        pl_desc.set_attribute_value("authPass", pl_pwd)
-        pl_desc.set_attribute_value("plcHost", plchost1)
-        pl_desc.set_attribute_value("tapPortBase", self.port_base)
-        pl_desc.set_attribute_value("p2pDeployment", False) # it's interactive, we don't want it in tests
-        pl_desc.set_attribute_value("cleanProc", True)
-
-        pl_desc2 = exp_desc.add_testbed_description(pl_provider)
-        pl_desc2.set_attribute_value("homeDirectory", self.root_dir+"v2")
-        pl_desc2.set_attribute_value("slice", slicename2)
-        pl_desc2.set_attribute_value("sliceSSHKey", pl_ssh_key)
-        pl_desc2.set_attribute_value("authUser", pl_user)
-        pl_desc2.set_attribute_value("authPass", pl_pwd)
-        pl_desc2.set_attribute_value("plcHost", plchost2)
-        pl_desc2.set_attribute_value("tapPortBase", self.port_base+500)
-        pl_desc2.set_attribute_value("p2pDeployment", False) # it's interactive, we don't want it in tests
-        pl_desc2.set_attribute_value("cleanProc", True)
-        
-        return pl_desc, pl_desc2, exp_desc
-    
-    def make_pl_tapnode(self, pl, tapip, hostname, label_prefix):
-        node1 = pl.create("Node")
-        node1.set_attribute_value("hostname", hostname)
-        node1.set_attribute_value("label", label_prefix)
-        iface1 = pl.create("NodeInterface")
-        iface1.set_attribute_value("label", label_prefix+"iface")
-        if tapip:
-            tap1 = pl.create("TapInterface")
-            tap1.enable_trace("packets") # for error output
-            tap1.set_attribute_value("label", label_prefix+"tap")
-            
-            node1.connector("devs").connect(tap1.connector("node"))
-            
-            tap1ip = tap1.add_address()
-            tap1ip.set_attribute_value("Address", tapip)
-            tap1ip.set_attribute_value("NetPrefix", 24)
-            tap1ip.set_attribute_value("Broadcast", False)
-        else:
-            tap1 = None
-            tap1ip = None
-        inet = pl.create("Internet")
-        node1.connector("devs").connect(iface1.connector("node"))
-        iface1.connector("inet").connect(inet.connector("devs"))
-        
-        return node1, iface1, tap1, tap1ip, inet
-
-    def make_ns_in_pl(self, pl, exp, node1, iface1, root):
-        ns3_testbed_id = "ns3"
-        
-        # Add NS3 support in node1
-        plnepi = pl.create("NepiDependency")
-        plns3 = pl.create("NS3Dependency")
-        plnepi.connector("node").connect(node1.connector("deps"))
-        plns3.connector("node").connect(node1.connector("deps"))
-
-        # Create NS3 testbed running in node1
-        ns3_provider = FactoriesProvider(ns3_testbed_id)
-        ns3_desc = exp.add_testbed_description(ns3_provider)
-        ns3_desc.set_attribute_value("rootDirectory", root)
-        ns3_desc.set_attribute_value("SimulatorImplementationType", "ns3::RealtimeSimulatorImpl")
-        ns3_desc.set_attribute_value("ChecksumEnabled", True)
-        ns3_desc.set_attribute_value(DC.DEPLOYMENT_HOST, "{#[%s].addr[0].[Address]#}" % (
-            iface1.get_attribute_value("label"),))
-        ns3_desc.set_attribute_value(DC.DEPLOYMENT_USER, 
-            pl.get_attribute_value("slice"))
-        ns3_desc.set_attribute_value(DC.DEPLOYMENT_KEY, 
-            pl.get_attribute_value("sliceSSHKey"))
-        ns3_desc.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        ns3_desc.set_attribute_value(DC.DEPLOYMENT_COMMUNICATION, DC.ACCESS_SSH)
-        ns3_desc.set_attribute_value(DC.DEPLOYMENT_ENVIRONMENT_SETUP,
-            "{#[%s].[%s]#}" % (
-                node1.get_attribute_value("label"),
-                ATTR_NEPI_TESTBED_ENVIRONMENT_SETUP,))
-        ns3_desc.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-        
-        return ns3_desc
-    
-    
-    def _test_plns3_crossconnect(self, proto):
-        pl, pl2, exp = self.make_experiment_desc()
-        
-        # Create PL node, ifaces, assign addresses
-        node1, iface1, _, _, inet1 = self.make_pl_tapnode(pl, 
-            None, self.host1pl1, "node1")
-        node2, iface2, tap2, tap2ip, inet2 = self.make_pl_tapnode(pl2, 
-            "192.168.2.3", self.host1pl2, "node2")
-
-        # Create NS3 instance in node1
-        # With a node and all required protocols to be pinged
-        ns3 = self.make_ns_in_pl(pl, exp, node1, iface1, "tb-ns-rcross-1")
-
-        ns1 = ns3.create("ns3::Node")
-        ipv41 = ns3.create("ns3::Ipv4L3Protocol")
-        arp1  = ns3.create("ns3::ArpL3Protocol")
-        icmp1 = ns3.create("ns3::Icmpv4L4Protocol")
-        ns1.connector("protos").connect(ipv41.connector("node"))
-        ns1.connector("protos").connect(arp1.connector("node"))
-        ns1.connector("protos").connect(icmp1.connector("node"))
-        ns1if = ns3.create("ns3::FdNetDevice")
-        ns1if.enable_trace("FdPcapTrace")
-        ns1if.set_attribute_value("label", "ns1if")
-        ns1tc = ns3.create("ns3::Nepi::TunChannel")
-        ns1.connector("devs").connect(ns1if.connector("node"))
-        ns1tc.connector("fd->").connect(ns1if.connector("->fd"))
-        ip1 = ns1if.add_address()
-        ip1.set_attribute_value("Address", "192.168.2.2")
-        ip1.set_attribute_value("NetPrefix", 24)
-        ip1.set_attribute_value("Broadcast", False)
-            
-        # Connect the two
-        tap2.connector(proto).connect(ns1tc.connector(proto))
-        
-        # Create PlanetLab ping application, pinging the from one PL to another
-        ping = pl2.create("Application")
-        ping.set_attribute_value("command", "ping -qc10 {#[ns1if].addr[0].[Address]#}")
-        ping.enable_trace("stdout")
-        ping.enable_trace("stderr")
-        ping.connector("node").connect(node2.connector("apps"))
-
-        comp_result = r"""PING .* \(.*\) \d*\(\d*\) bytes of data.
-
---- .* ping statistics ---
-10 packets transmitted, 10 received, 0% packet loss, time \d*ms.*
-"""
-
-        xml = exp.to_xml()
-
-        controller = ExperimentController(xml, self.root_dir)
-        
-        try:
-            controller.start()
-
-            while not controller.is_finished(ping.guid):
-                time.sleep(0.5)
-              
-            ping_result = controller.trace(ping.guid, "stdout")
-            tap2_trace = controller.trace(tap2.guid, "packets")
-        finally:
-            try:
-                controller.stop()
-            except:
-                import traceback
-                traceback.print_exc()
-            try:
-                controller.shutdown()
-            except:
-                import traceback
-                traceback.print_exc()
-
-        # asserts at the end, to make sure there's proper cleanup
-        self.assertTrue(re.match(comp_result, ping_result, re.MULTILINE),
-            "Unexpected trace:\n%s\nTap trace:\n%s\n" % (
-                ping_result,
-                tap2_trace) )
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, 
-        "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    @test_util.skipUnless(os.environ.get('NEPI_FULL_TESTS','').lower() in ('1','yes','true','on'),
-        "Test is expensive, requires NEPI_FULL_TESTS=yes")
-    def test_plns3_crossconnect_udp(self):
-        self._test_plns3_crossconnect("udp")
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, 
-        "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    @test_util.skipUnless(os.environ.get('NEPI_FULL_TESTS','').lower() in ('1','yes','true','on'),
-        "Test is expensive, requires NEPI_FULL_TESTS=yes")
-    def test_plns3_crossconnect_tcp(self):
-        self._test_plns3_crossconnect("tcp")
-
-
-if __name__ == '__main__':
-    unittest.main()
-
diff --git a/test/testbeds/planetlab/integration_multi.py b/test/testbeds/planetlab/integration_multi.py
deleted file mode 100755 (executable)
index e8ed2da..0000000
+++ /dev/null
@@ -1,209 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import getpass
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-from nepi.util import proxy
-from nepi.util.constants import DeploymentConfiguration as DC, ATTR_NEPI_TESTBED_ENVIRONMENT_SETUP
-import os
-import re
-import shutil
-import tempfile
-import test_util
-import time
-import unittest
-
-class PlanetLabMultiIntegrationTestCase(unittest.TestCase):
-    testbed_id = "planetlab"
-    
-    slicename1 = "inria_nepi"
-    plchost1 = "nepiplc.pl.sophia.inria.fr"
-    plcvnet1 = "192.168.2"
-
-    slicename2 = "inria_nepi2"
-    plchost2 = "nepiplc.pl.sophia.inria.fr"
-    plcvnet2 = "192.168.3"
-    
-    host1pl1 = "nepi1.pl.sophia.inria.fr"
-    host2pl1 = "nepi2.pl.sophia.inria.fr"
-
-    host1pl2 = "nepi3.pl.sophia.inria.fr"
-    host2pl2 = "nepi5.pl.sophia.inria.fr"
-
-    port_base = 2000 + (os.getpid() % 1000) * 13
-    
-    def setUp(self):
-        self.root_dir = tempfile.mkdtemp()
-        self.__class__.port_base = self.port_base + 100
-
-    def tearDown(self):
-        try:
-            shutil.rmtree(self.root_dir)
-        except:
-            # retry
-            time.sleep(0.1)
-            shutil.rmtree(self.root_dir)
-
-    def make_experiment_desc(self):
-        testbed_id = self.testbed_id
-        
-        slicename1 = self.slicename1
-        plchost1 = self.plchost1
-        
-        slicename2 = self.slicename2
-        plchost2 = self.plchost2
-        
-        pl_ssh_key = os.environ.get(
-            "PL_SSH_KEY",
-            "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'],) )
-        pl_user, pl_pwd = test_util.pl_auth()
-
-        exp_desc = ExperimentDescription()
-        pl_provider = FactoriesProvider(testbed_id)
-        pl_desc = exp_desc.add_testbed_description(pl_provider)
-        pl_desc.set_attribute_value("homeDirectory", self.root_dir)
-        pl_desc.set_attribute_value("slice", slicename1)
-        pl_desc.set_attribute_value("sliceSSHKey", pl_ssh_key)
-        pl_desc.set_attribute_value("authUser", pl_user)
-        pl_desc.set_attribute_value("authPass", pl_pwd)
-        pl_desc.set_attribute_value("plcHost", plchost1)
-        pl_desc.set_attribute_value("tapPortBase", self.port_base)
-        pl_desc.set_attribute_value("p2pDeployment", False) # it's interactive, we don't want it in tests
-        pl_desc.set_attribute_value("cleanProc", True)
-        pl_desc.set_attribute_value("plLogLevel", "DEBUG")
-
-        pl_desc2 = exp_desc.add_testbed_description(pl_provider)
-        pl_desc2.set_attribute_value("homeDirectory", self.root_dir+"v2")
-        pl_desc2.set_attribute_value("slice", slicename2)
-        pl_desc2.set_attribute_value("sliceSSHKey", pl_ssh_key)
-        pl_desc2.set_attribute_value("authUser", pl_user)
-        pl_desc2.set_attribute_value("authPass", pl_pwd)
-        pl_desc2.set_attribute_value("plcHost", plchost2)
-        pl_desc2.set_attribute_value("tapPortBase", self.port_base+500)
-        pl_desc2.set_attribute_value("p2pDeployment", False) # it's interactive, we don't want it in tests
-        pl_desc2.set_attribute_value("cleanProc", True)
-        pl_desc2.set_attribute_value("plLogLevel", "DEBUG")
-        
-        return pl_desc, pl_desc2, exp_desc
-    
-    def make_pl_tapnode(self, pl, tapip, hostname, label_prefix):
-        node1 = pl.create("Node")
-        node1.set_attribute_value("hostname", hostname)
-        node1.set_attribute_value("label", label_prefix)
-        iface1 = pl.create("NodeInterface")
-        iface1.set_attribute_value("label", label_prefix+"iface")
-        tap1 = pl.create("TapInterface")
-        tap1.enable_trace("packets") # for error output
-        tap1.set_attribute_value("label", label_prefix+"tap")
-        inet = pl.create("Internet")
-        node1.connector("devs").connect(iface1.connector("node"))
-        node1.connector("devs").connect(tap1.connector("node"))
-        iface1.connector("inet").connect(inet.connector("devs"))
-        
-        tap1ip = tap1.add_address()
-        tap1ip.set_attribute_value("Address", tapip)
-        tap1ip.set_attribute_value("NetPrefix", 24)
-        tap1ip.set_attribute_value("Broadcast", False)
-        
-        return node1, iface1, tap1, tap1ip, inet
-    
-    def _test_plpl_crossconnect(self, proto, recover = False):
-        pl, pl2, exp = self.make_experiment_desc()
-        
-        if recover:
-            pl.set_attribute_value(DC.RECOVERY_POLICY, DC.POLICY_RECOVER)
-            pl2.set_attribute_value(DC.RECOVERY_POLICY, DC.POLICY_RECOVER)
-        
-        # Create PL node, ifaces, assign addresses
-        node1, iface1, tap1, tap1ip, inet1 = self.make_pl_tapnode(pl, 
-            self.plcvnet1+".2", self.host1pl1, "node1")
-        node2, iface2, tap2, tap2ip, inet2 = self.make_pl_tapnode(pl2, 
-            self.plcvnet2+".3", self.host1pl2, "node2")
-            
-        # Connect the two
-        tap1.connector(proto).connect(tap2.connector(proto))
-        tap1.set_attribute_value("pointopoint", "{#[node2tap].addr[0].[Address]#}")
-        tap2.set_attribute_value("pointopoint", "{#[node1tap].addr[0].[Address]#}")
-        
-        # Disable encryption for GRE
-        if proto == "gre":
-            tap1.set_attribute_value("tun_cipher", "PLAIN")
-            tap2.set_attribute_value("tun_cipher", "PLAIN")
-        
-        # Create PlanetLab ping application, pinging the from one PL to another
-        ping = pl.create("Application")
-        ping.set_attribute_value("command", "ping -qc10 {#[node2tap].addr[0].[Address]#}")
-        ping.enable_trace("stdout")
-        ping.enable_trace("stderr")
-        ping.connector("node").connect(node1.connector("apps"))
-
-        comp_result = r"""PING .* \(.*\) \d*\(\d*\) bytes of data.
-
---- .* ping statistics ---
-10 packets transmitted, 10 received, 0% packet loss, time \d*ms.*
-"""
-
-        xml = exp.to_xml()
-
-        try:
-            controller = ExperimentController(xml, self.root_dir)
-            controller.start()
-            
-            if recover:
-                controller = None
-                controller = ExperimentController(None, self.root_dir)
-                controller.recover()
-
-            while not controller.is_finished(ping.guid):
-                time.sleep(0.5)
-              
-            ping_result = controller.trace(ping.guid, "stdout")
-            tap_trace = controller.trace(tap1.guid, "packets")
-            tap2_trace = controller.trace(tap2.guid, "packets")
-        
-        finally:
-            if controller is not None:
-                try:
-                    controller.stop()
-                except:
-                    import traceback
-                    traceback.print_exc()
-                try:
-                    controller.shutdown()
-                except:
-                    import traceback
-                    traceback.print_exc()
-
-        # asserts at the end, to make sure there's proper cleanup
-        self.assertTrue(re.match(comp_result, ping_result, re.MULTILINE),
-            "Unexpected trace:\n%s\nTap trace at origin:\n%s\nTap trace at destination:\n%s\n" % (
-                ping_result,
-                tap_trace,
-                tap2_trace) )
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, 
-        "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_plpl_crossconnect_udp(self):
-        self._test_plpl_crossconnect("udp")
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, 
-        "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_plpl_crossconnect_tcp(self):
-        self._test_plpl_crossconnect("tcp")
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, 
-        "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_plpl_crossconnect_gre(self):
-        self._test_plpl_crossconnect("gre")
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, 
-        "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    def test_plpl_crossconnect_udp_recover(self):
-        self._test_plpl_crossconnect("udp", 
-            recover = True)
-
-
-if __name__ == '__main__':
-    unittest.main()
-
diff --git a/test/testbeds/planetlab/integration_ns3.py b/test/testbeds/planetlab/integration_ns3.py
deleted file mode 100755 (executable)
index a737732..0000000
+++ /dev/null
@@ -1,488 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import getpass
-from nepi.core.design import ExperimentDescription, FactoriesProvider
-from nepi.core.execute import ExperimentController
-from nepi.util import proxy
-from nepi.util.constants import DeploymentConfiguration as DC, ATTR_NEPI_TESTBED_ENVIRONMENT_SETUP
-import os
-import re
-import shutil
-import tempfile
-import test_util
-import time
-import unittest
-
-class PlanetLabCrossIntegrationTestCase(unittest.TestCase):
-    testbed_id = "planetlab"
-    slicename = "inria_nepi"
-    plchost = "nepiplc.pl.sophia.inria.fr"
-    
-    host1 = "nepi1.pl.sophia.inria.fr"
-    host2 = "nepi2.pl.sophia.inria.fr"
-    host3 = "nepi3.pl.sophia.inria.fr"
-    host4 = "nepi5.pl.sophia.inria.fr"
-
-    port_base = 2000 + (os.getpid() % 1000) * 13
-    
-    def setUp(self):
-        self.root_dir = tempfile.mkdtemp()
-        self.__class__.port_base = self.port_base + 100
-
-    def tearDown(self):
-        try:
-            shutil.rmtree(self.root_dir)
-        except:
-            # retry
-            time.sleep(0.1)
-            shutil.rmtree(self.root_dir)
-
-    def make_experiment_desc(self):
-        testbed_id = self.testbed_id
-        slicename = self.slicename
-        plchost = self.plchost
-        pl_ssh_key = os.environ.get(
-            "PL_SSH_KEY",
-            "%s/.ssh/id_rsa_planetlab" % (os.environ['HOME'],) )
-        pl_user, pl_pwd = test_util.pl_auth()
-
-        exp_desc = ExperimentDescription()
-        pl_provider = FactoriesProvider(testbed_id)
-        pl_desc = exp_desc.add_testbed_description(pl_provider)
-        pl_desc.set_attribute_value("homeDirectory", self.root_dir)
-        pl_desc.set_attribute_value("slice", slicename)
-        pl_desc.set_attribute_value("sliceSSHKey", pl_ssh_key)
-        pl_desc.set_attribute_value("authUser", pl_user)
-        pl_desc.set_attribute_value("authPass", pl_pwd)
-        pl_desc.set_attribute_value("plcHost", plchost)
-        pl_desc.set_attribute_value("tapPortBase", self.port_base)
-        pl_desc.set_attribute_value("p2pDeployment", False) # it's interactive, we don't want it in tests
-        pl_desc.set_attribute_value("cleanProc", True)
-        #pl_desc.set_attribute_value("plLogLevel", "DEBUG")
-        
-        return pl_desc, exp_desc
-    
-    def make_pl_tapnode(self, pl, ip = "192.168.2.2", inet = None, label = "node1", hostname = None):
-        if not isinstance(ip, list):
-            ips = [ip]
-        else:
-            ips = ip
-        node1 = pl.create("Node")
-        node1.set_attribute_value("hostname", hostname or self.host1)
-        node1.set_attribute_value("label", label)
-        iface1 = pl.create("NodeInterface")
-        iface1.set_attribute_value("label", label+"iface")
-        tap1 = []
-        tap1ip = []
-        for i,ip in enumerate(ips):
-            _tap1 = pl.create("TapInterface")
-            _tap1.enable_trace("packets") # for error output
-            _tap1.set_attribute_value("label", label+"tap"+(str(i+1) if i else ""))
-        
-            _tap1ip = _tap1.add_address()
-            _tap1ip.set_attribute_value("Address", ip)
-            _tap1ip.set_attribute_value("NetPrefix", 24)
-            _tap1ip.set_attribute_value("Broadcast", False)
-        
-            node1.connector("devs").connect(_tap1.connector("node"))
-            
-            tap1.append(_tap1)
-            tap1ip.append(_tap1ip)
-            
-        inet = inet or pl.create("Internet")
-        node1.connector("devs").connect(iface1.connector("node"))
-        iface1.connector("inet").connect(inet.connector("devs"))
-        
-        if len(tap1) == 1:
-            tap1 = tap1[0]
-        if len(tap1ip) == 1:
-            tap1ip = tap1ip[0]
-        
-        return node1, iface1, tap1, tap1ip, inet
-    
-    def make_ns_in_pl(self, pl, exp, node1, iface1, root):
-        ns3_testbed_id = "ns3"
-        
-        # Add NS3 support in node1
-        plnepi = pl.create("NepiDependency")
-        plns3 = pl.create("NS3Dependency")
-        plnepi.connector("node").connect(node1.connector("deps"))
-        plns3.connector("node").connect(node1.connector("deps"))
-
-        # Create NS3 testbed running in node1
-        ns3_provider = FactoriesProvider(ns3_testbed_id)
-        ns3_desc = exp.add_testbed_description(ns3_provider)
-        ns3_desc.set_attribute_value("rootDirectory", root)
-        ns3_desc.set_attribute_value("SimulatorImplementationType", "ns3::RealtimeSimulatorImpl")
-        ns3_desc.set_attribute_value("ChecksumEnabled", True)
-        ns3_desc.set_attribute_value(DC.DEPLOYMENT_HOST, "{#[%s].addr[0].[Address]#}" % (
-            iface1.get_attribute_value("label"),))
-        ns3_desc.set_attribute_value(DC.DEPLOYMENT_USER, 
-            pl.get_attribute_value("slice"))
-        ns3_desc.set_attribute_value(DC.DEPLOYMENT_KEY, 
-            pl.get_attribute_value("sliceSSHKey"))
-        ns3_desc.set_attribute_value(DC.DEPLOYMENT_MODE, DC.MODE_DAEMON)
-        ns3_desc.set_attribute_value(DC.DEPLOYMENT_COMMUNICATION, DC.ACCESS_SSH)
-        ns3_desc.set_attribute_value(DC.DEPLOYMENT_ENVIRONMENT_SETUP,
-            "{#[%s].[%s]#}" % (
-                node1.get_attribute_value("label"),
-                ATTR_NEPI_TESTBED_ENVIRONMENT_SETUP,))
-        ns3_desc.set_attribute_value(DC.LOG_LEVEL, DC.DEBUG_LEVEL)
-        
-        return ns3_desc
-    
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, 
-        "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    @test_util.skipUnless(os.environ.get('NEPI_FULL_TESTS','').lower() in ('1','yes','true','on'),
-        "Test is expensive, requires NEPI_FULL_TESTS=yes")
-    def test_ns3_in_pl(self):
-        ns3_testbed_id = "ns3"
-        
-        pl, exp = self.make_experiment_desc()
-        
-        node1 = pl.create("Node")
-        node1.set_attribute_value("hostname", self.host1)
-        node1.set_attribute_value("label", "node1")
-        iface1 = pl.create("NodeInterface")
-        iface1.set_attribute_value("label", "node1iface")
-        inet = pl.create("Internet")
-        node1.connector("devs").connect(iface1.connector("node"))
-        iface1.connector("inet").connect(inet.connector("devs"))
-        
-        # Add NS3 support in node1
-        ns3_desc = self.make_ns_in_pl(pl, exp, node1, iface1, "tb-ns3-1")
-
-        xml = exp.to_xml()
-
-        try:
-            controller = ExperimentController(xml, self.root_dir)
-            controller.start()
-            # just test that it starts...
-        finally:
-            try:
-                controller.stop()
-            except:
-                import traceback
-                traceback.print_exc()
-            try:
-                controller.shutdown()
-            except:
-                import traceback
-                traceback.print_exc()
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, 
-        "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    @test_util.skipUnless(os.environ.get('NEPI_FULL_TESTS','').lower() in ('1','yes','true','on'),
-        "Test is expensive, requires NEPI_FULL_TESTS=yes")
-    def test_ns3_in_pl_crossconnect(self):
-        pl, exp = self.make_experiment_desc()
-        
-        # Create PL node, ifaces, assign addresses
-        node1, iface1, tap1, tap1ip, inet = self.make_pl_tapnode(pl)
-        
-        # Add NS3 support in node1
-        ns3_desc = self.make_ns_in_pl(pl, exp, node1, iface1, "tb-ns3-2")
-        
-        # Create NS3 node that is responsive to pings, connected
-        # to node1 through the Tap interface
-        ns1 = ns3_desc.create("ns3::Node")
-        ipv41 = ns3_desc.create("ns3::Ipv4L3Protocol")
-        arp1  = ns3_desc.create("ns3::ArpL3Protocol")
-        icmp1 = ns3_desc.create("ns3::Icmpv4L4Protocol")
-        ns1.connector("protos").connect(ipv41.connector("node"))
-        ns1.connector("protos").connect(arp1.connector("node"))
-        ns1.connector("protos").connect(icmp1.connector("node"))
-        ns1if = ns3_desc.create("ns3::FdNetDevice")
-        ns1if.enable_trace("FdPcapTrace")
-        ns1if.set_attribute_value("label", "ns1if")
-        ns1.connector("devs").connect(ns1if.connector("node"))
-        tap1.connector("fd->").connect(ns1if.connector("->fd"))
-        tap1.set_attribute_value("tun_cipher", "PLAIN")
-        ip1 = ns1if.add_address()
-        ip1.set_attribute_value("Address", "192.168.2.3")
-        ip1.set_attribute_value("NetPrefix", 24)
-        ip1.set_attribute_value("Broadcast", False)
-
-        # Create PlanetLab ping application, pinging the NS3 node
-        ping = pl.create("Application")
-        ping.set_attribute_value("command", "ping -qc10 {#[ns1if].addr[0].[Address]#}")
-        ping.enable_trace("stdout")
-        ping.enable_trace("stderr")
-        ping.connector("node").connect(node1.connector("apps"))
-
-        comp_result = r"""PING .* \(.*\) \d*\(\d*\) bytes of data.
-
---- .* ping statistics ---
-10 packets transmitted, 10 received, 0% packet loss, time \d*ms.*
-"""
-
-        xml = exp.to_xml()
-
-        try:
-            controller = ExperimentController(xml, self.root_dir)
-            controller.start()
-
-            while not controller.is_finished(ping.guid):
-                time.sleep(0.5)
-              
-            ping_result = controller.trace(ping.guid, "stdout")
-            tap_trace = controller.trace(tap1.guid, "packets")
-
-        finally:
-            try:
-                controller.stop()
-            except:
-                import traceback
-                traceback.print_exc()
-            try:
-                controller.shutdown()
-            except:
-                import traceback
-                traceback.print_exc()
-
-        # asserts at the end, to make sure there's proper cleanup
-        self.assertTrue(re.match(comp_result, ping_result, re.MULTILINE),
-            "Unexpected trace:\n%s\nTap trace:\n%s\n" % (
-                ping_result,
-                tap_trace) )
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, 
-        "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    @test_util.skipUnless(os.environ.get('NEPI_FULL_TESTS','').lower() in ('1','yes','true','on'),
-        "Test is expensive, requires NEPI_FULL_TESTS=yes")
-    def test_ns3_in_pl_snat(self):
-        pl, exp = self.make_experiment_desc()
-        
-        # Create PL node, ifaces, assign addresses
-        node1, iface1, tap1, tap1ip, inet = self.make_pl_tapnode(pl)
-        
-        # Add NS3 support in node1
-        ns3_desc = self.make_ns_in_pl(pl, exp, node1, iface1, "tb-ns3-3")
-        
-        # Enable SNAT
-        tap1.set_attribute_value("snat", True)
-        
-        # Add second PL node (ping target)
-        node2 = pl.create("Node")
-        node2.set_attribute_value("hostname", self.host2)
-        node2.set_attribute_value("label", "node2")
-        iface2 = pl.create("NodeInterface")
-        iface2.set_attribute_value("label", "node2iface")
-        node2.connector("devs").connect(iface2.connector("node"))
-        iface2.connector("inet").connect(inet.connector("devs"))
-        
-        # Create NS3 node that is responsive to pings, connected
-        # to node1 through the Tap interface
-        ns1 = ns3_desc.create("ns3::Node")
-        ipv41 = ns3_desc.create("ns3::Ipv4L3Protocol")
-        arp1  = ns3_desc.create("ns3::ArpL3Protocol")
-        icmp1 = ns3_desc.create("ns3::Icmpv4L4Protocol")
-        ns1.connector("protos").connect(ipv41.connector("node"))
-        ns1.connector("protos").connect(arp1.connector("node"))
-        ns1.connector("protos").connect(icmp1.connector("node"))
-        ns1if = ns3_desc.create("ns3::FdNetDevice")
-        ns1if.enable_trace("FdPcapTrace")
-        ns1if.set_attribute_value("label", "ns1if")
-        ns1.connector("devs").connect(ns1if.connector("node"))
-        tap1.connector("fd->").connect(ns1if.connector("->fd"))
-        tap1.set_attribute_value("tun_cipher", "PLAIN")
-        ip1 = ns1if.add_address()
-        ip1.set_attribute_value("Address", "192.168.2.3")
-        ip1.set_attribute_value("NetPrefix", 24)
-        ip1.set_attribute_value("Broadcast", False)
-        
-        # Add default route to the PL node
-        r1 = ns1.add_route()
-        r1.set_attribute_value("Destination", "0.0.0.0")
-        r1.set_attribute_value("NetPrefix", 0)
-        r1.set_attribute_value("NextHop", "192.168.2.2")
-
-        # Create NS3 ping application, pinging the PL node
-        ping = ns3_desc.create("ns3::V4Ping")
-        ping.set_attribute_value("Remote", "{#[node2iface].addr[0].[Address]#}")
-        ping.set_attribute_value("StartTime", "0s")
-        ping.set_attribute_value("StopTime", "10s")
-        ping.connector("node").connect(ns1.connector("apps"))
-
-        xml = exp.to_xml()
-
-        try:
-            controller = ExperimentController(xml, self.root_dir)
-            controller.start()
-
-            while not controller.is_finished(ping.guid):
-                time.sleep(0.5)
-              
-            tap_trace = controller.trace(tap1.guid, "packets")
-
-        finally:
-            try:
-                controller.stop()
-            except:
-                import traceback
-                traceback.print_exc()
-            try:
-                controller.shutdown()
-            except:
-                import traceback
-                traceback.print_exc()
-        
-        # asserts at the end, to make sure there's proper cleanup
-        sent = 0
-        replied = 0
-        for seq in xrange(10):
-            re_send = r""".*
-[0-9.:]* IP 192.168.2.3 > (\d*\.){3}\d*: ICMP echo request, id 0, seq %(seq)d, length \d*
-.*""" % dict(seq=seq)
-
-            re_reply = r""".*
-[0-9.:]* IP 192.168.2.3 > (\d*\.){3}\d*: ICMP echo request, id 0, seq %(seq)d, length \d*.*
-[0-9.:]* IP (\d*\.){3}\d* > 192.168.2.3: ICMP echo reply, id 0, seq %(seq)d, length \d*
-.*""" % dict(seq=seq)
-
-            sent += bool(re.match(re_send, tap_trace, re.MULTILINE|re.DOTALL))
-            replied += bool(re.match(re_reply, tap_trace, re.MULTILINE|re.DOTALL))
-
-        self.assertTrue(sent == replied and sent > 5,
-            "Unexpected trace:\n%s\n" % (
-                tap_trace,) )
-
-    def _test_ns3_in_pl_p2p(self, proto):
-        pl, exp = self.make_experiment_desc()
-        
-        # Create PL node, ifaces, assign addresses
-        node1, iface1, (tap0,tap1), (tap0ip,tap1ip), inet = self.make_pl_tapnode(pl, 
-            label="node1", hostname = self.host1,
-            ip=["192.168.2.2","192.168.2.5"])
-        node2, iface2, (tap2,tap3), (tap2ip,tap3ip), inet = self.make_pl_tapnode(pl, inet=inet, 
-            label="node2", hostname = self.host2,
-            ip=["192.168.2.6","192.168.2.9"])
-        node3, iface3, tap4, tap4ip, inet = self.make_pl_tapnode(pl, inet=inet, 
-            label="node3", hostname = self.host3,
-            ip="192.168.2.10")
-        
-        # Add NS3 support in node1
-        ns3_desc = self.make_ns_in_pl(pl, exp, node1, iface1, "tb-ns3-4-%s" % (proto,))
-        
-        # Configure P2P links
-        tap0.set_attribute_value("pointopoint", "192.168.2.1") # cross-p2p is not automatic
-        tap1.connector(proto).connect(tap2.connector(proto))
-        tap3.connector(proto).connect(tap4.connector(proto))
-        
-        # Configure routes
-        r = node1.add_route()
-        r.set_attribute_value("Destination", "192.168.2.8")
-        r.set_attribute_value("NetPrefix", 29)
-        r.set_attribute_value("NextHop", "192.168.2.6")
-
-        r = node2.add_route()
-        r.set_attribute_value("Destination", "192.168.2.0")
-        r.set_attribute_value("NetPrefix", 29)
-        r.set_attribute_value("NextHop", "192.168.2.5")
-
-        r = node3.add_route()
-        r.set_attribute_value("Destination", "192.168.2.0")
-        r.set_attribute_value("NetPrefix", 29)
-        r.set_attribute_value("NextHop", "192.168.2.9")
-        
-        # Create NS3 node that is responsive to pings, connected
-        # to node1 through the Tap interface
-        ns1 = ns3_desc.create("ns3::Node")
-        ipv41 = ns3_desc.create("ns3::Ipv4L3Protocol")
-        arp1  = ns3_desc.create("ns3::ArpL3Protocol")
-        icmp1 = ns3_desc.create("ns3::Icmpv4L4Protocol")
-        ns1.connector("protos").connect(ipv41.connector("node"))
-        ns1.connector("protos").connect(arp1.connector("node"))
-        ns1.connector("protos").connect(icmp1.connector("node"))
-        ns1if = ns3_desc.create("ns3::FdNetDevice")
-        ns1if.enable_trace("FdPcapTrace")
-        ns1if.set_attribute_value("label", "ns1if")
-        ns1.connector("devs").connect(ns1if.connector("node"))
-        tap0.connector("fd->").connect(ns1if.connector("->fd"))
-        tap0.set_attribute_value("tun_cipher", "PLAIN")
-        ip1 = ns1if.add_address()
-        ip1.set_attribute_value("Address", "192.168.2.1")
-        ip1.set_attribute_value("NetPrefix", 30)
-        ip1.set_attribute_value("Broadcast", False)
-        
-        # Add default route to the PL node
-        r1 = ns1.add_route()
-        r1.set_attribute_value("Destination", "0.0.0.0")
-        r1.set_attribute_value("NetPrefix", 0)
-        r1.set_attribute_value("NextHop", "192.168.2.2")
-
-        # Create NS3 ping application, pinging the PL node
-        ping = ns3_desc.create("ns3::V4Ping")
-        ping.set_attribute_value("Remote", "{#[node3tap].addr[0].[Address]#}")
-        ping.set_attribute_value("StartTime", "0s")
-        ping.set_attribute_value("StopTime", "10s")
-        ping.connector("node").connect(ns1.connector("apps"))
-
-        xml = exp.to_xml()
-
-        try:
-            controller = ExperimentController(xml, self.root_dir)
-            controller.start()
-
-            while not controller.is_finished(ping.guid):
-                time.sleep(0.5)
-
-            tap_trace = []
-            for i,tap in enumerate([ tap0, tap1, tap2, tap3, tap4 ]):
-                tap_trace.append("\nTrace for tap%d:\n" % i)
-                tap_trace.append(controller.trace(tap.guid, "packets"))
-            tap_trace = "".join(tap_trace)
-            tap0_trace = controller.trace(tap0.guid, "packets")
-
-        finally:
-            try:
-                controller.stop()
-            except:
-                import traceback
-                traceback.print_exc()
-            try:
-                controller.shutdown()
-            except:
-                import traceback
-                traceback.print_exc()
-        
-        # asserts at the end, to make sure there's proper cleanup
-        sent = 0
-        replied = 0
-        for seq in xrange(10):
-            re_send = r""".*
-[0-9.:]* IP 192.168.2.1 > (\d*\.){3}\d*: ICMP echo request, id 0, seq %(seq)d, length \d*
-.*""" % dict(seq=seq)
-
-            re_reply = r""".*
-[0-9.:]* IP 192.168.2.1 > (\d*\.){3}\d*: ICMP echo request, id 0, seq %(seq)d, length \d*.*
-[0-9.:]* IP (\d*\.){3}\d* > 192.168.2.1: ICMP echo reply, id 0, seq %(seq)d, length \d*
-.*""" % dict(seq=seq)
-
-            sent += bool(re.match(re_send, tap0_trace, re.MULTILINE|re.DOTALL))
-            replied += bool(re.match(re_reply, tap0_trace, re.MULTILINE|re.DOTALL))
-
-        self.assertTrue(replied >= sent/2 and sent > 5,
-            "Unexpected trace:\n%s\n" % (
-                tap_trace,) )
-
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, 
-        "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    @test_util.skipUnless(os.environ.get('NEPI_FULL_TESTS','').lower() in ('1','yes','true','on'),
-        "Test is expensive, requires NEPI_FULL_TESTS=yes")
-    def test_ns3_in_pl_p2p_udp(self):
-        self._test_ns3_in_pl_p2p("udp")
-
-    @test_util.skipUnless(test_util.pl_auth() is not None, 
-        "Test requires PlanetLab authentication info (PL_USER and PL_PASS environment variables)")
-    @test_util.skipUnless(os.environ.get('NEPI_FULL_TESTS','').lower() in ('1','yes','true','on'),
-        "Test is expensive, requires NEPI_FULL_TESTS=yes")
-    def test_ns3_in_pl_p2p_tcp(self):
-        self._test_ns3_in_pl_p2p("tcp")
-
-if __name__ == '__main__':
-    unittest.main()
-
diff --git a/test/util/parser.py b/test/util/parser.py
new file mode 100755 (executable)
index 0000000..44cac7a
--- /dev/null
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+from nepi.design.box import Box 
+from nepi.util.parser import XMLParser
+
+import unittest
+
+class BoxDesignTestCase(unittest.TestCase):
+    def test_to_xml(self):
+        node1 = Box()
+        node2 = Box()
+
+        node1.label = "node1"
+        node2.label = "node2"
+
+        node1.connect(node2)
+
+        node1.a.dog = "cat"
+        node1.a.one = "two"
+        node1.a.t = "q"
+
+        node1.c.node2.a.sky = "sea"
+        node2.a.bee = "honey"
+
+        node1.tadd("unooo")
+        node2.tadd("dosss")
+
+        parser = XMLParser()
+        xml = parser.to_xml(node1)
+        
+        node = parser.from_xml(xml)
+        xml2 = parser.to_xml(node)
+        
+        self.assertEquals(xml, xml2)
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/test/util/plot.py b/test/util/plot.py
new file mode 100755 (executable)
index 0000000..806ae5d
--- /dev/null
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+from nepi.design.box import Box 
+from nepi.util.plot import Plotter
+
+import subprocess
+import unittest
+
+class BoxPlotTestCase(unittest.TestCase):
+    def xtest_plot(self):
+        """ XXX: This test is interactive, it will open an evince instance,
+        so it should not run automatically """
+        node1 = Box(label="node1")
+        ping1 = Box(label="ping")
+        mobility1 = Box(label="mob1")
+        node2 = Box(label="node2")
+        mobility2 = Box(label="mob2")
+        iface1 = Box(label="iface1")
+        iface2 = Box(label="iface2")
+        channel = Box(label="chan")
+
+        node1.connect(ping1)
+        node1.connect(mobility1)
+        node1.connect(iface1)
+        channel.connect(iface1)
+        channel.connect(iface2)
+        node2.connect(iface2)
+        node2.connect(mobility2)
+
+        plotter = Plotter(node1)
+        fname = plotter.plot()
+        subprocess.call(["dot", "-Tps", fname, "-o", "%s.ps"%fname])
+        subprocess.call(["evince","%s.ps"%fname])
+       
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/test/util/server.py b/test/util/server.py
deleted file mode 100755 (executable)
index 1e079e2..0000000
+++ /dev/null
@@ -1,228 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from nepi.util import server
-from nepi.util.constants import DeploymentConfiguration as DC
-
-import getpass
-import os
-import shutil
-import sys
-import tempfile
-import test_util
-import unittest
-import time
-
-class ServerTestCase(unittest.TestCase):
-    def setUp(self):
-        self.root_dir = tempfile.mkdtemp()
-        
-        # Silence the server
-        self.stderr = sys.stderr
-        sys.stderr = open("/dev/null","r+b")
-
-    def tearDown(self):
-        sys.stderr = self.stderr
-        try:
-            shutil.rmtree(self.root_dir)
-        except:
-            # retry
-            time.sleep(0.1)
-            shutil.rmtree(self.root_dir)
-
-    def test_server(self):
-        s = server.Server(self.root_dir)
-        s.run()
-        c = server.Client(self.root_dir)
-        c.send_msg("Hola")
-        reply = c.read_reply()
-        self.assertEqual(reply, "Reply to: Hola")
-        c.send_stop()
-        reply = c.read_reply()
-        self.assertEqual(reply, "Stopping server")
-
-    def test_server_reconnect(self):
-        s = server.Server(self.root_dir)
-        s.run()
-        c = server.Client(self.root_dir)
-        
-        c.send_msg("Hola")
-        reply = c.read_reply()
-        self.assertEqual(reply, "Reply to: Hola")
-        
-        # disconnect
-        del c
-        
-        # reconnect
-        c = server.Client(self.root_dir)
-        c.send_msg("Hola")
-        reply = c.read_reply()
-        self.assertEqual(reply, "Reply to: Hola")
-                
-        c.send_stop()
-        reply = c.read_reply()
-        self.assertEqual(reply, "Stopping server")
-
-    def test_server_auto_reconnect(self):
-        s = server.Server(self.root_dir)
-        s.run()
-        c = server.Client(self.root_dir)
-        
-        c.send_msg("Hola")
-        reply = c.read_reply()
-        self.assertEqual(reply, "Reply to: Hola")
-        
-        # purposedly break the connection
-        c._process.stdin.close()
-        c._process.stdout.close()
-        c._process.stderr.close()
-        
-        # assert that the communication works (possible with auto-reconnection)
-        c.send_msg("Hola")
-        reply = c.read_reply()
-        self.assertEqual(reply, "Reply to: Hola")
-                
-        c.send_stop()
-        reply = c.read_reply()
-        self.assertEqual(reply, "Stopping server")
-
-    def test_server_long_message(self):
-        s = server.Server(self.root_dir)
-        s.run()
-        c = server.Client(self.root_dir)
-        msg = "1"*1145
-        c.send_msg(msg)
-        reply = c.read_reply()
-        self.assertEqual(reply, ("Reply to: "+msg))
-        c.send_stop()
-        reply = c.read_reply()
-        self.assertEqual(reply, "Stopping server")
-
-    @test_util.skipUnless(os.getuid() == 0, "Test requires root privileges")
-    def test_sudo_server(self):
-        env = test_util.test_environment()
-        user = getpass.getuser()
-        # launch server
-        python_code = "from nepi.util import server;s=server.Server('%s');\
-                s.run()" % self.root_dir
-        server.popen_python(python_code, 
-                sudo = True)
-        c = server.Client(self.root_dir, 
-                sudo = True)
-        c.send_msg("Hola")
-        reply = c.read_reply()
-        self.assertEqual(reply, "Reply to: Hola")
-        c.send_stop()
-        reply = c.read_reply()
-        self.assertEqual(reply, "Stopping server")
-
-
-    def test_ssh_server(self):
-        env = test_util.test_environment()
-        user = getpass.getuser()
-        # launch server
-        python_code = "from nepi.util import server;s=server.Server('%s');\
-                s.run()" % self.root_dir
-        server.popen_python(python_code, 
-                communication = DC.ACCESS_SSH,
-                host = "localhost", 
-                port = env.port, 
-                user = user, 
-                agent = True)
-        c = server.Client(self.root_dir, 
-                communication = DC.ACCESS_SSH,
-                host = "localhost", 
-                port = env.port,
-                user = user, 
-                agent = True)
-        c.send_msg("Hola")
-        reply = c.read_reply()
-        self.assertEqual(reply, "Reply to: Hola")
-        c.send_stop()
-        reply = c.read_reply()
-        self.assertEqual(reply, "Stopping server")
-
-    def test_ssh_server_reconnect(self):
-        env = test_util.test_environment()
-        user = getpass.getuser()
-        # launch server
-        python_code = "from nepi.util import server;s=server.Server('%s');\
-                s.run()" % self.root_dir
-        server.popen_python(python_code, 
-                communication = DC.ACCESS_SSH,
-                host = "localhost", 
-                port = env.port, 
-                user = user, 
-                agent = True)
-        
-        c = server.Client(self.root_dir, 
-                communication = DC.ACCESS_SSH,
-                host = "localhost", 
-                port = env.port,
-                user = user, 
-                agent = True)
-                
-        c.send_msg("Hola")
-        reply = c.read_reply()
-        self.assertEqual(reply, "Reply to: Hola")
-        
-        # disconnect
-        del c
-        
-        # reconnect
-        c = server.Client(self.root_dir,
-                communication = DC.ACCESS_SSH,
-                host = "localhost", 
-                port = env.port,
-                user = user, 
-                agent = True)
-                
-        c.send_msg("Hola")
-        reply = c.read_reply()
-        self.assertEqual(reply, "Reply to: Hola")
-        
-        c.send_stop()
-        reply = c.read_reply()
-        self.assertEqual(reply, "Stopping server")
-
-    def test_ssh_server_auto_reconnect(self):
-        env = test_util.test_environment()
-        user = getpass.getuser()
-        # launch server
-        python_code = "from nepi.util import server;s=server.Server('%s');\
-                s.run()" % self.root_dir
-        server.popen_python(python_code, 
-                communication = DC.ACCESS_SSH,
-                host = "localhost", 
-                port = env.port, 
-                user = user, 
-                agent = True)
-        
-        c = server.Client(self.root_dir, 
-                communication = DC.ACCESS_SSH,
-                host = "localhost", 
-                port = env.port,
-                user = user, 
-                agent = True)
-                
-        c.send_msg("Hola")
-        reply = c.read_reply()
-        self.assertEqual(reply, "Reply to: Hola")
-        
-        # purposedly break the connection
-        c._process.stdin.close()
-        c._process.stdout.close()
-        c._process.stderr.close()
-        
-        # assert that the communication works (possible with auto-reconnection)
-        c.send_msg("Hola")
-        reply = c.read_reply()
-        self.assertEqual(reply, "Reply to: Hola")
-        
-        c.send_stop()
-        reply = c.read_reply()
-        self.assertEqual(reply, "Stopping server")
-
-if __name__ == '__main__':
-    unittest.main()
-
similarity index 55%
rename from test/lib/test_util.py
rename to test/util/sshfuncs.py
index fbecdfc..f860214 100644 (file)
@@ -1,52 +1,19 @@
-import nepi.util.environ
-import ctypes
-import imp
-import sys
-
-# Unittest from Python 2.6 doesn't have these decorators
-def _bannerwrap(f, text):
-    name = f.__name__
-    def banner(*args, **kwargs):
-        sys.stderr.write("*** WARNING: Skipping test %s: `%s'\n" %
-                (name, text))
-        return None
-    return banner
-def skip(text):
-    return lambda f: _bannerwrap(f, text)
-def skipUnless(cond, text):
-    return (lambda f: _bannerwrap(f, text)) if not cond else lambda f: f
-def skipIf(cond, text):
-    return (lambda f: _bannerwrap(f, text)) if cond else lambda f: f
-
-def ns3_bindings_path():
-    if "NEPI_NS3BINDINGS" in os.environ:
-        return os.environ["NEPI_NS3BINDINGS"]
-    return None
-
-def ns3_library_path():
-    if "NEPI_NS3LIBRARY" in os.environ:
-        return os.environ["NEPI_NS3LIBRARY"]
-    return None
-
-def ns3_usable():
-    try:
-        from nepi.testbeds.ns3 import execute
-        execute.load_ns3_module()
-    except:
-        import traceback
-        import sys
-        traceback.print_exc(file = sys.stderr)
-        return False
-    return True
-
-def pl_auth():
-    user = os.environ.get('PL_USER')
-    pwd = os.environ.get('PL_PASS')
-     
-    if user and pwd:
-        return (user,pwd)
-    else:
-        return None
+#!/usr/bin/env python
+
+from nepi.util.sshfuncs import rexec, rcopy, rspawn, rcheckpid, rstatus, rkill,\
+        RUNNING, FINISHED 
+
+import getpass
+import unittest
+import os
+import subprocess
+import re
+import signal
+import shutil
+import socket
+import subprocess
+import tempfile
+import time
 
 def find_bin(name, extra_path = None):
     search = []
@@ -74,17 +41,14 @@ def find_bin_or_die(name, extra_path = None):
                 "continue.") % name)
     return r
 
-# SSH stuff
-
-import os, os.path, re, signal, shutil, socket, subprocess, tempfile
 def gen_ssh_keypair(filename):
-    ssh_keygen = nepi.util.environ.find_bin_or_die("ssh-keygen")
+    ssh_keygen = find_bin_or_die("ssh-keygen")
     args = [ssh_keygen, '-q', '-N', '', '-f', filename]
     assert subprocess.Popen(args).wait() == 0
     return filename, "%s.pub" % filename
 
 def add_key_to_agent(filename):
-    ssh_add = nepi.util.environ.find_bin_or_die("ssh-add")
+    ssh_add = find_bin_or_die("ssh-add")
     args = [ssh_add, filename]
     null = file("/dev/null", "w")
     assert subprocess.Popen(args, stderr = null).wait() == 0
@@ -131,7 +95,7 @@ def gen_auth_keys(pubkey, output, environ):
     return output
 
 def start_ssh_agent():
-    ssh_agent = nepi.util.environ.find_bin_or_die("ssh-agent")
+    ssh_agent = find_bin_or_die("ssh-agent")
     proc = subprocess.Popen([ssh_agent], stdout = subprocess.PIPE)
     (out, foo) = proc.communicate()
     assert proc.returncode == 0
@@ -148,7 +112,7 @@ def start_ssh_agent():
 def stop_ssh_agent(data):
     # No need to gather the pid, ssh-agent knows how to kill itself; after we
     # had set up the environment
-    ssh_agent = nepi.util.environ.find_bin_or_die("ssh-agent")
+    ssh_agent = find_bin_or_die("ssh-agent")
     null = file("/dev/null", "w")
     proc = subprocess.Popen([ssh_agent, "-k"], stdout = null)
     null.close()
@@ -160,16 +124,6 @@ class test_environment(object):
     def __init__(self):
         sshd = find_bin_or_die("sshd")
         environ = {}
-        if 'PYTHONPATH' in os.environ:
-            environ['PYTHONPATH'] = ":".join(map(os.path.realpath, 
-                os.environ['PYTHONPATH'].split(":")))
-        if 'NEPI_NS3BINDINGS' in os.environ:
-            environ['NEPI_NS3BINDINGS'] = \
-                    os.path.realpath(os.environ['NEPI_NS3BINDINGS'])
-        if 'NEPI_NS3LIBRARY' in os.environ:
-            environ['NEPI_NS3LIBRARY'] = \
-                    os.path.realpath(os.environ['NEPI_NS3LIBRARY'])
-
         self.dir = tempfile.mkdtemp()
         self.server_keypair = gen_ssh_keypair(
                 os.path.join(self.dir, "server_key"))
@@ -194,3 +148,99 @@ class test_environment(object):
             stop_ssh_agent(self.ssh_agent_vars)
         shutil.rmtree(self.dir)
 
+class SSHfuncsTestCase(unittest.TestCase):
+    def test_rexec(self):
+        env = test_environment()
+        user = getpass.getuser()
+        host = "localhost" 
+
+        command = "hostname"
+
+        plocal = subprocess.Popen(command, stdout=subprocess.PIPE, 
+                stdin=subprocess.PIPE)
+        outlocal, errlocal = plocal.communicate()
+
+        (outremote, errrmote), premote = rexec(command, host, user, 
+                port = env.port, agent = True)
+
+        self.assertEquals(outlocal, outremote)
+
+    def test_rcopy(self):
+        env = test_environment()
+        user = getpass.getuser()
+        host = "localhost"
+
+        # create some temp files and directories to copy
+        dirpath = tempfile.mkdtemp()
+        f = tempfile.NamedTemporaryFile(dir=dirpath, delete=False)
+        f.close()
+      
+        f1 = tempfile.NamedTemporaryFile(delete=False)
+        f1.close()
+        f1.name
+
+        source = [dirpath, f1.name]
+        destdir = tempfile.mkdtemp()
+        dest = "%s@%s:%s" % (user, host, destdir)
+        rcopy(source, dest, port = env.port, agent = True, recursive = True)
+
+        files = []
+        def recls(files, dirname, names):
+            files.extend(names)
+        os.path.walk(destdir, recls, files)
+        
+        origfiles = map(lambda s: os.path.basename(s), [dirpath, f.name, f1.name])
+
+        self.assertEquals(sorted(origfiles), sorted(files))
+
+    def test_rproc_manage(self):
+        env = test_environment()
+        user = getpass.getuser()
+        host = "localhost" 
+        command = "ping localhost"
+        
+        f = tempfile.NamedTemporaryFile(delete=False)
+        pidfile = f.name 
+
+        (out,err), proc = rspawn(
+                command, 
+                pidfile,
+                host = host,
+                user = user,
+                port = env.port,
+                agent = True)
+
+        time.sleep(2)
+
+        (pid, ppid) = rcheckpid(pidfile,
+                host = host,
+                user = user,
+                port = env.port,
+                agent = True)
+
+        status = rstatus(pid, ppid,
+                host = host,
+                user = user, 
+                port = env.port, 
+                agent = True)
+
+        self.assertEquals(status, RUNNING)
+
+        rkill(pid, ppid,
+                host = host,
+                user = user, 
+                port = env.port, 
+                agent = True)
+
+        status = rstatus(pid, ppid,
+                host = host,
+                user = user, 
+                port = env.port, 
+                agent = True)
+        
+        self.assertEquals(status, FINISHED)
+
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/test/util/tunchannel.py b/test/util/tunchannel.py
deleted file mode 100644 (file)
index 7bd3176..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env python
-
-from nepi.util import tunchannel
-import socket
-import time
-import threading
-import unittest
-
-class TunnChannelTestCase(unittest.TestCase):
-    def test_send_suspend_terminate(self):
-        def tun_fwd(local, remote, TERMINATE, SUSPEND, STOPPED):
-            tunchannel.tun_fwd(local, remote, True, True, None, True,
-                TERMINATE, SUSPEND, None)
-            STOPPED.append(None)
-    
-        TERMINATE = []
-        SUSPEND = []
-        STOPPED = []
-    
-        s1, s2 = socket.socketpair()
-        s3, s4 = socket.socketpair()
-        s4.settimeout(2.0)
-
-        t = threading.Thread(target=tun_fwd, args=[s2, s3, TERMINATE, SUSPEND, STOPPED])
-        t.start()
-
-        txt = "0000|received"
-        s1.send(txt)
-        rtxt = s4.recv(len(txt))
-
-        self.assertTrue(rtxt == txt[4:])
-        
-        # Let's try to suspend execution now
-        cond = threading.Condition()
-        SUSPEND.insert(0, cond)
-
-        txt = "0000|suspended"
-        s1.send(txt)
-        
-        rtxt = "timeout"
-        try:
-            rtxt = s4.recv(len(txt))
-        except socket.timeout:
-            pass
-                    
-        self.assertTrue(rtxt == "timeout")
-
-        # Let's see if we can resume and receive the message
-        cond = SUSPEND[0]
-        SUSPEND.remove(cond)
-        cond.acquire()
-        cond.notify()
-        cond.release()
-
-        rtxt = s4.recv(len(txt))
-        self.assertTrue(rtxt == txt[4:])
-              
-        # Stop forwarding         
-        TERMINATE.append(None)
-
-        txt = "0000|never received"
-        s1.send(txt)
-        
-        rtxt = "timeout"
-        try:
-            rtxt = s4.recv(len(txt))
-        except socket.timeout:
-            pass
-                    
-        self.assertTrue(rtxt == "timeout")
-        self.assertTrue(STOPPED)
-
-if __name__ == '__main__':
-    unittest.main()
-