( pl_user, movie, exp_id, pl_ssh_key, results_dir ) = get_options()
ec = ExperimentController(exp_id = exp_id)
-
- # hosts in Europe
- #host1 = "planetlab2.u-strasbg.fr"
- #host2 = "planet1.servers.ua.pt"
- #host3 = "planetlab1.cs.uoi.gr"
- #host4 = "planetlab1.aston.ac.uk"
- #host5 = "planetlab2.willab.fi"
- #host6 = "planetlab-1.fokus.fraunhofer.de"
# host in the US
host1 = "planetlab4.wail.wisc.edu"
host5 = "earth.cs.brown.edu"
host6 = "planetlab2.engr.uconn.edu"
- # describe nodes in the central ring
+ # describe nodes in the central ring
ring_hosts = [host1, host2, host3, host4]
ccnds = dict()
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Lucia Guevgeozian <lucia.guevgeozian_odizzio@inria.fr>
+
+from nepi.execution.ec import ExperimentController
+from nepi.execution.resource import ResourceAction, ResourceState
+
+import os
+
+def create_node(ec, username, pl_user, pl_password, hostname=None, country=None,
+                operatingSystem=None, minBandwidth=None, minCpu=None):
+    """Register a PlanetlabNode resource on the controller *ec*.
+
+    Credentials (username/pluser/plpassword) and the node-selection filters
+    (hostname, country, operatingSystem, minBandwidth, minCpu) are only set
+    when a non-empty value is given. cleanHome and cleanProcesses are always
+    enabled so the node starts from a clean state.
+
+    Returns the guid of the registered node resource.
+    """
+
+    node = ec.register_resource("PlanetlabNode")
+
+    if username:
+        ec.set(node, "username", username)
+    if pl_user:
+        ec.set(node, "pluser", pl_user)
+    if pl_password:
+        ec.set(node, "plpassword", pl_password)
+
+    if hostname:
+        ec.set(node, "hostname", hostname)
+    if country:
+        ec.set(node, "country", country)
+    if operatingSystem:
+        ec.set(node, "operatingSystem", operatingSystem)
+    if minBandwidth:
+        ec.set(node, "minBandwidth", minBandwidth)
+    if minCpu:
+        ec.set(node, "minCpu", minCpu)
+
+    ec.set(node, "cleanHome", True)
+    ec.set(node, "cleanProcesses", True)
+
+    return node
+
+def add_app(ec, command, node, sudo=None, video=None, depends=None, forward_x11=None, \
+        env=None):
+    """Register a LinuxApplication that runs *command* on *node*.
+
+    Optional attributes (sudo, sources via *video*, depends, forwardX11, env)
+    are only set when the corresponding argument is not None; note that an
+    explicit falsy value (e.g. sudo=False) is still applied, unlike in
+    create_node. The application is connected to the node resource.
+
+    Returns the guid of the registered application resource.
+    """
+    app = ec.register_resource("LinuxApplication")
+    if sudo is not None:
+        ec.set(app, "sudo", sudo)
+    if video is not None:
+        ec.set(app, "sources", video)
+    if depends is not None:
+        ec.set(app, "depends", depends)
+    if forward_x11 is not None:
+        ec.set(app, "forwardX11", forward_x11)
+    if env is not None:
+        ec.set(app, "env", env)
+    ec.set(app, "command", command)
+
+    ec.register_connection(app, node)
+
+    return app
+
+exp_id = "ping_exp"
+
+# Create the entity Experiment Controller:
+ec = ExperimentController(exp_id)
+
+# Register the nodes resources:
+
+# The username in this case is the slice name, the one to use for login in
+# via ssh into PlanetLab nodes. Replace with your own slice name.
+username = "inria_sfatest"
+
+# The pluser and plpassword are the ones used to login in the PlanetLab web
+# site. Replace with your own user and password account information.
+pl_user = "lucia.guevgeozian_odizzio@inria.fr"
+pl_password = os.environ.get("PL_PASS")
+
+# Choose the PlanetLab nodes for the experiment, in this example 5 nodes are
+# used, and they are picked according to different criteria.
+
+# First node will be the one defined by its hostname.
+hostname = "planetlab2.utt.fr"
+node1 = create_node(ec, username, pl_user, pl_password, hostname=hostname)
+
+# Second node will be any node in France.
+country = "France"
+node2 = create_node(ec, username, pl_user, pl_password, country=country)
+
+# Third node will be a node in France that has Fedora 14 installed.
+operatingSystem = "f14"
+node3 = create_node(ec, username, pl_user, pl_password, country=country,
+ operatingSystem=operatingSystem)
+
+# Fourth node will have at least 50% of CPU available
+minCpu=50
+node4 = create_node(ec, username, pl_user, pl_password, minCpu=minCpu)
+
+# Fifth node can be any node, constraints are not important.
+node5 = create_node(ec, username, pl_user, pl_password)
+
+# Register the applications to run in the nodes, in this case just ping to the
+# first node:
+apps_per_node = dict()
+apps = []
+for node in [node2, node3, node4, node5]:
+ command = "ping -c5 %s > ping%s.txt" % (hostname, node)
+ app = add_app(ec, command, node)
+ apps_per_node[node] = app
+ apps.append(app)
+
+# Register conditions
+
+# The nodes that are completely identified by their hostnames have to be provisioned
+# before the rest of the nodes. This assures that no other resource will use the
+# identified node even if the constraints match.
+# In this example node2, node3, node4 and node5, are deployed after node1 is
+# provisioned. node1 must be the node planetlab2.utt.fr, meanwhile node2, node3,
+# node4 and node5 just need to fulfill certain constraints.
+# Applications are always deployed after nodes, so no need to register conditions
+# for the apps in this example.
+
+ec.register_condition(node2, ResourceAction.DEPLOY, node1, ResourceState.PROVISIONED)
+ec.register_condition(node3, ResourceAction.DEPLOY, node1, ResourceState.PROVISIONED)
+ec.register_condition(node4, ResourceAction.DEPLOY, node1, ResourceState.PROVISIONED)
+ec.register_condition(node5, ResourceAction.DEPLOY, node1, ResourceState.PROVISIONED)
+
+# Deploy the experiment:
+ec.deploy()
+
+# Wait until the applications have finished to retrieve the traces:
+ec.wait_finished(apps)
+
+traces = dict()
+for node, app in apps_per_node.iteritems():
+ ping_string = "ping%s.txt" % node
+ trace = ec.trace(app, ping_string)
+ traces[node]= trace
+
+# Choose a directory to store the traces locally, change to a convenient path for you:
+directory = "examples/planetlab/"
+for node, trace in traces.iteritems():
+ trace_file = directory + "ping%s.txt" % node
+ f = open(trace_file, "w")
+ f.write(trace)
+ f.close()
+
+# Do the experiment controller shutdown:
+ec.shutdown()
+
+# END
self._os = OSType.FEDORA_12
elif out.find("Fedora release 14") == 0:
self._os = OSType.FEDORA_14
+ elif out.find("Fedora release") == 0:
+ self._os = OSType.FEDORA
elif out.find("Debian") == 0:
self._os = OSType.DEBIAN
elif out.find("Ubuntu") ==0:
# until the result is not empty string
out = ""
retrydelay = 1.0
- for i in xrange(10):
+ for i in xrange(2):
try:
(out, err), proc = self.execute("cat /etc/issue",
retry = 5,
pid = ppid = None
delay = 1.0
- for i in xrange(4):
+ for i in xrange(2):
pidtuple = self.getpid(home = home, pidfile = pidfile)
if pidtuple:
# To work arround this, repeat the operation N times or
# until the result is not empty string
retrydelay = 1.0
- for i in xrange(10):
+ for i in xrange(2):
try:
(out, err), proc = self.execute("echo 'ALIVE'",
retry = 5,
# To work arround this, repeat the operation N times or
# until the result is not empty string
retrydelay = 1.0
- for i in xrange(10):
+ for i in xrange(2):
try:
(out, err), proc = self.execute("echo ${HOME}",
retry = 5,
RPM_FUSION_URL = 'http://download1.rpmfusion.org/free/fedora/rpmfusion-free-release-stable.noarch.rpm'
RPM_FUSION_URL_F12 = 'http://download1.rpmfusion.org/free/fedora/releases/12/Everything/x86_64/os/rpmfusion-free-release-12-1.noarch.rpm'
+RPM_FUSION_URL_F14 = 'http://download1.rpmfusion.org/free/fedora/releases/14/Everything/x86_64/os/rpmfusion-free-release-14-0.4.noarch.rpm'
+
# TODO: Investigate using http://nixos.org/nix/
packages = [packages]
cmd = install_rpmfusion_command(os)
- if cmd: cmd += " && "
+ if cmd: cmd += " ; "
cmd += " && ".join(map(lambda p:
- " { rpm -q %(package)s || sudo -S yum -y install %(package)s ; } " % {
+ " { rpm -q %(package)s || sudo -S yum --nogpgcheck -y install %(package)s ; } " % {
'package': p}, packages))
#cmd = { rpm -q rpmfusion-free-release || sudo -s rpm -i ... ; } && { rpm -q vim || sudo yum -y install vim ; } && ..
def install_rpmfusion_command(os):
from nepi.resources.linux.node import OSType
- cmd = " { rpm -q rpmfusion-free-release || sudo -S rpm -i %(package)s ; } "
+ cmd = " { rpm -q rpmfusion-free-release || sudo -S rpm -i %(package)s ; } "
if os in [OSType.FEDORA, OSType.FEDORA_12]:
# For f12
cmd = cmd % {'package': RPM_FUSION_URL_F12}
elif os == OSType.FEDORA_14:
- # For f13+
+ # For f14
+ cmd = cmd % {'package': RPM_FUSION_URL_F14}
+ elif os == OSType.FEDORA:
+ # For f14+
cmd = cmd % {'package': RPM_FUSION_URL}
else:
# Fedora 8 is unmaintained
from nepi.resources.omf.node import OMFNode
from nepi.resources.omf.omf_api import OMFAPIFactory
-#from nepi.util.sshfuncs import ProcStatus
-from nepi.util import sshfuncs
-
-
@clsinit_copy
class OMFApplication(OMFResource):
"""
args = Attribute("args", "Argument of the application")
env = Attribute("env", "Environnement variable of the application")
stdin = Attribute("stdin", "Input of the application", default = "")
- sources = Attribute("sources", "Sources of the application",
- flags = Flags.ExecReadOnly)
- sshuser = Attribute("sshUser", "user to connect with ssh",
- flags = Flags.ExecReadOnly)
- sshkey = Attribute("sshKey", "key to use for ssh",
- flags = Flags.ExecReadOnly)
cls._register_attribute(appid)
cls._register_attribute(path)
cls._register_attribute(args)
cls._register_attribute(env)
cls._register_attribute(stdin)
- cls._register_attribute(sources)
- cls._register_attribute(sshuser)
- cls._register_attribute(sshkey)
def __init__(self, ec, guid):
"""
"This Application is already connected" ) % \
(self.rtype(), self._guid, rm.rtype(), guid)
self.debug(msg)
+
return False
+
else :
msg = "Connection between %s %s and %s %s accepted" % (
self.rtype(), self._guid, rm.rtype(), guid)
It becomes DEPLOYED after getting the xmpp client.
"""
- self.set('xmppSlice', self.node.get('xmppSlice'))
- self.set('xmppHost', self.node.get('xmppHost'))
- self.set('xmppPort', self.node.get('xmppPort'))
- self.set('xmppPassword', self.node.get('xmppPassword'))
-
if not self._omf_api :
self._omf_api = OMFAPIFactory.get_api(self.get('xmppSlice'),
self.get('xmppHost'), self.get('xmppPort'),
self.error(msg)
raise RuntimeError, msg
- if self.get('sources'):
- gateway = ResourceGateway.AMtoGateway[self.get('xmppHost')]
- user = self.get('sshUser') or self.get('xmppSlice')
- dst = user + "@"+ gateway + ":"
- (out, err), proc = sshfuncs.rcopy(self.get('sources'), dst)
-
super(OMFApplication, self).deploy()
@failtrap
using OMF 5.4 protocol to configure the interface.
It becomes DEPLOYED after sending messages to configure the interface
"""
- self.set('xmppSlice', self.node.get('xmppSlice'))
- self.set('xmppHost', self.node.get('xmppHost'))
- self.set('xmppPort', self.node.get('xmppPort'))
- self.set('xmppPassword', self.node.get('xmppPassword'))
-
if not self._omf_api :
self._omf_api = OMFAPIFactory.get_api(self.get('xmppSlice'),
self.get('xmppHost'), self.get('xmppPort'),
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
# Julien Tribino <julien.tribino@inria.fr>
-
from nepi.execution.resource import ResourceManager, clsinit_copy, \
ResourceState, reschedule_delay, failtrap
from nepi.execution.attribute import Attribute, Flags
except AttributeError:
msg = "Credentials are not initialzed. XMPP Connections impossible"
self.error(msg)
- raise
+ raise
super(OMFNode, self).deploy()
from nepi.resources.omf.omf_client import OMFClient
from nepi.resources.omf.messages_5_4 import MessageHandler
-from nepi.util.timefuncs import tsformat
-
class OMFAPI(Logger):
"""
.. class:: Class Args :
"""
super(OMFAPI, self).__init__("OMFAPI")
- date = tsformat()
- tz = -time.altzone if time.daylight != 0 else -time.timezone
- date += "%+06.2f" % (tz / 3600) # timezone difference is in seconds
- self._exp_id = exp_id or date
+ self._exp_id = exp_id
self._user = "%s-%s" % (slice, self._exp_id)
self._slice = slice
self._host = host
xmpp_node = self._host_session_id(hostname)
self._client.publish(payload, xmpp_node)
+
+    def send_stdin(self, hostname, value, app_id):
+        """ Send the value to the stdin of the application
+
+        :param hostname: Full hrn of the node
+        :type hostname: str
+        :param value: parameter to write to the stdin of the application
+        :type value: str
+        :param app_id: Application Id (Any id that represents in a unique
+            way the application)
+        :type app_id: str
+
+        """
+        payload = self._message.stdin_function(hostname, value, app_id)
+        xmpp_node = self._host_session_id(hostname)
+        self._client.publish(payload, xmpp_node)
+
+
def execute(self, hostname, app_id, arguments, path, env):
""" Execute command on the node
from nepi.execution.resource import ResourceManager, clsinit_copy, \
ResourceState, reschedule_delay
+
class ResourceGateway:
"""
Dictionary used to set OMF gateway depending on Testbed information.
"""
+ #XXX: A.Q. COMMENT: This looks a bit hardcoded
+ # SHOULDN'T THIS BE IN A SEPARATED FILE RATHER THAN IN THE
+ # BASE CLASS FOR ALL OMF RESOURCES?
TestbedtoGateway = dict({
"wilabt" : "ops.wilab2.ilabt.iminds.be",
"nitos" : "nitlab.inf.uth.gr",
"nicta" : "??.??.??",
+
})
AMtoGateway = dict({
from nepi.resources.linux.node import LinuxNode
from nepi.resources.planetlab.plcapi import PLCAPIFactory
from nepi.util.execfuncs import lexec
+from nepi.util import sshfuncs
from random import randint
import time
"associated to a PlanetLab user account"
_backend = "planetlab"
- blacklist = list()
- provisionlist = list()
-
- lock_blist = threading.Lock()
- lock_plist = threading.Lock()
-
- lock_slice = threading.Lock()
-
+ lock = threading.Lock()
@classmethod
def _register_attributes(cls):
authenticate in the website) ",
flags = Flags.Credential)
- pl_password = Attribute("password",
+ pl_password = Attribute("plpassword",
"PlanetLab account password, as \
the one to authenticate in the website) ",
flags = Flags.Credential)
"other"],
flags = Flags.Filter)
- site = Attribute("site", "Constrain the PlanetLab site this node \
- should reside on.",
- type = Types.Enumerate,
- allowed = ["PLE",
- "PLC",
- "PLJ"],
- flags = Flags.Filter)
+ #site = Attribute("site", "Constrain the PlanetLab site this node \
+ # should reside on.",
+ # type = Types.Enumerate,
+ # allowed = ["PLE",
+ # "PLC",
+ # "PLJ"],
+ # flags = Flags.Filter)
min_reliability = Attribute("minReliability", "Constrain reliability \
while picking PlanetLab nodes. Specifies a lower \
cls._register_attribute(pl_ptn)
cls._register_attribute(pl_user)
cls._register_attribute(pl_password)
- cls._register_attribute(site)
+ #cls._register_attribute(site)
cls._register_attribute(city)
cls._register_attribute(country)
cls._register_attribute(region)
def plapi(self):
if not self._plapi:
pl_user = self.get("pluser")
- pl_pass = self.get("password")
+ pl_pass = self.get("plpassword")
pl_url = self.get("plcApiUrl")
pl_ptn = self.get("plcApiPattern")
return self._plapi
- def discoverl(self):
+ def discover(self):
"""
Based on the attributes defined by the user, discover the suitable nodes
"""
- hostname = self.get("hostname")
+ hostname = self._get_hostname()
+ print self.guid, hostname
if hostname:
# the user specified one particular node to be provisioned
# check with PLCAPI if it is alvive
node_id = self._query_if_alive(hostname=hostname)
node_id = node_id.pop()
+ print self.guid, node_id
- # check that the node is not blacklisted or already being provision
+ # check that the node is not blacklisted or being provisioned
# by other RM
- blist = PlanetlabNode.blacklist
- plist = PlanetlabNode.provisionlist
- if node_id not in blist and node_id not in plist:
+ with PlanetlabNode.lock:
+ plist = self.plapi.reserved()
+ blist = self.plapi.blacklisted()
+ print self.guid,plist
+ print self.guid,blist
+ if node_id not in blist and node_id not in plist:
- # check that is really alive, by performing ping
- ping_ok = self._do_ping(node_id)
- if not ping_ok:
- self._blacklist_node(node_id)
- self.fail_node_not_alive(hostname)
- else:
- self._node_to_provision = node_id
- self._put_node_in_provision(node_id)
- super(PlanetlabNode, self).discover()
+ # check that is really alive, by performing ping
+ ping_ok = self._do_ping(node_id)
+ if not ping_ok:
+ self._blacklist_node(node_id)
+ self.fail_node_not_alive(hostname)
+ else:
+ self._put_node_in_provision(node_id)
+ self._node_to_provision = node_id
+ super(PlanetlabNode, self).discover()
- else:
- self.fail_node_not_available(hostname)
+ else:
+ self.fail_node_not_available(hostname)
else:
# the user specifies constraints based on attributes, zero, one or
if nodes_inslice:
node_id = self._choose_random_node(nodes_inslice)
- if not node_id and nodes_not_inslice:
+ if not node_id:
# Either there were no matching nodes in the user's slice, or
# the nodes in the slice were blacklisted or being provisioned
# by other RM. Note nodes_not_inslice is never empty
node_id = self._choose_random_node(nodes_not_inslice)
- if not node_id:
- self.fail_not_enough_nodes()
- self._node_to_provision = node_id
- super(PlanetlabNode, self).discover()
+ if node_id:
+ self._node_to_provision = node_id
+ super(PlanetlabNode, self).discover()
+ else:
+ self.fail_not_enough_nodes()
- def provisionl(self):
+ def provision(self):
"""
Add node to user's slice after verifing that the node is functioning
correctly
provision_ok = False
ssh_ok = False
proc_ok = False
- timeout = 1200
+ timeout = 120
while not provision_ok:
node = self._node_to_provision
- self._set_hostname_attr(node)
+ # Adding try catch to set hostname because sometimes MyPLC fails
+            # when trying to retrieve node's hostname
+ try:
+ self._set_hostname_attr(node)
+ except:
+ with PlanetlabNode.lock:
+ self._blacklist_node(node)
+ self.discover()
+ continue
+
self._add_node_to_slice(node)
# check ssh connection
# the timeout was reach without establishing ssh connection
# the node is blacklisted, deleted from the slice, and a new
# node to provision is discovered
- self._blacklist_node(node)
- self._delete_node_from_slice(node)
+ with PlanetlabNode.lock:
+ self._blacklist_node(node)
+ self._delete_node_from_slice(node)
+ self.set('hostname', None)
self.discover()
continue
cmd = 'mount |grep proc'
((out, err), proc) = self.execute(cmd)
if out.find("/proc type proc") < 0:
- self._blacklist_node(node)
- self._delete_node_from_slice(node)
+ with PlanetlabNode.lock:
+ self._blacklist_node(node)
+ self._delete_node_from_slice(node)
+ self.set('hostname', None)
self.discover()
continue
# filter nodes by range constraints e.g. max bandwidth
elif ('min' or 'max') in attr_name:
nodes_id = self._filter_by_range_attr(attr_name, attr_value, filters, nodes_id)
+
+ if not filters:
+ nodes = self.plapi.get_nodes()
+ for node in nodes:
+ nodes_id.append(node['node_id'])
return nodes_id
filters['run_level'] = 'boot'
filters['boot_state'] = 'boot'
filters['node_type'] = 'regular'
- filters['>last_contact'] = int(time.time()) - 2*3600
+ #filters['>last_contact'] = int(time.time()) - 2*3600
# adding node_id or hostname to the filters to check for the particular
# node
alive_nodes_id = self._get_nodes_id(filters)
if len(alive_nodes_id) == 0:
- self.fail_discovery()
+ self.fail_node_not_alive(self, hostname)
else:
nodes_id = list()
for node_id in alive_nodes_id:
From the possible nodes for provision, choose randomly to decrese the
probability of different RMs choosing the same node for provision
"""
- blist = PlanetlabNode.blacklist
- plist = PlanetlabNode.provisionlist
-
size = len(nodes)
while size:
size = size - 1
# check the node is not blacklisted or being provision by other RM
# and perform ping to check that is really alive
- if node_id not in blist and node_id not in plist:
- ping_ok = self._do_ping(node_id)
- if not ping_ok:
- self._blacklist_node(node_id)
- else:
- # discovered node for provision, added to provision list
- self._put_node_in_provision(node_id)
- return node_id
+ with PlanetlabNode.lock:
+
+ blist = self.plapi.blacklisted()
+ plist = self.plapi.reserved()
+ if node_id not in blist and node_id not in plist:
+ ping_ok = self._do_ping(node_id)
+ print " ### ping_ok #### %s guid %s" % (ping_ok, self.guid)
+ if not ping_ok:
+ self._blacklist_node(node_id)
+ else:
+ # discovered node for provision, added to provision list
+ self._put_node_in_provision(node_id)
+ print "node_id %s , guid %s" % (node_id, self.guid)
+ return node_id
def _get_nodes_id(self, filters):
return self.plapi.get_nodes(filters, fields=['node_id'])
def _add_node_to_slice(self, node_id):
- self.warn(" Adding node to slice ")
+ self.info(" Selected node to provision ")
slicename = self.get("username")
- with PlanetlabNode.lock_slice:
+ with PlanetlabNode.lock:
slice_nodes = self.plapi.get_slice_nodes(slicename)
slice_nodes.append(node_id)
self.plapi.add_slice_nodes(slicename, slice_nodes)
slicename = self.get("username")
self.plapi.delete_slice_node(slicename, [node])
+ def _get_hostname(self):
+ hostname = self.get("hostname")
+ ip = self.get("ip")
+ if hostname:
+ return hostname
+ elif ip:
+ hostname = sshfuncs.gethostbyname(ip)
+ return hostname
+ else:
+ return None
+
def _set_hostname_attr(self, node):
"""
Query PLCAPI for the hostname of a certain node id and sets the
slicename = self.get("username")
slice_nodes = self.plapi.get_slice_nodes(slicename)
nodes_inslice = list(set(nodes_id) & set(slice_nodes))
-
return nodes_inslice
def _do_ping(self, node_id):
"""
ping_ok = False
ip = self._get_ip(node_id)
- command = "ping -c2 %s | echo \"PING OK\"" % ip
+ print "ip de do_ping %s, guid %s" % (ip, self.guid)
+ if not ip: return ping_ok
+
+ command = "ping -c2 %s" % ip
(out, err) = lexec(command)
- if not out.find("PING OK") < 0:
+ print "out de do_ping %s, guid %s" % (out, self.guid)
+ if not out.find("2 received") < 0:
ping_ok = True
-
+
+ print "ping_ok de do_ping %s, guid %s" % (ping_ok, self.guid)
return ping_ok
def _blacklist_node(self, node):
"""
Add node mal functioning node to blacklist
"""
- blist = PlanetlabNode.blacklist
-
self.warn(" Blacklisting malfunctioning node ")
- with PlanetlabNode.lock_blist:
- blist.append(node)
+ self._plapi.blacklist_host(node)
def _put_node_in_provision(self, node):
"""
Add node to the list of nodes being provisioned, in order for other RMs
to not try to provision the same one again
"""
- plist = PlanetlabNode.provisionlist
-
- self.warn(" Provisioning node ")
- with PlanetlabNode.lock_plist:
- plist.append(node)
+ self._plapi.reserve_host(node)
def _get_ip(self, node_id):
"""
Query PLCAPI for the IP of a node with certain node id
"""
- ip = self.plapi.get_interfaces({'node_id':node_id}, fields=['ip'])
- ip = ip[0]['ip']
+ hostname = self.plapi.get_nodes(node_id, ['hostname'])[0]
+ print "#### HOSTNAME ##### %s ### guid %s " % (hostname['hostname'], self.guid)
+ ip = sshfuncs.gethostbyname(hostname['hostname'])
+ if not ip:
+ # Fail while trying to find the IP
+ return None
return ip
def fail_discovery(self):
self.error(msg)
raise RuntimeError, msg
- def fail_node_not_alive(self, hostname):
- msg = "Node %s not alive, pick another node" % hostname
+ def fail_node_not_alive(self, hostname=None):
+ self.fail()
+ msg = "Node %s not alive" % hostname
raise RuntimeError, msg
def fail_node_not_available(self, hostname):
- msg = "Node %s not available for provisioning, pick another \
- node" % hostname
+ self.fail()
+ msg = "Node %s not available for provisioning" % hostname
raise RuntimeError, msg
def fail_not_enough_nodes(self):
+ self.fail()
msg = "Not enough nodes available for provisioning"
raise RuntimeError, msg
_apis = dict()
@classmethod
- def get_api(cls, slicename, pl_pass, pl_host,
+ def get_api(cls, pl_user, pl_pass, pl_host,
pl_ptn = "https://%(hostname)s:443/PLCAPI/",
proxy = None):
""" Get existing PLCAPI instance
- :param slicename: Planelab slice name
- :type slicename: str
+ :param pl_user: Planelab user name (used for web login)
+ :type pl_user: str
:param pl_pass: Planetlab password (used for web login)
:type pl_pass: str
:param pl_host: Planetlab registry host (e.g. "www.planet-lab.eu")
:param proxy: Proxy service url
:type pl_ptn: str
"""
- if slice and pl_pass and pl_host:
- key = cls._make_key(slicename, pl_host)
+ if pl_user and pl_pass and pl_host:
+ key = cls._make_key(pl_user, pl_host)
with cls._lock:
api = cls._apis.get(key)
if not api:
- api = cls.create_api(slicename, pl_pass, pl_host, pl_ptn, proxy)
+ api = cls.create_api(pl_user, pl_pass, pl_host, pl_ptn, proxy)
return api
return None
@classmethod
- def create_api(cls, slicename, pl_pass, pl_host,
+ def create_api(cls, pl_user, pl_pass, pl_host,
pl_ptn = "https://%(hostname)s:443/PLCAPI/",
proxy = None):
""" Create an PLCAPI instance
- :param slicename: Planelab slice name
- :type slicename: str
+ :param pl_user: Planelab user name (used for web login)
+ :type pl_user: str
:param pl_pass: Planetlab password (used for web login)
:type pl_pass: str
:param pl_host: Planetlab registry host (e.g. "www.planet-lab.eu")
:type pl_ptn: str
"""
api = PLCAPI(
- username = slicename,
+ username = pl_user,
password = pl_pass,
hostname = pl_host,
urlpattern = pl_ptn,
proxy = proxy
)
- key = cls._make_key(slicename, pl_host)
+ key = cls._make_key(pl_user, pl_host)
cls._apis[key] = api
return api
--- /dev/null
+#!/usr/bin/env python
+#
+# NEPI, a framework to manage network experiments
+# Copyright (C) 2013 INRIA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Julien Tribino <julien.tribino@inria.fr>
+
+
+from nepi.execution.resource import ResourceFactory, clsinit_copy, ResourceManager, ResourceAction, ResourceState
+from nepi.execution.ec import ExperimentController
+from nepi.execution.attribute import Attribute, Flags
+
+from nepi.resources.omf.node import OMFNode
+from nepi.resources.omf.application import OMFApplication
+
+from nepi.util.timefuncs import *
+
+import time
+import unittest
+
+# Minimal ExperimentController subclass used as a test double; no behavior
+# is overridden.
+class DummyEC(ExperimentController):
+    pass
+
+@clsinit_copy
+class DummyOMFApplication(OMFApplication):
+    """OMFApplication subclass exposing a 'test' attribute whose set_hook
+    multiplies every assigned value by 10; used to verify that attribute
+    set hooks are dispatched by the controller."""
+
+    _rtype = "DummyOMFApplication"
+
+    @classmethod
+    def _register_attributes(cls):
+        # Single extra attribute wired to the transforming hook below.
+        test = Attribute("test", "Input of the application", default = 0, set_hook = cls.test_hook)
+        cls._register_attribute(test)
+
+    @classmethod
+    def test_hook(cls, old_value, new_value):
+        # The stored value is 10x whatever the caller assigned.
+        new_value *= 10
+        print "Change the value of test from "+ str(old_value) +" to : " + str(new_value)
+        return new_value
+
+
+class OMFTestSet(unittest.TestCase):
+    """Integration test for attribute set hooks on a deployed OMF experiment.
+
+    NOTE(review): this talks to a live XMPP testbed (xmpp-plexus.onelab.eu)
+    and sleeps between steps, so it is an online integration test, not a
+    unit test.
+    """
+
+    def test_set_hook(self):
+        # Assert that setting 'test' on DummyOMFApplication goes through
+        # test_hook (x10) both before and after the application starts.
+        self.ec = DummyEC(exp_id = "30")
+
+        ResourceFactory.register_type(DummyOMFApplication)
+
+        self.node1 = self.ec.register_resource("OMFNode")
+        self.ec.set(self.node1, 'hostname', 'omf.plexus.wlab17')
+        self.ec.set(self.node1, 'xmppSlice', "nepi")
+        self.ec.set(self.node1, 'xmppHost', "xmpp-plexus.onelab.eu")
+        self.ec.set(self.node1, 'xmppPort', "5222")
+        self.ec.set(self.node1, 'xmppPassword', "1234")
+
+        self.app1 = self.ec.register_resource("DummyOMFApplication")
+        self.ec.set(self.app1, 'appid', 'Test#1')
+        self.ec.set(self.app1, 'path', "/usr/bin/ping")
+        self.ec.set(self.app1, 'args', "")
+        self.ec.set(self.app1, 'env', "")
+        self.ec.set(self.app1, 'xmppSlice', "nepi")
+        self.ec.set(self.app1, 'xmppHost', "xmpp-plexus.onelab.eu")
+        self.ec.set(self.app1, 'xmppPort', "5222")
+        self.ec.set(self.app1, 'xmppPassword', "1234")
+
+        self.ec.register_connection(self.app1, self.node1)
+
+        # Stop the application 10 seconds after it starts.
+        self.ec.register_condition(self.app1, ResourceAction.STOP, self.app1, ResourceState.STARTED , "10s")
+
+        self.ec.deploy()
+
+        time.sleep(3)
+        print "First try to change the STDIN"
+        self.ec.set(self.app1, 'test', 3)
+
+        self.assertEquals(self.ec.get(self.app1, 'test'), 30)
+
+        time.sleep(3)
+        print "Second try to change the STDIN"
+        self.ec.set(self.app1, 'test', 101)
+        self.assertEquals(self.ec.get(self.app1, 'test'), 1010)
+
+        self.ec.wait_finished([self.app1])
+
+        # Stop Experiment
+        self.ec.shutdown()
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
+
class DummyRM(ResourceManager):
pass
+
class OMFResourceFactoryTestCase(unittest.TestCase):
def test_creation_phase(self):
self.ec.set(self.iface1, 'type', "g")
self.ec.set(self.iface1, 'essid', "vlcexp")
self.ec.set(self.iface1, 'ip', "10.0.0.17")
-
+
self.channel = self.ec.register_resource("OMFChannel")
self.ec.set(self.channel, 'channel', "6")
self.ec.set(self.channel, 'xmppSlice', "nepi")
# Author: Lucia Guevgeozian <lucia.guevgeozian_odizzio@inria.fr>
from nepi.execution.ec import ExperimentController
-#from nepi.execution.resource import ResourceAction, ResourceState, populate_factory
from nepi.resources.planetlab.node import PlanetlabNode
-from nepi.resources.planetlab.plcapi import PLCAPI
+from nepi.resources.planetlab.plcapi import PLCAPI, PLCAPIFactory
import os
import time
import unittest
-
+import multiprocessing
class DummyEC(ExperimentController):
pass
if pl_user:
ec.set(node, "pluser", pl_user)
if pl_password:
- ec.set(node, "password", pl_password)
+ ec.set(node, "plpassword", pl_password)
if hostname:
ec.set(node, "hostname", hostname)
ec.set(node, "cleanHome", True)
ec.set(node, "cleanProcesses", True)
- return ec
+ return node
class PLNodeFactoryTestCase(unittest.TestCase):
def test_creation_phase(self):
self.assertEquals(PlanetlabNode.rtype(), "PlanetlabNode")
- self.assertEquals(len(PlanetlabNode._attributes), 30)
- self.assertEquals(len(PlanetlabNode.blacklist), 0)
- self.assertEquals(len(PlanetlabNode.provisionlist), 0)
+ self.assertEquals(len(PlanetlabNode._attributes), 29)
class PLNodeTestCase(unittest.TestCase):
"""
- This tests use inria_sfatest slice, and certain nodes already added to the
- slice, and ONLY those in order for the test not to fail.
+ This tests use inria_sfatest slice, and planetlab2.utt.fr already added to
+ the slice, and ONLY this one in order for the test not to fail.
"""
def setUp(self):
self.username = "inria_sfatest"
self.pl_user = os.environ.get("PL_USER")
self.pl_password = os.environ.get("PL_PASS")
+ self.pl_url = "www.planet-lab.eu"
+ self.pl_ptn = "https://%(hostname)s:443/PLCAPI/"
def test_plapi(self):
"""
instanciated, and is an instance of PLCAPI. Ignore error while
executing the ec.shutdown method, the error is due to the name
of the host not being defined yet for this test.
- """
- self.ec = create_node(self.ec, self.username, self.pl_user,
+ """
+ node1 = create_node(self.ec, self.username, self.pl_user,
self.pl_password, country="France")
- plnode_rm = self.ec.get_resource(1)
- hostname = plnode_rm.get("hostname")
+ plnode_rm1 = self.ec.get_resource(node1)
+ hostname = plnode_rm1.get("hostname")
self.assertIsNone(hostname)
- self.assertIsNone(plnode_rm._node_to_provision)
+ self.assertIsNone(plnode_rm1._node_to_provision)
- api = plnode_rm.plapi
- self.assertIsInstance(api, PLCAPI)
+ api1 = plnode_rm1.plapi
+ self.assertIsInstance(api1, PLCAPI)
+ self.assertEquals(len(api1.reserved()), 0)
+ self.assertEquals(len(api1.blacklisted()), 0)
+
+ node2 = create_node(self.ec, self.username, self.pl_user,
+ self.pl_password, country="France")
+
+ plnode_rm2 = self.ec.get_resource(node2)
+ api2 = plnode_rm2.plapi
+ self.assertEquals(api1, api2)
# Set hostname attribute in order for the shutdown method not to fail
- plnode_rm._set_hostname_attr(7057)
+ plnode_rm1._set_hostname_attr(7057)
+ plnode_rm2._set_hostname_attr(7057)
def test_discover_inslice(self):
"""
the slice and match the constraints OS Fedora12 and country France.
Check planetlab2.utt.fr is alive if the test fails.
"""
- self.ec = create_node(self.ec, self.username, self.pl_user,
+ node = create_node(self.ec, self.username, self.pl_user,
self.pl_password, country="France", operatingSystem="f12")
- plnode_rm = self.ec.get_resource(1)
+ plnode_rm = self.ec.get_resource(node)
hostname = plnode_rm.get("hostname")
self.assertIsNone(hostname)
- plnode_rm.discoverl()
+ plnode_rm.discover()
self.assertEquals(plnode_rm._node_to_provision, 7057)
# Set hostname attribute in order for the shutdown method not to fail
discover method picks one that match constraints outside from the
slice.
"""
- self.ec = create_node(self.ec, self.username, self.pl_user,
+ node = create_node(self.ec, self.username, self.pl_user,
self.pl_password, country="France", operatingSystem="f14")
- plnode_rm = self.ec.get_resource(1)
- plnode_rm.discoverl()
+ plnode_rm = self.ec.get_resource(node)
+ plnode_rm.discover()
result = [14281, 1034, 7035] # nodes matching f14 and France
self.assertIn(plnode_rm._node_to_provision, result)
- self.assertIsNot(PlanetlabNode.provisionlist, list())
+ self.assertIsNot(plnode_rm.plapi.reserved(), set())
# Set hostname attribute in order for the shutdown method not to fail
plnode_rm._set_hostname_attr(plnode_rm._node_to_provision)
This test checks that if the user specify the hostname, only that node
is discovered.
"""
- self.ec = create_node(self.ec, self.username, self.pl_user,
+ node = create_node(self.ec, self.username, self.pl_user,
self.pl_password, hostname="planetlab1.sics.se")
- plnode_rm = self.ec.get_resource(1)
- plnode_rm.discoverl()
+ plnode_rm = self.ec.get_resource(node)
+ plnode_rm.discover()
self.assertEquals(plnode_rm._node_to_provision, 14871)
- self.assertEquals(PlanetlabNode.provisionlist, [14871])
+ self.assertEquals(plnode_rm.plapi.reserved(), set([14871]))
def test_discover_with_ranges(self):
"""
Checks that defining max or min attributes, the discover method works.
"""
- self.ec = create_node(self.ec, self.username, self.pl_user,
+ node = create_node(self.ec, self.username, self.pl_user,
self.pl_password, minCpu=50) #minBandwidth=500)
- plnode_rm = self.ec.get_resource(1)
- plnode_rm.discoverl()
+ plnode_rm = self.ec.get_resource(node)
+ plnode_rm.discover()
#result = [15815, 15814, 425, 417, 1054, 1102, 1107, 505, 1031]
result = [425, 15815, 15814, 14842, 427, 41, 14466]
self.assertIn(plnode_rm._node_to_provision, result)
- self.assertIsNot(PlanetlabNode.provisionlist, list())
+ self.assertIsNot(plnode_rm.plapi.reserved(), set())
# Set hostname attribute in order for the shutdown method not to fail
plnode_rm._set_hostname_attr(plnode_rm._node_to_provision)
planetlab-1a.ics.uci.edu is used, if the test fails, check that the
result of the plcapi query is actually empty.
"""
- self.ec = create_node(self.ec, self.username, self.pl_user,
+ node = create_node(self.ec, self.username, self.pl_user,
self.pl_password, hostname="planetlab-1a.ics.uci.edu")
- plnode_rm = self.ec.get_resource(1)
- self.assertEquals(PlanetlabNode.blacklist, list())
+ plnode_rm = self.ec.get_resource(node)
+ self.assertEquals(plnode_rm.plapi.blacklisted(), set())
# check that the node is actually malfunctioning
api = plnode_rm.plapi
if not node_id:
with self.assertRaises(RuntimeError):
- plnode_rm.discoverl()
- self.assertEquals(PlanetlabNode.blacklist, [14871])
+ plnode_rm.discover()
+ self.assertEquals(plnode_rm.plapi.blacklisted(), set([14871]))
def test_provision_node_inslice(self):
"""
Check provision of the node planetlab2.utt.fr.
"""
- self.ec = create_node(self.ec, self.username, self.pl_user,
+ node = create_node(self.ec, self.username, self.pl_user,
self.pl_password, country="France", operatingSystem="f12")
- plnode_rm = self.ec.get_resource(1)
- self.assertEquals(len(PlanetlabNode.blacklist), 0)
- self.assertEquals(len(PlanetlabNode.provisionlist), 0)
+ plnode_rm = self.ec.get_resource(node)
+ self.assertEquals(len(plnode_rm.plapi.blacklisted()), 0)
+ self.assertEquals(len(plnode_rm.plapi.reserved()), 0)
- plnode_rm.discoverl()
- plnode_rm.provisionl()
+ plnode_rm.discover()
+ plnode_rm.provision()
ip = plnode_rm.get("ip")
self.assertEquals(ip, "194.254.215.12")
+ self.assertEquals(len(plnode_rm.plapi.reserved()), 1)
def test_provision_node_not_inslice(self):
"""
ple5.ipv6.lip6.fr
node2pl.planet-lab.telecom-lille1.eu
"""
- self.ec = create_node(self.ec, self.username, self.pl_user,
+ node = create_node(self.ec, self.username, self.pl_user,
self.pl_password, country="France", operatingSystem="f14")
- plnode_rm = self.ec.get_resource(1)
- self.assertEquals(PlanetlabNode.blacklist, list())
- self.assertEquals(PlanetlabNode.provisionlist, list())
+ plnode_rm = self.ec.get_resource(node)
+ self.assertEquals(plnode_rm.plapi.blacklisted(), set())
+ self.assertEquals(plnode_rm.plapi.reserved(), set())
- plnode_rm.discoverl()
- plnode_rm.provisionl()
+ plnode_rm.discover()
+ plnode_rm.provision()
ip = plnode_rm.get("ip")
result = ["194.167.254.18","132.227.62.123","194.167.254.19"]
self.assertIn(ip, result)
-
def test_provision_more_than_available(self):
"""
Check that if the user wants to provision 4 nodes with fedora 14, he
gets RuntimeError, there are only 3 nodes f14.
"""
- self.ec = create_node(self.ec, self.username, self.pl_user,
+ node1 = create_node(self.ec, self.username, self.pl_user,
self.pl_password, country="France", operatingSystem="f14")
- plnode_rm1 = self.ec.get_resource(1)
- plnode_rm1.discoverl()
- plnode_rm1.provisionl()
+ plnode_rm1 = self.ec.get_resource(node1)
+ plnode_rm1.discover()
+ plnode_rm1.provision()
- self.ec = create_node(self.ec, self.username, self.pl_user,
+ node2 = create_node(self.ec, self.username, self.pl_user,
self.pl_password, country="France", operatingSystem="f14")
- plnode_rm2 = self.ec.get_resource(2)
- plnode_rm2.discoverl()
- plnode_rm2.provisionl()
+ plnode_rm2 = self.ec.get_resource(node2)
+ plnode_rm2.discover()
+ plnode_rm2.provision()
- self.ec = create_node(self.ec, self.username, self.pl_user,
+ node3 = create_node(self.ec, self.username, self.pl_user,
self.pl_password, country="France", operatingSystem="f14")
- plnode_rm3 = self.ec.get_resource(3)
+ plnode_rm3 = self.ec.get_resource(node3)
with self.assertRaises(RuntimeError):
- plnode_rm3.discoverl()
+ plnode_rm3.discover()
with self.assertRaises(RuntimeError):
- plnode_rm3.provisionl()
+ plnode_rm3.provision()
- self.ec = create_node(self.ec, self.username, self.pl_user,
+ node4 = create_node(self.ec, self.username, self.pl_user,
self.pl_password, country="France", operatingSystem="f14")
- plnode_rm4 = self.ec.get_resource(4)
+ plnode_rm4 = self.ec.get_resource(node4)
with self.assertRaises(RuntimeError):
- plnode_rm4.discoverl()
- with self.assertRaises(RuntimeError):
- plnode_rm4.provisionl()
+ plnode_rm4.discover()
+
+ host1 = plnode_rm1.get('hostname')
+
+ plnode_rm3._set_hostname_attr(host1)
+ plnode_rm4._set_hostname_attr(host1)
+
+ def test_concurrence(self):
+ """
+ Test with the nodes being discovered and provisioned at the same time.
+ """
+ node1 = create_node(self.ec, self.username, self.pl_user,
+ self.pl_password, country="France", operatingSystem="f14")
+
+ node2 = create_node(self.ec, self.username, self.pl_user,
+ self.pl_password, country="France", operatingSystem="f14")
+
+ node3 = create_node(self.ec, self.username, self.pl_user,
+ self.pl_password, country="France", operatingSystem="f14")
+
+ node4 = create_node(self.ec, self.username, self.pl_user,
+ self.pl_password, country="France", operatingSystem="f14")
+ self.ec.deploy()
+ self.ec.wait_finished([node1, node2, node3, node4])
+ state = self.ec.ecstate
+ self.assertEquals(state, 2)
def tearDown(self):
- PlanetlabNode.provisionlist = list()
- PlanetlabNode.blacklist = list()
+ commonapi=PLCAPIFactory.get_api(self.pl_user, self.pl_password,
+ self.pl_url, self.pl_ptn)
+ commonapi._reserved = set()
+ commonapi._blacklist = set()
self.ec.shutdown()