2 # -*- coding: utf-8 -*-
4 from constants import TESTBED_ID, TESTBED_VERSION
5 from nepi.core import testbed_impl
6 from nepi.core.metadata import Parallel
7 from nepi.util.constants import TIME_NOW
8 from nepi.util.graphtools import mst
9 from nepi.util import ipaddr2
10 from nepi.util import environ
11 from nepi.util.parallel import ParallelRun
class TempKeyError(Exception):
    # Marker exception for failures around the temporary deployment SSH key
    # (see _make_temp_private_key). NOTE(review): the original listing is
    # gapped here; the class body (orig. line 31, presumably `pass`) was
    # missing, leaving the class statement syntactically invalid. Restored.
    pass
# PlanetLab testbed controller: drives slice provisioning, resource
# discovery, deployment and recovery for the "planetlab" testbed.
33 class TestbedController(testbed_impl.TestbedController):
# NOTE(review): gapped listing — the `def __init__(self):` header
# (orig. 34) and `self._node = node` (orig. 41, referenced throughout
# as self._node) are not visible here; confirm against upstream source.
35 super(TestbedController, self).__init__(TESTBED_ID, TESTBED_VERSION)
36 self._home_directory = None
# Project-local backend modules, imported lazily inside __init__.
40 import node, interfaces, application, multicast
42 self._interfaces = interfaces
43 self._app = application
44 self._multicast = multicast
# Node ids known to be unusable; persisted via _load/_save_blacklist.
46 self._blacklist = set()
47 self._just_provisioned = set()
49 self._load_blacklist()
51 self._logger = logging.getLogger('nepi.testbeds.planetlab')
# While True, attribute setters tolerate errors (see set() / recover()).
53 self.recovering = False
# Read-only accessor for the home directory configured in do_setup
# (the "homeDirectory" attribute). NOTE(review): the @property decorator
# (orig. 55) is not visible in this gapped listing — presumably present.
56 def home_directory(self):
57 return self._home_directory
# Lazily-constructed PLCAPI client, cached on first access.
# NOTE(review): gapped listing — the property header and the branch that
# chooses authenticated vs. anonymous access (orig. 59-60, 62-64, 70-71,
# and the trailing `return self._plapi`) are missing from this view.
61 if not hasattr(self, '_plapi'):
# Authenticated client using the credentials gathered in do_setup.
65 self._plapi = plcapi.PLCAPI(
66 username = self.authUser,
67 password = self.authString,
68 hostname = self.plcHost,
69 urlpattern = self.plcUrl
72 # anonymous access - may not be enough for much
73 self._plapi = plcapi.PLCAPI()
# Cached lookup of the slice's numeric id via the PLC API.
# NOTE(review): gapped listing — the property header, the guard around
# `slices[0]` and the `return` (orig. 76-77, 80, 82, 84-86) are missing.
78 if not hasattr(self, '_slice_id'):
79 slices = self.plapi.GetSlices(self.slicename, fields=('slice_id',))
81 self._slice_id = slices[0]['slice_id']
83 # If it wasn't found, don't remember this failure, keep trying
# Cached lookup of the slice's 'vsys_vnet' tag (the private network
# assigned to the slice). NOTE(review): gapped listing — the property
# header, the fields argument / closing paren of GetSliceTags, and the
# guard around `slicetags[0]` (orig. 87-88, 93-94, 96, 98) are missing.
89 if not hasattr(self, '_vsys_vnet'):
90 slicetags = self.plapi.GetSliceTags(
91 name = self.slicename,
92 tagname = 'vsys_vnet',
95 self._vsys_vnet = slicetags[0]['value']
97 # If it wasn't found, don't remember this failure, keep trying
99 return self._vsys_vnet
# Load the persisted node blacklist from the user's NEPI home directory.
# NOTE(review): gapped listing — the try/except around open() (a missing
# file presumably yields an empty set, orig. 103-106, 108-110) and the
# int-conversion / file close (orig. 112, 114-117) are not visible here.
101 def _load_blacklist(self):
102 blpath = environ.homepath('plblacklist')
105 bl = open(blpath, "r")
# Fallback when the blacklist file cannot be opened: start empty.
107 self._blacklist = set()
# One blacklisted node id per line, whitespace-stripped.
111 self._blacklist = set(
113 map(str.strip, bl.readlines())
# Persist the in-memory node blacklist, one entry per line.
# NOTE(review): gapped listing — the try/finally and the
# `bl.writelines(` call this map feeds (orig. 122-123, 125-127,
# including bl.close()) are not visible here.
119 def _save_blacklist(self):
120 blpath = environ.homepath('plblacklist')
121 bl = open(blpath, "w")
124 map('%s\n'.__mod__, self._blacklist))
# do_setup: read all testbed attributes into instance fields, validate the
# mandatory ones, configure logging, then delegate to the superclass.
# NOTE(review): gapped listing — the `def do_setup(self):` header
# (orig. ~128) is not visible here. Python 2 raise syntax throughout.
129 self._home_directory = self._attributes.\
130 get_attribute_value("homeDirectory")
131 self.slicename = self._attributes.\
132 get_attribute_value("slice")
133 self.authUser = self._attributes.\
134 get_attribute_value("authUser")
135 self.authString = self._attributes.\
136 get_attribute_value("authPass")
137 self.sliceSSHKey = self._attributes.\
138 get_attribute_value("sliceSSHKey")
# Passphrase is gathered on demand later (see _make_temp_private_key).
139 self.sliceSSHKeyPass = None
140 self.plcHost = self._attributes.\
141 get_attribute_value("plcHost")
142 self.plcUrl = self._attributes.\
143 get_attribute_value("plcUrl")
144 self.logLevel = self._attributes.\
145 get_attribute_value("plLogLevel")
146 self.tapPortBase = self._attributes.\
147 get_attribute_value("tapPortBase")
148 self.p2pDeployment = self._attributes.\
149 get_attribute_value("p2pDeployment")
150 self.dedicatedSlice = self._attributes.\
151 get_attribute_value("dedicatedSlice")
# Fail fast on missing credentials/key before any remote work is done.
153 if not self.slicename:
154 raise RuntimeError, "Slice not set"
155 if not self.authUser:
156 raise RuntimeError, "PlanetLab account username not set"
157 if not self.authString:
158 raise RuntimeError, "PlanetLab account passphrase not set"
159 if not self.sliceSSHKey:
160 raise RuntimeError, "PlanetLab account key not specified"
161 if not os.path.exists(self.sliceSSHKey):
162 raise RuntimeError, "PlanetLab account key cannot be opened: %s" % (self.sliceSSHKey,)
# plLogLevel is expected to name a logging level (e.g. "DEBUG", "INFO").
164 self._logger.setLevel(getattr(logging,self.logLevel))
166 super(TestbedController, self).do_setup()
168 def do_post_asynclaunch(self, guid):
169 # Dependencies were launched asynchronously,
171 dep = self._elements[guid]
172 if isinstance(dep, self._app.Dependency):
173 dep.async_setup_wait()
175 # Two-phase configuration for asynchronous launch
176 do_poststep_preconfigure = staticmethod(do_post_asynclaunch)
177 do_poststep_configure = staticmethod(do_post_asynclaunch)
# Discover resources, provision slivers, wait for them, then (optionally)
# plan p2p deployment before delegating to the superclass preconfigure.
# NOTE(review): gapped listing — the retry loop and the try block around
# do_wait_nodes() (orig. 180, 184, 187-193, 195-197, 201) are missing;
# the stray `except` below presumably belongs to that missing try.
179 def do_preconfigure(self):
181 # Perform resource discovery if we don't have
182 # specific resources assigned yet
183 self.do_resource_discovery()
185 # Create PlanetLab slivers
186 self.do_provisioning()
189 # Wait for provisioning
# Unresponsive nodes trigger a retry of the discovery/provisioning cycle.
194 except self._node.UnresponsiveNodeError:
198 if self.p2pDeployment:
199 # Plan application deployment
200 self.do_spanning_deployment_plan()
202 # Configure elements per XML data
203 super(TestbedController, self).do_preconfigure()
# Assign concrete PlanetLab nodes to every Node element that does not yet
# have one: first nodes with a single candidate, then a backtracking
# resource-allocation search; newly chosen nodes go into self._to_provision.
# NOTE(review): gapped listing — several control-flow lines (elif/else,
# loop bodies, the nodes/reqs initialisation and the recover-mode branch,
# orig. 207, 212-213, 227, 232, 238, 241, 244-245, 254-257, 259-260,
# 264-265, 269, 272-273) are missing from this view.
205 def do_resource_discovery(self, recover = False):
206 to_provision = self._to_provision = set()
# Nodes already assigned (or blacklisted) must not be picked again.
208 reserved = set(self._blacklist)
209 for guid, node in self._elements.iteritems():
210 if isinstance(node, self._node.Node) and node._node_id is not None:
211 reserved.add(node._node_id)
214 # look for perfectly defined nodes
215 # (ie: those with only one candidate)
216 for guid, node in self._elements.iteritems():
217 if isinstance(node, self._node.Node) and node._node_id is None:
218 # Try existing nodes first
219 # If we have only one candidate, simply use it
220 candidates = node.find_candidates(
221 filter_slice_id = self.slice_id)
222 candidates -= reserved
223 if len(candidates) == 1:
224 node_id = iter(candidates).next()
225 node.assign_node_id(node_id)
226 reserved.add(node_id)
228 # Try again including unassigned nodes
229 candidates = node.find_candidates()
230 candidates -= reserved
231 if len(candidates) > 1:
233 if len(candidates) == 1:
234 node_id = iter(candidates).next()
235 node.assign_node_id(node_id)
# Nodes picked here are new to the slice and must be provisioned.
236 to_provision.add(node_id)
237 reserved.add(node_id)
# NOTE(review): "sith" in the message below looks like a typo for "with";
# left untouched here because this is a runtime string, not a comment.
239 raise RuntimeError, "Cannot assign resources for node %s, no candidates sith %s" % (guid,
240 node.make_filter_description())
242 # Now do the backtracking search for a suitable solution
243 # First with existing slice nodes
246 for guid, node in self._elements.iteritems():
247 if isinstance(node, self._node.Node) and node._node_id is None:
248 # Try existing nodes first
249 # If we have only one candidate, simply use it
250 candidates = node.find_candidates(
251 filter_slice_id = self.slice_id)
252 candidates -= reserved
253 reqs.append(candidates)
# In recover mode an unassigned host is unrecoverable state.
258 raise RuntimeError, "Impossible to recover: unassigned host for Nodes %r" % (nodes,)
261 solution = resourcealloc.alloc(reqs)
262 except resourcealloc.ResourceAllocationError:
263 # Failed, try again with all nodes
266 candidates = node.find_candidates()
267 candidates -= reserved
268 reqs.append(candidates)
270 solution = resourcealloc.alloc(reqs)
271 to_provision.update(solution)
# Bind each pending Node element to its allocated node id.
274 for node, node_id in zip(nodes, solution):
275 node.assign_node_id(node_id)
277 def do_provisioning(self):
278 if self._to_provision:
279 # Add new nodes to the slice
280 cur_nodes = self.plapi.GetSlices(self.slicename, ['node_ids'])[0]['node_ids']
281 new_nodes = list(set(cur_nodes) | self._to_provision)
282 self.plapi.UpdateSlice(self.slicename, nodes=new_nodes)
285 self._just_provisioned = self._to_provision
286 del self._to_provision
# Inject per-node configuration, wait for every node's sliver to become
# usable, and blacklist nodes that turn out to be unresponsive.
# NOTE(review): gapped listing — several lines (the try around
# wait_provisioning, the re-raise/abort path after an unresponsive node,
# and the guard around _save_blacklist; orig. 295-296, 298-299, 303,
# 306-307, 309, 313, 315, 317, 323-325, 327-329) are missing here.
288 def do_wait_nodes(self):
289 for guid, node in self._elements.iteritems():
290 if isinstance(node, self._node.Node):
291 # Just inject configuration stuff
292 node.home_path = "nepi-node-%s" % (guid,)
293 node.ident_path = self.sliceSSHKey
294 node.slicename = self.slicename
297 self._logger.info("PlanetLab Node %s configured at %s", guid, node.hostname)
300 for guid, node in self._elements.iteritems():
301 if isinstance(node, self._node.Node):
302 self._logger.info("Waiting for Node %s configured at %s", guid, node.hostname)
# Freshly provisioned nodes get a much longer grace period (20 min vs 1 min).
304 node.wait_provisioning(
305 (20*60 if node._node_id in self._just_provisioned else 60)
308 self._logger.info("READY Node %s at %s", guid, node.hostname)
310 # Prepare dependency installer now
311 node.prepare_dependencies()
312 except self._node.UnresponsiveNodeError:
314 self._logger.warn("UNRESPONSIVE Node %s", node.hostname)
316 # Mark all dead nodes (which are unresponsive) on the blacklist
318 for guid, node in self._elements.iteritems():
319 if isinstance(node, self._node.Node):
320 if not node.is_alive():
321 self._logger.warn("Blacklisting %s for unresponsiveness", node.hostname)
322 self._blacklist.add(node._node_id)
# Persist the updated blacklist so future runs avoid these nodes.
326 self._save_blacklist()
# Best-effort diagnostics; presumably inside an except block — TODO confirm.
330 traceback.print_exc()
# Group equivalent dependencies (same build inputs and target platform),
# then arrange a spanning-tree ("chain") deployment so each group is
# uploaded once per subtree instead of once per node, using a re-ciphered
# temporary SSH key for node-to-node copying.
# NOTE(review): gapped listing — the dephash definition head, the group
# iteration that fills _ip/_ip_n, and the mst.mst(...) call that builds
# `plan` (orig. 338-340, 343-344, 347-349, 351, 357, 359, 363, 366, 369,
# 372-376, 378-380, 382, 384-388, 392) are missing from this view.
334 def do_spanning_deployment_plan(self):
335 # Create application groups by collecting all applications
336 # based on their hash - the hash should contain everything that
337 # defines them and the platform they're built
# Components of the dependency hash: build inputs plus target platform.
341 frozenset((app.depends or "").split(' ')),
342 frozenset((app.sources or "").split(' ')),
345 app.node.architecture,
346 app.node.operatingSystem,
350 depgroups = collections.defaultdict(list)
352 for element in self._elements.itervalues():
353 if isinstance(element, self._app.Dependency):
354 depgroups[dephash(element)].append(element)
355 elif isinstance(element, self._node.Node):
356 deps = element._yum_dependencies
358 depgroups[dephash(deps)].append(deps)
360 # Set up spanning deployment for those applications that
361 # have been deployed in several nodes.
362 for dh, group in depgroups.iteritems():
364 # Pick root (deterministically)
365 root = min(group, key=lambda app:app.node.hostname)
367 # Obtain all IPs in numeric format
368 # (which means faster distance computations)
370 dep._ip = socket.gethostbyname(dep.node.hostname)
371 dep._ip_n = struct.unpack('!L', socket.inet_aton(dep._ip))[0]
374 # NOTE: the plan is an iterator
# Edge weight for the spanning tree: numeric IP distance between nodes.
377 lambda a,b : ipaddr2.ipdistn(a._ip_n, b._ip_n),
381 # Re-sign private key
383 tempprk, temppuk, tmppass = self._make_temp_private_key()
# Wire every slave to its master and install the temporary key pair.
389 for slave, master in plan:
390 slave.set_master(master)
391 slave.install_keys(tempprk, temppuk, tmppass)
393 # We don't need the user's passphrase anymore
394 self.sliceSSHKeyPass = None
# Create a temporary copy of the user's SSH key pair, re-ciphered with a
# random 256-bit passphrase, for automated node-to-node chain deployment.
# Returns (private-key tempfile, public-key tempfile, passphrase).
# NOTE(review): gapped listing — the interactive fallback when SSH_ASKPASS
# is unset, the TempKeyError raise, the tempfile suffix/close arguments,
# the ssh-keygen argv head and the error check on its exit status
# (orig. 407-408, 414, 416-417, 422-423, 427-428, 433-434, 441-445,
# 448-449, 451-452, 454-455, 459, 461-462, 464-468, 474-475, 477-479)
# are missing from this view.
396 def _make_temp_private_key(self):
397 # Get the user's key's passphrase
398 if not self.sliceSSHKeyPass:
399 if 'SSH_ASKPASS' in os.environ:
# Delegate passphrase prompting to the user's SSH_ASKPASS helper.
400 proc = subprocess.Popen(
401 [ os.environ['SSH_ASKPASS'],
402 "Please type the passphrase for the %s SSH identity file. "
403 "The passphrase will be used to re-cipher the identity file with "
404 "a random 256-bit key for automated chain deployment on the "
405 "%s PlanetLab slice" % (
406 os.path.basename(self.sliceSSHKey),
409 stdin = open("/dev/null"),
410 stdout = subprocess.PIPE,
411 stderr = subprocess.PIPE)
412 out,err = proc.communicate()
413 self.sliceSSHKeyPass = out.strip()
# Without a passphrase we cannot re-cipher; presumably raises TempKeyError.
415 if not self.sliceSSHKeyPass:
418 # Create temporary key files
419 prk = tempfile.NamedTemporaryFile(
420 dir = self.root_directory,
421 prefix = "pl_deploy_tmpk_",
424 puk = tempfile.NamedTemporaryFile(
425 dir = self.root_directory,
426 prefix = "pl_deploy_tmpk_",
429 # Create secure 256-bits temporary passphrase
# 32 bytes from SystemRandom, hex-encoded (Python 2 str.encode("hex")).
430 passphrase = ''.join(map(chr,[rng.randint(0,255)
431 for rng in (random.SystemRandom(),)
432 for i in xrange(32)] )).encode("hex")
# Clone the user's key pair (contents and permissions) into the tempfiles.
435 oprk = open(self.sliceSSHKey, "rb")
436 opuk = open(self.sliceSSHKey+".pub", "rb")
437 shutil.copymode(oprk.name, prk.name)
438 shutil.copymode(opuk.name, puk.name)
439 shutil.copyfileobj(oprk, prk)
440 shutil.copyfileobj(opuk, puk)
446 # A descriptive comment
447 comment = "%s#NEPI_INTERNAL@%s" % (self.authUser, self.slicename)
# Re-cipher with ssh-keygen: old passphrase via -P, new one presumably -N.
450 proc = subprocess.Popen(
453 "-P", self.sliceSSHKeyPass,
456 stdout = subprocess.PIPE,
457 stderr = subprocess.PIPE,
458 stdin = subprocess.PIPE
460 out, err = proc.communicate()
463 raise RuntimeError, "Problem generating keys: \n%s\n%r" % (
469 # Change comment on public key
470 puklines = puk.readlines()
471 puklines[0] = puklines[0].split(' ')
472 puklines[0][-1] = comment+'\n'
473 puklines[0] = ' '.join(puklines[0])
476 puk.writelines(puklines)
480 return prk, puk, passphrase
# Set an attribute on an element, tolerating immutable-attribute errors
# while recovering (the stored value still matters for state rebuild).
# NOTE(review): gapped listing — the try/except around setattr, the
# re-raise when not recovering, and the element.refresh() call
# (orig. 486-487, 489, 495-496, 499) are missing from this view.
482 def set(self, guid, name, value, time = TIME_NOW):
483 super(TestbedController, self).set(guid, name, value, time)
484 # TODO: take on account schedule time for the task
485 element = self._elements[guid]
488 setattr(element, name, value)
490 # We ignore these errors while recovering.
491 # Some attributes are immutable, and setting
492 # them is necessary (to recover the state), but
493 # some are not (they throw an exception).
494 if not self.recovering:
# Give the element a chance to react to the attribute change.
497 if hasattr(element, 'refresh'):
498 # invoke attribute refresh hook
# Get an attribute: prefer the live element's attribute, falling back to
# the value recorded by the superclass when the element doesn't have it.
# NOTE(review): gapped listing — the try before getattr and the fallback
# `return value` (orig. 507, 510-511) are missing from this view.
501 def get(self, guid, name, time = TIME_NOW):
502 value = super(TestbedController, self).get(guid, name, time)
503 # TODO: take on account schedule time for the task
504 factory_id = self._create[guid]
505 factory = self._factories[factory_id]
506 element = self._elements.get(guid)
508 return getattr(element, name)
509 except (KeyError, AttributeError):
# Resolve an address attribute for an interface element; only index 0 is
# answered locally, everything else is delegated to the superclass.
# NOTE(review): gapped listing — the docstring and the
# `return iface.address` branch (orig. 513-515, 519, 524) are missing.
512 def get_address(self, guid, index, attribute='Address'):
516 iface = self._elements.get(guid)
517 if iface and index == 0:
518 if attribute == 'Address':
520 elif attribute == 'NetPrefix':
521 return iface.netprefix
522 elif attribute == 'Broadcast':
523 return iface.broadcast
525 # if all else fails, query box
526 return super(TestbedController, self).get_address(guid, index, attribute)
528 def action(self, time, guid, action):
529 raise NotImplementedError
# Teardown fragment: close traces, then run 'cleanup' and 'destroy' hooks
# on every element in shutdown order, and drop all element references.
# NOTE(review): gapped listing — the enclosing method header (presumably
# a shutdown/stop method, orig. ~531) and the trace-close body
# (orig. 533-534, 539, 543, 547) are missing from this view.
532 for trace in self._traces.itervalues():
# Invoke the named hook on an element only if the element defines it.
535 def invokeif(action, testbed, guid):
536 element = self._elements[guid]
537 if hasattr(element, action):
538 getattr(element, action)()
540 self._do_in_factory_order(
541 functools.partial(invokeif, 'cleanup'),
542 metadata.shutdown_order)
544 self._do_in_factory_order(
545 functools.partial(invokeif, 'destroy'),
546 metadata.shutdown_order)
548 self._elements.clear()
# Fetch a trace from an element: its synced-down contents ('value'),
# or its remote path/name.
# NOTE(review): gapped listing — the branch that reads the synced file
# into `content` and the final return (orig. 553, 556-561, 566-568)
# are missing from this view.
551 def trace(self, guid, trace_id, attribute='value'):
552 elem = self._elements[guid]
# 'value': sync the trace to the local home directory, then read it.
554 if attribute == 'value':
555 path = elem.sync_trace(self.home_directory, trace_id)
562 elif attribute == 'path':
563 content = elem.remote_trace_path(trace_id)
564 elif attribute == 'name':
565 content = elem.remote_trace_name(trace_id)
# Register a trace object so teardown can close it later (see the
# trace-closing loop in the shutdown path). NOTE(review): gapped listing —
# a trailing `return trace` (orig. ~572) may be missing; confirm upstream.
570 def follow_trace(self, trace_id, trace):
571 self._traces[trace_id] = trace
# Recovery procedure fragment: rebuild controller state from execute XML
# without re-running side-effectful deployment steps.
# NOTE(review): gapped listing — the method header (presumably
# `def recover(self):`, orig. ~573) and several argument lists of the
# _do_in_factory_order calls are missing from this view.
575 # An internal flag, so we know to behave differently in
576 # a few corner cases.
577 self.recovering = True
579 # Create and connect do not perform any real tasks against
580 # the nodes, it only sets up the object hierarchy,
581 # so we can run them normally
583 self.do_connect_init()
584 self.do_connect_compl()
586 # Manually recover nodes, to mark dependencies installed
587 # and clean up mutable attributes
588 self._do_in_factory_order(
589 lambda self, guid : self._elements[guid].recover(),
594 # Assign nodes - since we're working off execute XML, nodes
595 # have specific hostnames assigned and we don't need to do
596 # real assignment, only find out node ids and check liveliness
597 self.do_resource_discovery(recover = True)
600 # Pre/post configure, however, tends to set up tunnels
601 # Execute configuration steps only for those object
602 # kinds that do not have side effects
604 # Do the ones without side effects,
605 # including nodes that need to set up home
606 # folders and all that
607 self._do_in_factory_order(
608 "preconfigure_function",
611 Parallel(metadata.NODE),
615 # Tunnels require a home path that is configured
616 # at this step. Since we cannot run the step itself,
617 # we need to inject this homepath ourselves
618 for guid, element in self._elements.iteritems():
619 if isinstance(element, self._interfaces.TunIface):
620 element._home_path = "tun-%s" % (guid,)
622 # Manually recover tunnels, applications and
623 # netpipes, negating the side effects
624 self._do_in_factory_order(
625 lambda self, guid : self._elements[guid].recover(),
627 Parallel(metadata.TAPIFACE),
628 Parallel(metadata.TUNIFACE),
630 Parallel(metadata.NEPIDEPENDENCY),
631 Parallel(metadata.NS3DEPENDENCY),
632 Parallel(metadata.DEPENDENCY),
633 Parallel(metadata.APPLICATION),
636 # Tunnels are not harmed by configuration after
637 # recovery, and some attributes get set this way
638 # like external_iface
639 self._do_in_factory_order(
640 "preconfigure_function",
642 Parallel(metadata.TAPIFACE),
643 Parallel(metadata.TUNIFACE),
646 # Post-do the ones without side effects
647 self._do_in_factory_order(
648 "configure_function",
651 Parallel(metadata.NODE),
653 Parallel(metadata.TAPIFACE),
654 Parallel(metadata.TUNIFACE),
657 # There are no required prestart steps
658 # to call upon recovery, so we're done
# NOTE(review): this re-assigns True at the END of recovery; it likely
# should be `self.recovering = False` (the flag was already set True at
# the top). Left untouched (code change is out of scope here) — confirm
# against the upstream source before fixing.
660 self.recovering = True
# Generic element factory: instantiate `kind` with the PLC API client,
# give it a weak back-reference to this testbed, and copy all XML
# parameters onto it as attributes.
# NOTE(review): gapped listing — the try/except around setattr, the
# re-raise when not recovering, and the final `return app`
# (orig. 665, 669, 671, 677-680) are missing from this view.
662 def _make_generic(self, parameters, kind):
663 app = kind(self.plapi)
# Weak reference avoids a controller<->element reference cycle.
664 app.testbed = weakref.ref(self)
666 # Note: there is 1-to-1 correspondence between attribute names
667 # If that changes, this has to change as well
668 for attr,val in parameters.iteritems():
670 setattr(app, attr, val)
672 # We ignore these errors while recovering.
673 # Some attributes are immutable, and setting
674 # them is necessary (to recover the state), but
675 # some are not (they throw an exception).
676 if not self.recovering:
681 def _make_node(self, parameters):
682 node = self._make_generic(parameters, self._node.Node)
683 node.enable_cleanup = self.dedicatedSlice
686 def _make_node_iface(self, parameters):
687 return self._make_generic(parameters, self._interfaces.NodeIface)
689 def _make_tun_iface(self, parameters):
690 return self._make_generic(parameters, self._interfaces.TunIface)
692 def _make_tap_iface(self, parameters):
693 return self._make_generic(parameters, self._interfaces.TapIface)
695 def _make_netpipe(self, parameters):
696 return self._make_generic(parameters, self._interfaces.NetPipe)
698 def _make_internet(self, parameters):
699 return self._make_generic(parameters, self._interfaces.Internet)
701 def _make_application(self, parameters):
702 return self._make_generic(parameters, self._app.Application)
704 def _make_dependency(self, parameters):
705 return self._make_generic(parameters, self._app.Dependency)
707 def _make_nepi_dependency(self, parameters):
708 return self._make_generic(parameters, self._app.NepiDependency)
710 def _make_ns3_dependency(self, parameters):
711 return self._make_generic(parameters, self._app.NS3Dependency)
713 def _make_tun_filter(self, parameters):
714 return self._make_generic(parameters, self._interfaces.TunFilter)
716 def _make_class_queue_filter(self, parameters):
717 return self._make_generic(parameters, self._interfaces.ClassQueueFilter)
719 def _make_tos_queue_filter(self, parameters):
720 return self._make_generic(parameters, self._interfaces.ToSQueueFilter)
722 def _make_multicast_forwarder(self, parameters):
723 return self._make_generic(parameters, self._multicast.MulticastForwarder)
725 def _make_multicast_announcer(self, parameters):
726 return self._make_generic(parameters, self._multicast.MulticastAnnouncer)
728 def _make_multicast_router(self, parameters):
729 return self._make_generic(parameters, self._multicast.MulticastRouter)