nepi.git / src / nepi / testbeds / planetlab / execute.py (revision e1095ee4bc5f772365726a79923555d10288b090)
# -*- coding: utf-8 -*-

from constants import TESTBED_ID, TESTBED_VERSION
from nepi.core import testbed_impl
from nepi.core.metadata import Parallel
from nepi.util.constants import TIME_NOW
from nepi.util.graphtools import mst
from nepi.util import ipaddr2
from nepi.util import environ
from nepi.util.parallel import ParallelRun
import threading
import sys
import os
import os.path
import time
import resourcealloc
import collections
import operator
import functools
import socket
import struct
import tempfile
import subprocess
import random
import shutil
import logging
import metadata
import weakref
import util as plutil

class TempKeyError(Exception):
    pass

class TestbedController(testbed_impl.TestbedController):
    def __init__(self):
        super(TestbedController, self).__init__(TESTBED_ID, TESTBED_VERSION)
        self._home_directory = None
        self.slicename = None
        self._traces = dict()

        import node, interfaces, application, multicast
        self._node = node
        self._interfaces = interfaces
        self._app = application
        self._multicast = multicast

        self._blacklist = set()
        self._just_provisioned = set()

        self._load_blacklist()

        self._slice_id = None
        self._plcapi = None
        self._sliceapi = None
        self._vsys_vnet = None

        self._logger = logging.getLogger('nepi.testbeds.planetlab')

        self.recovering = False

    @property
    def home_directory(self):
        return self._home_directory

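    # The API handles below are created lazily on first use: "plcapi" wraps the
    # PlanetLab Central XML-RPC API with the configured credentials, while
    # "sliceapi" is either that same handle or an SFA (sfi) wrapper, depending
    # on whether the "sfa" attribute was set.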
    @property
    def plcapi(self):
        if not self._plcapi:
            import plcapi
            self._plcapi = plcapi.plcapi(
                    self.authUser,
                    self.authString,
                    self.plcHost,
                    self.plcUrl
                    )
        return self._plcapi

    @property
    def sliceapi(self):
        if not self._sliceapi:
            if not self.sfa:
                self._sliceapi = self.plcapi
            else:
                from nepi.util import sfiapi
                self._sliceapi = sfiapi.sfiapi(self.slice_id)
        return self._sliceapi

    @property
    def slice_id(self):
        if not self._slice_id:
            self._slice_id = self.sliceapi.GetSliceId(self.slicename)
        return self._slice_id

    @property
    def vsys_vnet(self):
        if not self._vsys_vnet:
            self._vsys_vnet = self.sliceapi.GetSliceVnetSysTag(self.slicename)
        return self._vsys_vnet

    def _load_blacklist(self):
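        # Load the persistent blacklist of unresponsive hosts from the user's
        # NEPI home path; a missing or unreadable file means an empty blacklist.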
        blpath = environ.homepath('plblacklist')

        try:
            bl = open(blpath, "r")
        except:
            self._blacklist = set()
            return

        try:
            self._blacklist = set(
                map(str.strip, bl.readlines())
            )
        finally:
            bl.close()

    def _save_blacklist(self):
        blpath = environ.homepath('plblacklist')
        bl = open(blpath, "w")
        try:
            bl.writelines(
                map('%s\n'.__mod__, self._blacklist))
        finally:
            bl.close()

    def do_setup(self):
        self._home_directory = self._attributes.\
            get_attribute_value("homeDirectory")
        self.slicename = self._attributes.\
            get_attribute_value("slice")
        self.authUser = self._attributes.\
            get_attribute_value("authUser")
        self.authString = self._attributes.\
            get_attribute_value("authPass")
        self.sliceSSHKey = self._attributes.\
            get_attribute_value("sliceSSHKey")
        self.sliceSSHKeyPass = None
        self.plcHost = self._attributes.\
            get_attribute_value("plcHost")
        self.plcUrl = self._attributes.\
            get_attribute_value("plcUrl")
        self.logLevel = self._attributes.\
            get_attribute_value("plLogLevel")
        self.tapPortBase = self._attributes.\
            get_attribute_value("tapPortBase")
        self.p2pDeployment = self._attributes.\
            get_attribute_value("p2pDeployment")
        self.dedicatedSlice = self._attributes.\
            get_attribute_value("dedicatedSlice")
        self.sfa = self._attributes.\
            get_attribute_value("sfa")
        if self.sfa:
            self._slice_id = self._attributes.\
                get_attribute_value("sliceHrn")

        if not self.slicename:
            raise RuntimeError, "Slice not set"
        if not self.authUser:
            raise RuntimeError, "PlanetLab account username not set"
        if not self.authString:
            raise RuntimeError, "PlanetLab account passphrase not set"
        if not self.sliceSSHKey:
            raise RuntimeError, "PlanetLab account key not specified"
        if not os.path.exists(self.sliceSSHKey):
            raise RuntimeError, "PlanetLab account key cannot be opened: %s" % (self.sliceSSHKey,)

        self._logger.setLevel(getattr(logging, self.logLevel))

        super(TestbedController, self).do_setup()

    def do_post_asynclaunch(self, guid):
        # Dependencies were launched asynchronously,
        # so wait for them
        dep = self._elements[guid]
        if isinstance(dep, self._app.Dependency):
            dep.async_setup_wait()

    # Two-phase configuration for asynchronous launch
    do_poststep_preconfigure = staticmethod(do_post_asynclaunch)
    do_poststep_configure = staticmethod(do_post_asynclaunch)

    def do_preconfigure(self):
        while True:
            # Perform resource discovery if we don't have
            # specific resources assigned yet
            self.do_resource_discovery()

            # Create PlanetLab slivers
            self.do_provisioning()

            try:
                # Wait for provisioning
                self.do_wait_nodes()

                # Okkey...
                break
            except self._node.UnresponsiveNodeError:
                # Oh... retry...
                pass

        if self.p2pDeployment:
            # Plan application deployment
            self.do_spanning_deployment_plan()

        # Configure elements per XML data
        super(TestbedController, self).do_preconfigure()

    def do_resource_discovery(self, recover = False):
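        # Assign a concrete PlanetLab host to every Node element that doesn't
        # have one yet: first take nodes whose filters leave a single candidate,
        # then let resourcealloc search for a globally consistent assignment,
        # preferring hosts already in the slice over ones that need provisioning.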
        to_provision = self._to_provision = set()

        reserved = set(self._blacklist)
        for guid, node in self._elements.iteritems():
            if isinstance(node, self._node.Node) and node._node_id is not None:
                reserved.add(node.hostname)

        # Initial algo:
        #   look for perfectly defined nodes
        #   (ie: those with only one candidate)
        reserve_lock = threading.RLock()
        def assignifunique(guid, node):
            # Try existing nodes first
            # If we have only one candidate, simply use it
            candidates = node.find_candidates(
                filter_slice_id = self.slice_id)

            node_id = None
            candidate_hosts = set(candidates.keys() if candidates else [])
            reserve_lock.acquire()
            try:
                candidate_hosts -= reserved
                if len(candidate_hosts) == 1:
                    hostname = iter(candidate_hosts).next()
                    node_id = candidates[hostname]
                    reserved.add(hostname)
                elif not candidate_hosts:
                    # Try again including unassigned nodes
                    reserve_lock.release()
                    try:
                        candidates = node.find_candidates()
                    finally:
                        reserve_lock.acquire()
                    candidate_hosts = set(candidates.keys() if candidates else [])
                    candidate_hosts -= reserved
                    if len(candidate_hosts) > 1:
                        return
                    if len(candidate_hosts) == 1:
                        hostname = iter(candidate_hosts).next()
                        node_id = candidates[hostname]
                        to_provision.add(node_id)
                        reserved.add(hostname)
                    elif not candidates:
                        raise RuntimeError, "Cannot assign resources for node %s, no candidates with %s" % (guid,
                            node.make_filter_description())
            finally:
                reserve_lock.release()

            if node_id is not None:
                node.assign_node_id(node_id)

        runner = ParallelRun(maxthreads=4) # don't overload the PLC API, just 4 threads to hide latencies and that's it
        runner.start()
        for guid, node in self._elements.iteritems():
            if isinstance(node, self._node.Node) and node._node_id is None:
                runner.put(assignifunique, guid, node)
        runner.sync()

        # Now do the backtracking search for a suitable solution
        # First with existing slice nodes
        reqs = []
        nodes = []
        def genreqs(node, filter_slice_id=None):
            # Try existing nodes first
            # If we have only one candidate, simply use it
            candidates = node.find_candidates(
                filter_slice_id = filter_slice_id)
            for r in reserved:
                if candidates.has_key(r):
                    del candidates[r]
            reqs.append(candidates.values())
            nodes.append(node)
        for guid, node in self._elements.iteritems():
            if isinstance(node, self._node.Node) and node._node_id is None:
                runner.put(genreqs, node, self.slice_id)
        runner.sync()

        if nodes and reqs:
            if recover:
                raise RuntimeError, "Impossible to recover: unassigned host for Nodes %r" % (nodes,)

            def pickbest(fullset, nreq, node=nodes[0]):
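                # When there are more candidates than needed, rank them with
                # node.rate_nodes() and keep only the best "nreq" hosts, so the
                # backtracking allocator works on a smaller, better set.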
                if len(fullset) > nreq:
                    fullset = zip(node.rate_nodes(fullset),fullset)
                    fullset.sort(reverse=True)
                    del fullset[nreq:]
                    return set(map(operator.itemgetter(1),fullset))
                else:
                    return fullset

            try:
                solution = resourcealloc.alloc(reqs, sample=pickbest)
            except resourcealloc.ResourceAllocationError:
                # Failed, try again with all nodes
                reqs = []
                for node in nodes:
                    runner.put(genreqs, node)
                runner.sync()
                solution = resourcealloc.alloc(reqs, sample=pickbest)
                to_provision.update(solution)

            # Do assign nodes
            for node, node_id in zip(nodes, solution):
                runner.put(node.assign_node_id, node_id)
            runner.join()

    def do_provisioning(self):
        if self._to_provision:
            # Add new nodes to the slice
            cur_nodes = self.sliceapi.GetSliceNodes(self.slice_id)
            new_nodes = list(set(cur_nodes) | self._to_provision)
            self.sliceapi.AddSliceNodes(self.slice_id, nodes=new_nodes)

        # cleanup
        self._just_provisioned = self._to_provision
        del self._to_provision

    def do_wait_nodes(self):
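        # Push SSH configuration into every Node element, then wait (in
        # parallel) for the slivers to become reachable; hosts that never come
        # up are blacklisted and unassigned before the error is re-raised.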
        for guid, node in self._elements.iteritems():
            if isinstance(node, self._node.Node):
                # Just inject configuration stuff
                node.home_path = "nepi-node-%s" % (guid,)
                node.ident_path = self.sliceSSHKey
                node.slicename = self.slicename

                # Show the magic
                self._logger.info("PlanetLab Node %s configured at %s", guid, node.hostname)

        try:
            runner = ParallelRun(maxthreads=64, maxqueue=1)
            abort = []
            def waitforit(guid, node):
                try:
                    node.wait_provisioning(
                        (20*60 if node._node_id in self._just_provisioned else 60)
                    )

                    self._logger.info("READY Node %s at %s", guid, node.hostname)

                    # Prepare dependency installer now
                    node.prepare_dependencies()
                except:
                    abort.append(None)
                    raise

            for guid, node in self._elements.iteritems():
                if abort:
                    break
                if isinstance(node, self._node.Node):
                    self._logger.info("Waiting for Node %s configured at %s", guid, node.hostname)
                    runner.put(waitforit, guid, node)
            runner.join()

        except self._node.UnresponsiveNodeError:
            # Uh...
            self._logger.warn("UNRESPONSIVE Nodes")

            # Mark all dead nodes (which are unresponsive) on the blacklist
            # and re-raise
            for guid, node in self._elements.iteritems():
                if isinstance(node, self._node.Node):
                    if not node.is_alive():
                        self._logger.warn("Blacklisting %s for unresponsiveness", node.hostname)
                        self._blacklist.add(node.hostname)
                        node.unassign_node()

            try:
                self._save_blacklist()
            except:
                # not important...
                import traceback
                traceback.print_exc()

            raise

    def do_spanning_deployment_plan(self):
        # Create application groups by collecting all applications
        # based on their hash - the hash should contain everything that
        # defines them and the platform they're built for

        def dephash(app):
            return (
                frozenset((app.depends or "").split(' ')),
                frozenset((app.sources or "").split(' ')),
                app.build,
                app.install,
                app.node.architecture,
                app.node.operatingSystem,
                app.node.pl_distro,
                app.__class__,
            )

        depgroups = collections.defaultdict(list)

        for element in self._elements.itervalues():
            if isinstance(element, self._app.Dependency):
                depgroups[dephash(element)].append(element)
            elif isinstance(element, self._node.Node):
                deps = element._yum_dependencies
                if deps:
                    depgroups[dephash(deps)].append(deps)

        # Set up spanning deployment for those applications that
        # have been deployed in several nodes.
        for dh, group in depgroups.iteritems():
            if len(group) > 1:
                # Pick root (deterministically)
                root = min(group, key=lambda app:app.node.hostname)

                # Obtain all IPs in numeric format
                # (which means faster distance computations)
                for dep in group:
                    dep._ip = socket.gethostbyname(dep.node.hostname)
                    dep._ip_n = struct.unpack('!L', socket.inet_aton(dep._ip))[0]

                # Compute plan
                # NOTE: the plan is an iterator
                plan = mst.mst(
                    group,
                    lambda a,b : ipaddr2.ipdistn(a._ip_n, b._ip_n),
                    root = root,
                    maxbranching = 2)

                # Re-cipher the private key
                try:
                    tempprk, temppuk, tmppass = self._make_temp_private_key()
                except TempKeyError:
                    continue

                # Set up slaves
                plan = list(plan)
                for slave, master in plan:
                    slave.set_master(master)
                    slave.install_keys(tempprk, temppuk, tmppass)

        # We don't need the user's passphrase anymore
        self.sliceSSHKeyPass = None

    def _make_temp_private_key(self):
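        # Copy the user's SSH identity into temporary files and re-cipher the
        # private key with a random passphrase, so it can be pushed to slave
        # nodes for peer-to-peer deployment without exposing the original
        # passphrase. Returns (private key file, public key file, passphrase).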
        # Get the user's key's passphrase
        if not self.sliceSSHKeyPass:
            if 'SSH_ASKPASS' in os.environ:
                proc = subprocess.Popen(
                    [ os.environ['SSH_ASKPASS'],
                      "Please type the passphrase for the %s SSH identity file. "
                      "The passphrase will be used to re-cipher the identity file with "
                      "a random 256-bit key for automated chain deployment on the "
                      "%s PlanetLab slice" % (
                        os.path.basename(self.sliceSSHKey),
                        self.slicename
                    ) ],
                    stdin = open("/dev/null"),
                    stdout = subprocess.PIPE,
                    stderr = subprocess.PIPE)
                out,err = proc.communicate()
                self.sliceSSHKeyPass = out.strip()

        if not self.sliceSSHKeyPass:
            raise TempKeyError

        # Create temporary key files
        prk = tempfile.NamedTemporaryFile(
            dir = self.root_directory,
            prefix = "pl_deploy_tmpk_",
            suffix = "")

        puk = tempfile.NamedTemporaryFile(
            dir = self.root_directory,
            prefix = "pl_deploy_tmpk_",
            suffix = ".pub")

        # Create a secure 256-bit temporary passphrase
        passphrase = os.urandom(32).encode("hex")

        # Copy keys
        oprk = open(self.sliceSSHKey, "rb")
        opuk = open(self.sliceSSHKey+".pub", "rb")
        shutil.copymode(oprk.name, prk.name)
        shutil.copymode(opuk.name, puk.name)
        shutil.copyfileobj(oprk, prk)
        shutil.copyfileobj(opuk, puk)
        prk.flush()
        puk.flush()
        oprk.close()
        opuk.close()

        # A descriptive comment
        comment = "%s#NEPI_INTERNAL@%s" % (self.authUser, self.slicename)

        # Recipher keys
        proc = subprocess.Popen(
            ["ssh-keygen", "-p",
             "-f", prk.name,
             "-P", self.sliceSSHKeyPass,
             "-N", passphrase,
             "-C", comment ],
            stdout = subprocess.PIPE,
            stderr = subprocess.PIPE,
            stdin = subprocess.PIPE
        )
        out, err = proc.communicate()

        if err:
            raise RuntimeError, "Problem generating keys: \n%s\n%r" % (
                out, err)

        prk.seek(0)
        puk.seek(0)

        # Change comment on public key
        puklines = puk.readlines()
        puklines[0] = puklines[0].split(' ')
        puklines[0][-1] = comment+'\n'
        puklines[0] = ' '.join(puklines[0])
        puk.seek(0)
        puk.truncate()
        puk.writelines(puklines)
        del puklines
        puk.flush()

        return prk, puk, passphrase

    def set(self, guid, name, value, time = TIME_NOW):
        super(TestbedController, self).set(guid, name, value, time)
        # TODO: take into account schedule time for the task
        element = self._elements[guid]
        if element:
            if name == "up":
                if value == True:
                    element.if_up()
                else:
                    element.if_down()

            try:
                setattr(element, name, value)
            except:
                # We ignore these errors while recovering.
                # Some attributes are immutable, and setting
                # them is necessary (to recover the state), but
                # some are not (they throw an exception).
                if not self.recovering:
                    raise

            if hasattr(element, 'refresh'):
                # invoke attribute refresh hook
                element.refresh()

    def get(self, guid, name, time = TIME_NOW):
        value = super(TestbedController, self).get(guid, name, time)
        # TODO: take into account schedule time for the task
        factory_id = self._create[guid]
        factory = self._factories[factory_id]
        element = self._elements.get(guid)
        try:
            return getattr(element, name)
        except (KeyError, AttributeError):
            return value

    def get_address(self, guid, index, attribute='Address'):
        index = int(index)

        # try the real stuff
        iface = self._elements.get(guid)
        if iface and index == 0:
            if attribute == 'Address':
                return iface.address
            elif attribute == 'NetPrefix':
                return iface.netprefix
            elif attribute == 'Broadcast':
                return iface.broadcast

        # if all else fails, query box
        return super(TestbedController, self).get_address(guid, index, attribute)

    def action(self, time, guid, action):
        raise NotImplementedError

    def shutdown(self):
        for trace in self._traces.itervalues():
            trace.close()

        def invokeif(action, testbed, guid):
            element = self._elements[guid]
            if hasattr(element, action):
                getattr(element, action)()

        self._do_in_factory_order(
            functools.partial(invokeif, 'cleanup'),
            metadata.shutdown_order)

        self._do_in_factory_order(
            functools.partial(invokeif, 'destroy'),
            metadata.shutdown_order)

        self._elements.clear()
        self._traces.clear()

    def trace(self, guid, trace_id, attribute='value'):
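        # "value" fetches the trace content from the node, while "path" and
        # "name" return the remote location without transferring anything.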
        elem = self._elements[guid]

        if attribute == 'value':
            path = elem.sync_trace(self.home_directory, trace_id)
            if path:
                fd = open(path, "r")
                content = fd.read()
                fd.close()
            else:
                content = None
        elif attribute == 'path':
            content = elem.remote_trace_path(trace_id)
        elif attribute == 'name':
            content = elem.remote_trace_name(trace_id)
        else:
            content = None
        return content

    def follow_trace(self, trace_id, trace):
        self._traces[trace_id] = trace

    def recover(self):
        try:
            # An internal flag, so we know to behave differently in
            # a few corner cases.
            self.recovering = True

            # Create and connect do not perform any real tasks against
            # the nodes, they only set up the object hierarchy,
            # so we can run them normally
            self.do_create()
            self.do_connect_init()
            self.do_connect_compl()

            # Manually recover nodes, to mark dependencies installed
            # and clean up mutable attributes
            self._do_in_factory_order(
                lambda self, guid : self._elements[guid].recover(),
                [
                    metadata.NODE,
                ])

            # Assign nodes - since we're working off the execute XML, nodes
            # have specific hostnames assigned and we don't need to do
            # real assignment, only find out node ids and check liveness
            self.do_resource_discovery(recover = True)
            self.do_wait_nodes()

            # Pre/post configure, however, tends to set up tunnels
            # Execute configuration steps only for those object
            # kinds that do not have side effects

            # Do the ones without side effects,
            # including nodes that need to set up home
            # folders and all that
            self._do_in_factory_order(
                "preconfigure_function",
                [
                    metadata.INTERNET,
                    Parallel(metadata.NODE),
                    metadata.NODEIFACE,
                ])

            # Tunnels require a home path that is configured
            # at this step. Since we cannot run the step itself,
            # we need to inject this homepath ourselves
            for guid, element in self._elements.iteritems():
                if isinstance(element, self._interfaces.TunIface):
                    element._home_path = "tun-%s" % (guid,)

            # Manually recover tunnels, applications and
            # netpipes, negating the side effects
            self._do_in_factory_order(
                lambda self, guid : self._elements[guid].recover(),
                [
                    Parallel(metadata.TAPIFACE),
                    Parallel(metadata.TUNIFACE),
                    metadata.NETPIPE,
                    Parallel(metadata.NEPIDEPENDENCY),
                    Parallel(metadata.NS3DEPENDENCY),
                    Parallel(metadata.DEPENDENCY),
                    Parallel(metadata.APPLICATION),
                    Parallel(metadata.CCNXDAEMON),
                ])

            # Tunnels are not harmed by configuration after
            # recovery, and some attributes get set this way
            # like external_iface
            self._do_in_factory_order(
                "preconfigure_function",
                [
                    Parallel(metadata.TAPIFACE),
                    Parallel(metadata.TUNIFACE),
                ])

            # Post-do the ones without side effects
            self._do_in_factory_order(
                "configure_function",
                [
                    metadata.INTERNET,
                    Parallel(metadata.NODE),
                    metadata.NODEIFACE,
                    Parallel(metadata.TAPIFACE),
                    Parallel(metadata.TUNIFACE),
                ])

            # There are no required prestart steps
            # to call upon recovery, so we're done
        finally:
            # Recovery is over: stop suppressing attribute-setting errors
            self.recovering = False

    def _make_generic(self, parameters, kind, **kwargs):
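        # Generic element factory: instantiate "kind" with the PLC API handle
        # plus any extra keyword arguments, then copy the box parameters onto
        # the new object's attributes.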
        args = dict({'api': self.plcapi})
        args.update(kwargs)
        app = kind(**args)
        app.testbed = weakref.ref(self)

        # Note: there is a 1-to-1 correspondence between parameter names
        #   and the created object's attribute names.
        #   If that changes, this has to change as well
        for attr,val in parameters.iteritems():
            try:
                setattr(app, attr, val)
            except:
                # We ignore these errors while recovering.
                # Some attributes are immutable, and setting
                # them is necessary (to recover the state), but
                # some are not (they throw an exception).
                if not self.recovering:
                    raise

        return app

    def _make_node(self, parameters):
        args = dict({'sliceapi': self.sliceapi})
        node = self._make_generic(parameters, self._node.Node, **args)
        node.enable_cleanup = self.dedicatedSlice
        return node

    def _make_node_iface(self, parameters):
        return self._make_generic(parameters, self._interfaces.NodeIface)

    def _make_tun_iface(self, parameters):
        return self._make_generic(parameters, self._interfaces.TunIface)

    def _make_tap_iface(self, parameters):
        return self._make_generic(parameters, self._interfaces.TapIface)

    def _make_netpipe(self, parameters):
        return self._make_generic(parameters, self._interfaces.NetPipe)

    def _make_internet(self, parameters):
        return self._make_generic(parameters, self._interfaces.Internet)

    def _make_application(self, parameters, clazz = None):
        if not clazz:
            clazz = self._app.Application
        return self._make_generic(parameters, clazz)

    def _make_dependency(self, parameters):
        return self._make_generic(parameters, self._app.Dependency)

    def _make_nepi_dependency(self, parameters):
        return self._make_generic(parameters, self._app.NepiDependency)

    def _make_ns3_dependency(self, parameters):
        return self._make_generic(parameters, self._app.NS3Dependency)

    def _make_tun_filter(self, parameters):
        return self._make_generic(parameters, self._interfaces.TunFilter)

    def _make_class_queue_filter(self, parameters):
        return self._make_generic(parameters, self._interfaces.ClassQueueFilter)

    def _make_tos_queue_filter(self, parameters):
        return self._make_generic(parameters, self._interfaces.ToSQueueFilter)

    def _make_multicast_forwarder(self, parameters):
        return self._make_generic(parameters, self._multicast.MulticastForwarder)

    def _make_multicast_announcer(self, parameters):
        return self._make_generic(parameters, self._multicast.MulticastAnnouncer)

    def _make_multicast_router(self, parameters):
        return self._make_generic(parameters, self._multicast.MulticastRouter)