# -*- coding: utf-8 -*-
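"""PlanetLab testbed controller for NEPI.

Implements resource discovery and node selection, slice provisioning
through the PLC/SFA APIs, dependency deployment (optionally along a
spanning tree), attribute access, traces, recovery and shutdown.
"""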

from constants import TESTBED_ID, TESTBED_VERSION
from nepi.core import testbed_impl
from nepi.core.metadata import Parallel
from nepi.util.constants import TIME_NOW
from nepi.util.graphtools import mst
from nepi.util import ipaddr2
from nepi.util import environ
from nepi.util.parallel import ParallelRun
import threading
import sys
import os
import os.path
import time
import resourcealloc
import collections
import operator
import functools
import socket
import struct
import tempfile
import subprocess
import random
import shutil
import logging
import metadata
import weakref
import util as plutil

class TempKeyError(Exception):
    pass

class TestbedController(testbed_impl.TestbedController):
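    """Testbed controller for PlanetLab experiments.

    Drives the full lifecycle of a PlanetLab slice used by a NEPI
    experiment: discovers and reserves nodes, adds them to the slice,
    waits for slivers to come up, plans dependency deployment, and
    handles attribute access, traces, recovery and shutdown.
    """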
    def __init__(self):
        super(TestbedController, self).__init__(TESTBED_ID, TESTBED_VERSION)
        self._home_directory = None
        self.slicename = None
        self._traces = dict()

        import node, interfaces, application, multicast
        self._node = node
        self._interfaces = interfaces
        self._app = application
        self._multicast = multicast

        self._blacklist = set()
        self._just_provisioned = set()

        self._load_blacklist()

        self._slice_id = None
        self._plcapi = None
        self._sliceapi = None
        self._vsys_vnet = None

        self._logger = logging.getLogger('nepi.testbeds.planetlab')

        self.recovering = False

    @property
    def home_directory(self):
        return self._home_directory

    @property
    def plcapi(self):
        if not self._plcapi:
            import plcapi
            self._plcapi = plcapi.plcapi(
                    self.authUser,
                    self.authString,
                    self.plcHost,
                    self.plcUrl,
                    self.proxy
                    )
        return self._plcapi

    @property
    def sliceapi(self):
        if not self._sliceapi:
            if not self.sfa:
                self._sliceapi = self.plcapi
            else:
                from nepi.util import sfiapi
                self._sliceapi = sfiapi.sfiapi(self.slice_id)
        return self._sliceapi

    @property
    def slice_id(self):
        if not self._slice_id:
            self._slice_id = self.sliceapi.GetSliceId(self.slicename)
        return self._slice_id

    @property
    def vsys_vnet(self):
        if not self._vsys_vnet:
            self._vsys_vnet = self.sliceapi.GetSliceVnetSysTag(self.slicename)
        return self._vsys_vnet

    def _load_blacklist(self):
        blpath = environ.homepath('plblacklist')

        try:
            bl = open(blpath, "r")
        except:
            self._blacklist = set()
            return

        try:
            self._blacklist = set(
                map(str.strip, bl.readlines())
            )
        finally:
            bl.close()

    def _save_blacklist(self):
        blpath = environ.homepath('plblacklist')
        bl = open(blpath, "w")
        try:
            bl.writelines(
                map('%s\n'.__mod__, self._blacklist))
        finally:
            bl.close()

    def do_setup(self):
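        """Read testbed attributes, validate credentials and apply the log level.

        Raises RuntimeError if the slice name, PlanetLab account
        credentials or the slice SSH key are missing.
        """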
        self._home_directory = self._attributes.\
            get_attribute_value("homeDirectory")
        self.slicename = self._attributes.\
            get_attribute_value("slice")
        self.authUser = self._attributes.\
            get_attribute_value("authUser")
        self.authString = self._attributes.\
            get_attribute_value("authPass")
        self.sliceSSHKey = self._attributes.\
            get_attribute_value("sliceSSHKey")
        self.sliceSSHKeyPass = None
        self.plcHost = self._attributes.\
            get_attribute_value("plcHost")
        self.plcUrl = self._attributes.\
            get_attribute_value("plcUrl")
        self.logLevel = self._attributes.\
            get_attribute_value("plLogLevel")
        self.proxy = self._attributes.\
            get_attribute_value("proxy")
        self.tapPortBase = self._attributes.\
            get_attribute_value("tapPortBase")
        self.p2pDeployment = self._attributes.\
            get_attribute_value("p2pDeployment")
        self.cleanProc = self._attributes.\
            get_attribute_value("cleanProc")
        self.cleanHome = self._attributes.\
            get_attribute_value("cleanHome")
        self.sfa = self._attributes.\
            get_attribute_value("sfa")
        if self.sfa:
            self._slice_id = self._attributes.\
                get_attribute_value("sliceHrn")

        if not self.slicename:
            raise RuntimeError, "Slice not set"
        if not self.authUser:
            raise RuntimeError, "PlanetLab account username not set"
        if not self.authString:
            raise RuntimeError, "PlanetLab account passphrase not set"
        if not self.sliceSSHKey:
            raise RuntimeError, "PlanetLab account key not specified"
        if not os.path.exists(self.sliceSSHKey):
            raise RuntimeError, "PlanetLab account key cannot be opened: %s" % (self.sliceSSHKey,)

        self._logger.setLevel(getattr(logging, self.logLevel))

        super(TestbedController, self).do_setup()

    def do_post_asynclaunch(self, guid):
        # Dependencies were launched asynchronously,
        # so wait for them
        dep = self._elements[guid]
        if isinstance(dep, self._app.Dependency):
            dep.async_setup_wait()

    # Two-phase configuration for asynchronous launch
    do_poststep_preconfigure = staticmethod(do_post_asynclaunch)
    do_poststep_configure = staticmethod(do_post_asynclaunch)

    def do_preconfigure(self):
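        """Discover and provision nodes, then configure elements.

        Discovery and provisioning are retried until all nodes respond;
        unresponsive hosts are blacklisted along the way by do_wait_nodes.
        If p2pDeployment is enabled, a spanning deployment plan is
        computed before the regular configuration steps run.
        """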
        while True:
            # Perform resource discovery if we don't have
            # specific resources assigned yet
            self.do_resource_discovery()

            # Create PlanetLab slivers
            self.do_provisioning()

            try:
                # Wait for provisioning
                self.do_wait_nodes()

                # All nodes came up, we're done
                break
            except self._node.UnresponsiveNodeError:
                # Some nodes are unresponsive: retry with new resources
                pass

        if self.p2pDeployment:
            # Plan application deployment
            self.do_spanning_deployment_plan()

        # Configure elements per XML data
        super(TestbedController, self).do_preconfigure()

    def do_resource_discovery(self, recover = False):
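        """Map every Node element without an assigned host to a PlanetLab node.

        Hosts already in use (or blacklisted) are reserved first; nodes with
        a single remaining candidate are assigned directly, and the rest are
        resolved with a backtracking search over the candidate sets,
        preferring hosts already attached to the slice. Hosts that are not
        yet in the slice are queued in self._to_provision for
        do_provisioning.
        """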
        to_provision = self._to_provision = set()

        reserved = set(self._blacklist)
        for guid, node in self._elements.iteritems():
            if isinstance(node, self._node.Node) and node._node_id is not None:
                reserved.add(node.hostname)

        # Initial algo:
        #   look for perfectly defined nodes
        #   (ie: those with only one candidate)
        reserve_lock = threading.RLock()
        def assignifunique(guid, node):
            # Try existing nodes first
            # If we have only one candidate, simply use it
            candidates = node.find_candidates(
                filter_slice_id = self.slice_id)

            node_id = None
            candidate_hosts = set(candidates.keys() if candidates else [])
            reserve_lock.acquire()
            try:
                candidate_hosts -= reserved
                if len(candidate_hosts) == 1:
                    hostname = iter(candidate_hosts).next()
                    node_id = candidates[hostname]
                    reserved.add(hostname)
                elif not candidate_hosts:
                    # Try again including unassigned nodes
                    reserve_lock.release()
                    try:
                        candidates = node.find_candidates()
                    finally:
                        reserve_lock.acquire()
                    candidate_hosts = set(candidates.keys() if candidates else [])
                    candidate_hosts -= reserved
                    if len(candidate_hosts) > 1:
                        return
                    if len(candidate_hosts) == 1:
                        hostname = iter(candidate_hosts).next()
                        node_id = candidates[hostname]
                        to_provision.add(node_id)
                        reserved.add(hostname)
                    elif not candidates:
                        raise RuntimeError, "Cannot assign resources for node %s, no candidates with %s" % (guid,
                            node.make_filter_description())
            finally:
                reserve_lock.release()

            if node_id is not None:
                node.assign_node_id(node_id)

        runner = ParallelRun(maxthreads=4) # don't overload the PLC API, just 4 threads to hide latencies and that's it
        runner.start()
        for guid, node in self._elements.iteritems():
            if isinstance(node, self._node.Node) and node._node_id is None:
                runner.put(assignifunique, guid, node)
        runner.sync()

        # Now do the backtracking search for a suitable solution
        # First with existing slice nodes
        reqs = []
        nodes = []
        def genreqs(node, filter_slice_id=None):
            # Try existing nodes first
            # If we have only one candidate, simply use it
            candidates = node.find_candidates(
                filter_slice_id = filter_slice_id)
            for r in reserved:
                if r in candidates:
                    del candidates[r]
            reqs.append(candidates.values())
            nodes.append(node)
        for guid, node in self._elements.iteritems():
            if isinstance(node, self._node.Node) and node._node_id is None:
                runner.put(genreqs, node, self.slice_id)
        runner.sync()

        if nodes and reqs:
            if recover:
                raise RuntimeError, "Impossible to recover: unassigned host for Nodes %r" % (nodes,)

            def pickbest(fullset, nreq, node=nodes[0]):
                if len(fullset) > nreq:
                    fullset = zip(node.rate_nodes(fullset), fullset)
                    fullset.sort(reverse=True)
                    del fullset[nreq:]
                    return set(map(operator.itemgetter(1), fullset))
                else:
                    return fullset

            try:
                solution = resourcealloc.alloc(reqs, sample=pickbest)
            except resourcealloc.ResourceAllocationError:
                # Failed, try again with all nodes
                reqs = []
                for node in nodes:
                    runner.put(genreqs, node)
                runner.sync()
                solution = resourcealloc.alloc(reqs, sample=pickbest)
                to_provision.update(solution)

            # Do assign nodes
            for node, node_id in zip(nodes, solution):
                runner.put(node.assign_node_id, node_id)
            runner.join()

    def do_provisioning(self):
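        """Add any newly selected hosts to the PlanetLab slice.

        Consumes the set built by do_resource_discovery and remembers which
        hosts were just added, so do_wait_nodes can give them a longer
        provisioning timeout.
        """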
        if self._to_provision:
            # Add new nodes to the slice
            cur_nodes = self.sliceapi.GetSliceNodes(self.slice_id)
            new_nodes = list(set(cur_nodes) | self._to_provision)
            self.sliceapi.AddSliceNodes(self.slice_id, nodes=new_nodes)

        # cleanup
        self._just_provisioned = self._to_provision
        del self._to_provision

    def do_wait_nodes(self):
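        """Configure Node elements and wait for their slivers to come up.

        Freshly provisioned hosts get a 20 minute grace period, all others
        one minute. Hosts that never respond are blacklisted and unassigned,
        and UnresponsiveNodeError is re-raised so the caller can retry
        discovery.
        """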
        for guid, node in self._elements.iteritems():
            if isinstance(node, self._node.Node):
                # Just inject configuration stuff
                node.home_path = "nepi-node-%s" % (guid,)
                node.ident_path = self.sliceSSHKey
                node.slicename = self.slicename

                # Show the magic
                self._logger.info("PlanetLab Node %s configured at %s", guid, node.hostname)

        try:
            runner = ParallelRun(maxthreads=64, maxqueue=1)
            abort = []
            def waitforit(guid, node):
                try:
                    node.wait_provisioning(
                        (20*60 if node._node_id in self._just_provisioned else 60)
                    )

                    self._logger.info("READY Node %s at %s", guid, node.hostname)

                    # Prepare dependency installer now
                    node.prepare_dependencies()
                except:
                    abort.append(None)
                    raise

            for guid, node in self._elements.iteritems():
                if abort:
                    break
                if isinstance(node, self._node.Node):
                    self._logger.info("Waiting for Node %s configured at %s", guid, node.hostname)
                    runner.put(waitforit, guid, node)
            runner.join()

        except self._node.UnresponsiveNodeError:
            # Some nodes never came up
            self._logger.warn("UNRESPONSIVE Nodes")

            # Add all unresponsive nodes to the blacklist
            # and re-raise
            for guid, node in self._elements.iteritems():
                if isinstance(node, self._node.Node):
                    if not node.is_alive():
                        self._logger.warn("Blacklisting %s for unresponsiveness", node.hostname)
                        self._blacklist.add(node.hostname)
                        node.unassign_node()

            try:
                self._save_blacklist()
            except:
                # not important...
                import traceback
                traceback.print_exc()

            raise

    def do_spanning_deployment_plan(self):
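        """Plan peer-to-peer deployment of dependencies shared by many nodes.

        Dependencies are grouped by a hash of everything that defines their
        build; for each group deployed on more than one node, a minimum
        spanning tree (keyed on IP distance, branching factor 2) decides
        which node fetches from which, and a temporarily re-ciphered copy of
        the slice SSH key is installed on the slaves so the chain can run
        unattended.
        """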
        # Create application groups by collecting all applications
        # based on their hash - the hash should contain everything that
        # defines them and the platform they're built for

        def dephash(app):
            return (
                frozenset((app.depends or "").split(' ')),
                frozenset((app.sources or "").split(' ')),
                app.build,
                app.install,
                app.node.architecture,
                app.node.operatingSystem,
                app.node.pl_distro,
                app.__class__,
            )

        depgroups = collections.defaultdict(list)

        for element in self._elements.itervalues():
            if isinstance(element, self._app.Dependency):
                depgroups[dephash(element)].append(element)
            elif isinstance(element, self._node.Node):
                deps = element._yum_dependencies
                if deps:
                    depgroups[dephash(deps)].append(deps)

        # Set up spanning deployment for those applications that
        # have been deployed in several nodes.
        for dh, group in depgroups.iteritems():
            if len(group) > 1:
                # Pick root (deterministically)
                root = min(group, key=lambda app:app.node.hostname)

                # Obtain all IPs in numeric format
                # (which means faster distance computations)
                for dep in group:
                    dep._ip = socket.gethostbyname(dep.node.hostname)
                    dep._ip_n = struct.unpack('!L', socket.inet_aton(dep._ip))[0]

                # Compute plan
                # NOTE: the plan is an iterator
                plan = mst.mst(
                    group,
                    lambda a,b : ipaddr2.ipdistn(a._ip_n, b._ip_n),
                    root = root,
                    maxbranching = 2)

                # Re-cipher the private key with a temporary passphrase
                try:
                    tempprk, temppuk, tmppass = self._make_temp_private_key()
                except TempKeyError:
                    continue

                # Set up slaves
                plan = list(plan)
                for slave, master in plan:
                    slave.set_master(master)
                    slave.install_keys(tempprk, temppuk, tmppass)

        # We don't need the user's passphrase anymore
        self.sliceSSHKeyPass = None

    def _make_temp_private_key(self):
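        """Create a throwaway copy of the slice SSH key pair for chain deployment.

        Asks for the key passphrase through SSH_ASKPASS if it is not cached,
        copies both key files into the experiment's root directory, and
        re-ciphers the private key with a random 256-bit passphrase. Returns
        (private_key_file, public_key_file, passphrase); raises TempKeyError
        if no passphrase could be obtained.
        """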
        # Get the user's key's passphrase
        if not self.sliceSSHKeyPass:
            if 'SSH_ASKPASS' in os.environ:
                proc = subprocess.Popen(
                    [ os.environ['SSH_ASKPASS'],
                      "Please type the passphrase for the %s SSH identity file. "
                      "The passphrase will be used to re-cipher the identity file with "
                      "a random 256-bit key for automated chain deployment on the "
                      "%s PlanetLab slice" % (
                        os.path.basename(self.sliceSSHKey),
                        self.slicename
                    ) ],
                    stdin = open("/dev/null"),
                    stdout = subprocess.PIPE,
                    stderr = subprocess.PIPE)
                out,err = proc.communicate()
                self.sliceSSHKeyPass = out.strip()

        if not self.sliceSSHKeyPass:
            raise TempKeyError

        # Create temporary key files
        prk = tempfile.NamedTemporaryFile(
            dir = self.root_directory,
            prefix = "pl_deploy_tmpk_",
            suffix = "")

        puk = tempfile.NamedTemporaryFile(
            dir = self.root_directory,
            prefix = "pl_deploy_tmpk_",
            suffix = ".pub")

        # Create a secure 256-bit temporary passphrase
        passphrase = os.urandom(32).encode("hex")

        # Copy keys
        oprk = open(self.sliceSSHKey, "rb")
        opuk = open(self.sliceSSHKey+".pub", "rb")
        shutil.copymode(oprk.name, prk.name)
        shutil.copymode(opuk.name, puk.name)
        shutil.copyfileobj(oprk, prk)
        shutil.copyfileobj(opuk, puk)
        prk.flush()
        puk.flush()
        oprk.close()
        opuk.close()

        # A descriptive comment
        comment = "%s#NEPI_INTERNAL@%s" % (self.authUser, self.slicename)

        # Re-cipher the private key
        proc = subprocess.Popen(
            ["ssh-keygen", "-p",
             "-f", prk.name,
             "-P", self.sliceSSHKeyPass,
             "-N", passphrase,
             "-C", comment ],
            stdout = subprocess.PIPE,
            stderr = subprocess.PIPE,
            stdin = subprocess.PIPE
        )
        out, err = proc.communicate()

        if err:
            raise RuntimeError, "Problem generating keys: \n%s\n%r" % (
                out, err)

        prk.seek(0)
        puk.seek(0)

        # Change comment on public key
        puklines = puk.readlines()
        puklines[0] = puklines[0].split(' ')
        puklines[0][-1] = comment+'\n'
        puklines[0] = ' '.join(puklines[0])
        puk.seek(0)
        puk.truncate()
        puk.writelines(puklines)
        del puklines
        puk.flush()

        return prk, puk, passphrase

    def set(self, guid, name, value, time = TIME_NOW):
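        """Set an attribute on the element identified by guid.

        The "up" attribute is mapped to if_up()/if_down() on interfaces.
        Attribute errors are ignored while recovering, since some
        immutable attributes still need to be replayed.
        """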
        super(TestbedController, self).set(guid, name, value, time)
        # TODO: take into account the schedule time for the task
        element = self._elements[guid]
        if element:
            if name == "up":
                if value == True:
                    element.if_up()
                else:
                    element.if_down()

            try:
                setattr(element, name, value)
            except:
                # We ignore these errors while recovering.
                # Setting attributes is necessary to recover the state,
                # but some of them are immutable and raise an exception
                # when re-set.
                if not self.recovering:
                    raise

            if hasattr(element, 'refresh'):
                # invoke attribute refresh hook
                element.refresh()

    def get(self, guid, name, time = TIME_NOW):
        value = super(TestbedController, self).get(guid, name, time)
        # TODO: take into account the schedule time for the task
        factory_id = self._create[guid]
        factory = self._factories[factory_id]
        element = self._elements.get(guid)
        try:
            return getattr(element, name)
        except (KeyError, AttributeError):
            return value

    def get_address(self, guid, index, attribute='Address'):
        index = int(index)

        # try the live element first
        iface = self._elements.get(guid)
        if iface and index == 0:
            if attribute == 'Address':
                return iface.address
            elif attribute == 'NetPrefix':
                return iface.netprefix
            elif attribute == 'Broadcast':
                return iface.broadcast

        # if all else fails, query the box description
        return super(TestbedController, self).get_address(guid, index, attribute)

    def action(self, time, guid, action):
        raise NotImplementedError

    def shutdown(self):
        for trace in self._traces.itervalues():
            trace.close()

        def invokeif(action, testbed, guid):
            element = self._elements[guid]
            if hasattr(element, action):
                getattr(element, action)()

        self._do_in_factory_order(
            functools.partial(invokeif, 'cleanup'),
            metadata.shutdown_order)

        self._do_in_factory_order(
            functools.partial(invokeif, 'destroy'),
            metadata.shutdown_order)

        self._elements.clear()
        self._traces.clear()

    def trace(self, guid, trace_id, attribute='value'):
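        """Return trace data for an element.

        attribute may be 'value' (trace contents, synced from the node),
        'path' (remote path of the trace file) or 'name' (remote file
        name); anything else returns None.
        """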
        elem = self._elements[guid]

        if attribute == 'value':
            path = elem.sync_trace(self.home_directory, trace_id)
            if path:
                fd = open(path, "r")
                content = fd.read()
                fd.close()
            else:
                content = None
        elif attribute == 'path':
            content = elem.remote_trace_path(trace_id)
        elif attribute == 'name':
            content = elem.remote_trace_name(trace_id)
        else:
            content = None
        return content

    def follow_trace(self, trace_id, trace):
        self._traces[trace_id] = trace

    def recover(self):
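        """Rebuild controller state for an already-deployed experiment.

        Re-creates the object hierarchy, re-runs recovery hooks and only
        those configuration steps that have no side effects on the nodes,
        so an existing deployment can be taken over without redeploying.
        """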
        try:
            # An internal flag, so we know to behave differently in
            # a few corner cases.
            self.recovering = True

            # Create and connect do not perform any real tasks against
            # the nodes, they only set up the object hierarchy,
            # so we can run them normally
            self.do_create()
            self.do_connect_init()
            self.do_connect_compl()

            # Manually recover nodes, to mark dependencies installed
            # and clean up mutable attributes
            self._do_in_factory_order(
                lambda self, guid : self._elements[guid].recover(),
                [
                    metadata.NODE,
                ])

            # Assign nodes - since we're working off the execute XML, nodes
            # have specific hostnames assigned and we don't need to do
            # real assignment, only find out node ids and check liveness
            self.do_resource_discovery(recover = True)
            self.do_wait_nodes()

            # Pre/post configure, however, tends to set up tunnels
            # Execute configuration steps only for those object
            # kinds that do not have side effects

            # Do the ones without side effects,
            # including nodes that need to set up home
            # folders and all that
            self._do_in_factory_order(
                "preconfigure_function",
                [
                    metadata.INTERNET,
                    Parallel(metadata.NODE),
                    metadata.NODEIFACE,
                ])

            # Tunnels require a home path that is configured
            # at this step. Since we cannot run the step itself,
            # we need to inject this homepath ourselves
            for guid, element in self._elements.iteritems():
                if isinstance(element, self._interfaces.TunIface):
                    element._home_path = "tun-%s" % (guid,)

            # Manually recover tunnels, applications and
            # netpipes, negating the side effects
            self._do_in_factory_order(
                lambda self, guid : self._elements[guid].recover(),
                [
                    Parallel(metadata.TAPIFACE),
                    Parallel(metadata.TUNIFACE),
                    metadata.NETPIPE,
                    Parallel(metadata.NEPIDEPENDENCY),
                    Parallel(metadata.NS3DEPENDENCY),
                    Parallel(metadata.DEPENDENCY),
                    Parallel(metadata.APPLICATION),
                    Parallel(metadata.CCNXDAEMON),
                ])

            # Tunnels are not harmed by configuration after
            # recovery, and some attributes get set this way
            # like external_iface
            self._do_in_factory_order(
                "preconfigure_function",
                [
                    Parallel(metadata.TAPIFACE),
                    Parallel(metadata.TUNIFACE),
                ])

            # Post-do the ones without side effects
            self._do_in_factory_order(
                "configure_function",
                [
                    metadata.INTERNET,
                    Parallel(metadata.NODE),
                    metadata.NODEIFACE,
                    Parallel(metadata.TAPIFACE),
                    Parallel(metadata.TUNIFACE),
                ])

            # There are no required prestart steps
            # to call upon recovery, so we're done
        finally:
            self.recovering = False

    def _make_generic(self, parameters, kind, **kwargs):
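        """Instantiate an element of class `kind` and apply its parameters.

        Every box attribute name is set verbatim on the new element;
        attribute errors are tolerated while recovering, for the same
        reason as in set().
        """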
        args = dict({'api': self.plcapi})
        args.update(kwargs)
        app = kind(**args)
        app.testbed = weakref.ref(self)

        # Note: there is a 1-to-1 correspondence between box attribute
        #   names and element attribute names.
        #   If that changes, this has to change as well
        for attr,val in parameters.iteritems():
            try:
                setattr(app, attr, val)
            except:
                # We ignore these errors while recovering.
                # Setting attributes is necessary to recover the state,
                # but some of them are immutable and raise an exception
                # when re-set.
                if not self.recovering:
                    raise

        return app

    def _make_node(self, parameters):
        args = dict({'sliceapi': self.sliceapi})
        node = self._make_generic(parameters, self._node.Node, **args)
        node.enable_proc_cleanup = self.cleanProc
        node.enable_home_cleanup = self.cleanHome
        return node

    def _make_node_iface(self, parameters):
        return self._make_generic(parameters, self._interfaces.NodeIface)

    def _make_tun_iface(self, parameters):
        return self._make_generic(parameters, self._interfaces.TunIface)

    def _make_tap_iface(self, parameters):
        return self._make_generic(parameters, self._interfaces.TapIface)

    def _make_netpipe(self, parameters):
        return self._make_generic(parameters, self._interfaces.NetPipe)

    def _make_internet(self, parameters):
        return self._make_generic(parameters, self._interfaces.Internet)

    def _make_application(self, parameters, clazz = None):
        if not clazz:
            clazz = self._app.Application
        return self._make_generic(parameters, clazz)

    def _make_dependency(self, parameters):
        return self._make_generic(parameters, self._app.Dependency)

    def _make_nepi_dependency(self, parameters):
        return self._make_generic(parameters, self._app.NepiDependency)

    def _make_ns3_dependency(self, parameters):
        return self._make_generic(parameters, self._app.NS3Dependency)

    def _make_tun_filter(self, parameters):
        return self._make_generic(parameters, self._interfaces.TunFilter)

    def _make_class_queue_filter(self, parameters):
        return self._make_generic(parameters, self._interfaces.ClassQueueFilter)

    def _make_logging_class_queue_filter(self, parameters):
        return self._make_generic(parameters, self._interfaces.LoggingClassQueueFilter)

    def _make_tos_queue_filter(self, parameters):
        return self._make_generic(parameters, self._interfaces.ToSQueueFilter)

    def _make_multicast_forwarder(self, parameters):
        return self._make_generic(parameters, self._multicast.MulticastForwarder)

    def _make_multicast_announcer(self, parameters):
        return self._make_generic(parameters, self._multicast.MulticastAnnouncer)

    def _make_multicast_router(self, parameters):
        return self._make_generic(parameters, self._multicast.MulticastRouter)