store hostname in plblacklist instead of node_id
[nepi.git] / src / nepi / testbeds / planetlab / execute.py
1 # -*- coding: utf-8 -*-
2
3 from constants import TESTBED_ID, TESTBED_VERSION
4 from nepi.core import testbed_impl
5 from nepi.core.metadata import Parallel
6 from nepi.util.constants import TIME_NOW
7 from nepi.util.graphtools import mst
8 from nepi.util import ipaddr2
9 from nepi.util import environ
10 from nepi.util.parallel import ParallelRun
11 import threading
12 import sys
13 import os
14 import os.path
15 import time
16 import resourcealloc
17 import collections
18 import operator
19 import functools
20 import socket
21 import struct
22 import tempfile
23 import subprocess
24 import random
25 import shutil
26 import logging
27 import metadata
28 import weakref
29 import util as plutil
30
31 class TempKeyError(Exception):
32     pass
33
34 class TestbedController(testbed_impl.TestbedController):
35     def __init__(self):
36         super(TestbedController, self).__init__(TESTBED_ID, TESTBED_VERSION)
37         self._home_directory = None
38         self.slicename = None
39         self._traces = dict()
40
41         import node, interfaces, application, multicast
42         self._node = node
43         self._interfaces = interfaces
44         self._app = application
45         self._multicast = multicast
46         
47         self._blacklist = set()
48         self._just_provisioned = set()
49         
50         self._load_blacklist()
51
52         self._slice_id = None
53         self._plcapi = None
54         self._sliceapi = None
55         self._vsys_vnet = None
56
57         self._logger = logging.getLogger('nepi.testbeds.planetlab')
58         
59         self.recovering = False
60
61     @property
62     def home_directory(self):
63         return self._home_directory
64
65     @property
66     def plcapi(self):
67         if not self._plcapi:
68             import plcapi
69             self._plcapi = plcapi.plcapi(
70                     self.authUser,
71                     self.authString,
72                     self.plcHost,
73                     self.plcUrl
74                     )
75         return self._plcapi
76
77     @property
78     def sliceapi(self):
79         if not self._sliceapi:
80             if not self.sfa:
81                 self._sliceapi = self.plcapi
82             else:
83                 import sfiapi
84                 self._sliceapi = sfiapi.sfiapi()
85         return self._sliceapi
86
87     @property
88     def slice_id(self):
89         if not self._slice_id:
90             self._slice_id = self.plcapi.GetSliceId(self.slicename)
91         return self._slice_id
92     
93     @property
94     def vsys_vnet(self):
95         if not self._vsys_vnet:
96             self._vsys_vnet = self.sliceapi.GetSliceVnetSysTag(self.slicename)
97         return self._vsys_vnet
98
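    # Node blacklist handling: the 'plblacklist' file in the NEPI home
    # directory stores one blacklisted hostname per line, e.g.:
    #   planetlab1.example.org
    #   planetlab2.example.org
    # Unresponsive nodes are added to it in do_wait_nodes and excluded
    # from resource discovery on later runs.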
99     def _load_blacklist(self):
100         blpath = environ.homepath('plblacklist')
101         
102         try:
103             bl = open(blpath, "r")
104         except:
105             self._blacklist = set()
106             return
107             
108         try:
109             self._blacklist = set(
110                 map(str.strip, bl.readlines())
111             )
112         finally:
113             bl.close()
114     
115     def _save_blacklist(self):
116         blpath = environ.homepath('plblacklist')
117         bl = open(blpath, "w")
118         try:
119             bl.writelines(
120                 map('%s\n'.__mod__, self._blacklist))
121         finally:
122             bl.close()
123     
124     def do_setup(self):
125         self._home_directory = self._attributes.\
126             get_attribute_value("homeDirectory")
127         self.slicename = self._attributes.\
128             get_attribute_value("slice")
129         self.authUser = self._attributes.\
130             get_attribute_value("authUser")
131         self.authString = self._attributes.\
132             get_attribute_value("authPass")
133         self.sliceSSHKey = self._attributes.\
134             get_attribute_value("sliceSSHKey")
135         self.sliceSSHKeyPass = None
136         self.plcHost = self._attributes.\
137             get_attribute_value("plcHost")
138         self.plcUrl = self._attributes.\
139             get_attribute_value("plcUrl")
140         self.logLevel = self._attributes.\
141             get_attribute_value("plLogLevel")
142         self.tapPortBase = self._attributes.\
143             get_attribute_value("tapPortBase")
144         self.p2pDeployment = self._attributes.\
145             get_attribute_value("p2pDeployment")
146         self.dedicatedSlice = self._attributes.\
147             get_attribute_value("dedicatedSlice")
148         self.sfa = self._attributes.\
149             get_attribute_value("sfa")
150         if self.sfa:
151             self._slice_id = self._attributes.\
152             get_attribute_value("sliceHrn")
153
154         if not self.slicename:
155             raise RuntimeError, "Slice not set"
156         if not self.authUser:
157             raise RuntimeError, "PlanetLab account username not set"
158         if not self.authString:
159             raise RuntimeError, "PlanetLab account passphrase not set"
160         if not self.sliceSSHKey:
161             raise RuntimeError, "PlanetLab account key not specified"
162         if not os.path.exists(self.sliceSSHKey):
163             raise RuntimeError, "PlanetLab account key cannot be opened: %s" % (self.sliceSSHKey,)
164         
165         self._logger.setLevel(getattr(logging,self.logLevel))
166         
167         super(TestbedController, self).do_setup()
168
169     def do_post_asynclaunch(self, guid):
170         # Dependencies were launched asynchronously,
171         # so wait for them
172         dep = self._elements[guid]
173         if isinstance(dep, self._app.Dependency):
174             dep.async_setup_wait()
175     
176     # Two-phase configuration for asynchronous launch
177     do_poststep_preconfigure = staticmethod(do_post_asynclaunch)
178     do_poststep_configure = staticmethod(do_post_asynclaunch)
179
180     def do_preconfigure(self):
181         while True:
182             # Perform resource discovery if we don't have
183             # specific resources assigned yet
184             self.do_resource_discovery()
185
186             # Create PlanetLab slivers
187             self.do_provisioning()
188             
189             try:
190                 # Wait for provisioning
191                 self.do_wait_nodes()
192                 
193                 # Provisioning succeeded
194                 break
195             except self._node.UnresponsiveNodeError:
196                 # Some nodes were unresponsive; retry discovery and provisioning
197                 pass
198         
199         if self.p2pDeployment:
200             # Plan application deployment
201             self.do_spanning_deployment_plan()
202
203         # Configure elements per XML data
204         super(TestbedController, self).do_preconfigure()
205
206     def do_resource_discovery(self, recover = False):
207         to_provision = self._to_provision = set()
208         
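        # Assignment strategy: reserve blacklisted hosts and hosts already
        # pinned to a node, then assign nodes that have a single free
        # candidate, and finally let the backtracking allocator below pick
        # hosts for the rest, preferring hosts already in the slice.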
209         reserved = set(self._blacklist)
210         for guid, node in self._elements.iteritems():
211             if isinstance(node, self._node.Node) and node._node_id is not None:
212                 reserved.add(node.hostname)
213         
214         # Initial algorithm:
215         #   look for perfectly defined nodes
216         #   (i.e. those with only one candidate)
217         reserve_lock = threading.RLock()
218         def assignifunique(guid, node):
219             # Try existing nodes first
220             # If we have only one candidate, simply use it
221             candidates = node.find_candidates(
222                 filter_slice_id = self.slice_id)
223             
224             node_id = None
225             candidate_hosts = set(candidates.keys() if candidates else [])
226             reserve_lock.acquire()
227             try:
228                 candidate_hosts -= reserved
229                 if len(candidate_hosts) == 1:
230                     hostname = iter(candidate_hosts).next()
231                     node_id = candidates[hostname]
232                     reserved.add(hostname)
233                 elif not candidate_hosts:
234                     # Try again including unassigned nodes
235                     reserve_lock.release()
236                     try:
237                         candidates = node.find_candidates()
238                     finally:
239                         reserve_lock.acquire()
240                     candidate_hosts = set(candidates.keys() if candidates else [])
241                     candidate_hosts -= reserved
242                     if len(candidate_hosts) > 1:
243                         return
244                     if len(candidate_hosts) == 1:
245                         hostname = iter(candidate_hosts).next()
246                         node_id = candidates[hostname]
247                         to_provision.add(node_id)
248                         reserved.add(hostname)
249                     elif not candidates:
250                         raise RuntimeError, "Cannot assign resources for node %s, no candidates with %s" % (guid,
251                             node.make_filter_description())
252             finally:
253                 reserve_lock.release()
254            
255             if node_id is not None:
256                 node.assign_node_id(node_id)
257         
258         runner = ParallelRun(maxthreads=4) # don't overload the PLC API; 4 threads are enough to hide latencies
259         runner.start()
260         for guid, node in self._elements.iteritems():
261             if isinstance(node, self._node.Node) and node._node_id is None:
262                 runner.put(assignifunique, guid, node)
263         runner.sync()
264         
265         # Now do the backtracking search for a suitable solution
266         # First with existing slice nodes
267         reqs = []
268         nodes = []
269         def genreqs(node, filter_slice_id=None):
270             # Build the list of candidate node ids for this node,
271             # excluding hosts already reserved
272             candidates = node.find_candidates(
273                 filter_slice_id = filter_slice_id)
274             for r in reserved:
275                 if candidates.has_key(r):
276                     del candidates[r]
277             reqs.append(candidates.values())
278             nodes.append(node)
279         for guid, node in self._elements.iteritems():
280             if isinstance(node, self._node.Node) and node._node_id is None:
281                 runner.put(genreqs, node, self.slice_id)
282         runner.sync()
283        
284         if nodes and reqs:
285             if recover:
286                 raise RuntimeError, "Impossible to recover: unassigned host for Nodes %r" % (nodes,)
287
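            # pickbest trims an oversized candidate set down to the nreq
            # best-rated hosts (per node.rate_nodes) so the backtracking
            # allocator keeps a tractable search space.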
288             def pickbest(fullset, nreq, node=nodes[0]):
289                 if len(fullset) > nreq:
290                     fullset = zip(node.rate_nodes(fullset),fullset)
291                     fullset.sort(reverse=True)
292                     del fullset[nreq:]
293                     return set(map(operator.itemgetter(1),fullset))
294                 else:
295                     return fullset
296             
297             try:
298                 solution = resourcealloc.alloc(reqs, sample=pickbest)
299             except resourcealloc.ResourceAllocationError:
300                 # Failed, try again with all nodes
301                 reqs = []
302                 for node in nodes:
303                     runner.put(genreqs, node)
304                 runner.sync()
305                 solution = resourcealloc.alloc(reqs, sample=pickbest)
306                 to_provision.update(solution)
307             
308             # Do assign nodes
309             for node, node_id in zip(nodes, solution):
310                 runner.put(node.assign_node_id, node_id)
311             runner.join()
312
313     def do_provisioning(self):
314         if self._to_provision:
315             # Add new nodes to the slice
316             cur_nodes = self.sliceapi.GetSliceNodes(self.slice_id)
317             new_nodes = list(set(cur_nodes) | self._to_provision)
318             self.sliceapi.AddSliceNodes(self.slice_id, nodes=new_nodes)
319
320         # Record which nodes were just provisioned (they get extra boot time) and clear the pending set
321         self._just_provisioned = self._to_provision
322         del self._to_provision
323     
324     def do_wait_nodes(self):
325         for guid, node in self._elements.iteritems():
326             if isinstance(node, self._node.Node):
327                 # Just inject configuration stuff
328                 node.home_path = "nepi-node-%s" % (guid,)
329                 node.ident_path = self.sliceSSHKey
330                 node.slicename = self.slicename
331             
332                 # Log the node assignment
333                 self._logger.info("PlanetLab Node %s configured at %s", guid, node.hostname)
334         
335         try:
336             runner = ParallelRun(maxthreads=64, maxqueue=1)
337             abort = []
338             def waitforit(guid, node):
339                 try:
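                    # Freshly provisioned nodes get up to 20 minutes to come
                    # up; nodes already in the slice only get 60 seconds.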
340                     node.wait_provisioning(
341                         (20*60 if node._node_id in self._just_provisioned else 60)
342                     )
343                     
344                     self._logger.info("READY Node %s at %s", guid, node.hostname)
345                     
346                     # Prepare dependency installer now
347                     node.prepare_dependencies()
348                 except:
349                     abort.append(None)
350                     raise
351                 
352             for guid, node in self._elements.iteritems():
353                 if abort:
354                     break
355                 if isinstance(node, self._node.Node):
356                     self._logger.info("Waiting for Node %s configured at %s", guid, node.hostname)
357                     runner.put(waitforit, guid, node)
358             runner.join()
359                     
360         except self._node.UnresponsiveNodeError:
361             # Some nodes did not come up in time
362             self._logger.warn("UNRESPONSIVE Nodes")
363             
364             # Blacklist all unresponsive nodes by hostname, unassign them,
365             # and re-raise
366             for guid, node in self._elements.iteritems():
367                 if isinstance(node, self._node.Node):
368                     if not node.is_alive():
369                         self._logger.warn("Blacklisting %s for unresponsiveness", node.hostname)
370                         self._blacklist.add(node.hostname)
371                         node.unassign_node()
372             
373             try:
374                 self._save_blacklist()
375             except:
376                 # not important...
377                 import traceback
378                 traceback.print_exc()
379             
380             raise
381     
382     def do_spanning_deployment_plan(self):
383         # Create application groups by collecting all applications
384         # based on their hash - the hash should contain everything that
385         # defines them and the platform they're built for
386         
387         def dephash(app):
388             return (
389                 frozenset((app.depends or "").split(' ')),
390                 frozenset((app.sources or "").split(' ')),
391                 app.build,
392                 app.install,
393                 app.node.architecture,
394                 app.node.operatingSystem,
395                 app.node.pl_distro,
396                 app.__class__,
397             )
398         
399         depgroups = collections.defaultdict(list)
400         
401         for element in self._elements.itervalues():
402             if isinstance(element, self._app.Dependency):
403                 depgroups[dephash(element)].append(element)
404             elif isinstance(element, self._node.Node):
405                 deps = element._yum_dependencies
406                 if deps:
407                     depgroups[dephash(deps)].append(deps)
408         
409         # Set up spanning deployment for those applications that
410         # have been deployed in several nodes.
411         for dh, group in depgroups.iteritems():
412             if len(group) > 1:
413                 # Pick root (deterministically)
414                 root = min(group, key=lambda app:app.node.hostname)
415                 
416                 # Obtain all IPs in numeric format
417                 # (which means faster distance computations)
418                 for dep in group:
419                     dep._ip = socket.gethostbyname(dep.node.hostname)
420                     dep._ip_n = struct.unpack('!L', socket.inet_aton(dep._ip))[0]
421                 
422                 # Compute plan
423                 # NOTE: the plan is an iterator
424                 plan = mst.mst(
425                     group,
426                     lambda a,b : ipaddr2.ipdistn(a._ip_n, b._ip_n),
427                     root = root,
428                     maxbranching = 2)
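                # The plan yields (slave, master) pairs: each slave reuses
                # its master's build instead of building independently, with
                # at most two slaves hanging off each master.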
429                 
430                 # Re-cipher the private key with a temporary passphrase
431                 try:
432                     tempprk, temppuk, tmppass = self._make_temp_private_key()
433                 except TempKeyError:
434                     continue
435                 
436                 # Set up slaves
437                 plan = list(plan)
438                 for slave, master in plan:
439                     slave.set_master(master)
440                     slave.install_keys(tempprk, temppuk, tmppass)
441                     
442         # We don't need the user's passphrase anymore
443         self.sliceSSHKeyPass = None
444     
445     def _make_temp_private_key(self):
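        # Make a temporary copy of the slice SSH key pair, re-ciphered with
        # a random one-time passphrase, so deployment slaves never see the
        # user's real passphrase.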
446         # Get the user's key's passphrase
447         if not self.sliceSSHKeyPass:
448             if 'SSH_ASKPASS' in os.environ:
449                 proc = subprocess.Popen(
450                     [ os.environ['SSH_ASKPASS'],
451                       "Please type the passphrase for the %s SSH identity file. "
452                       "The passphrase will be used to re-cipher the identity file with "
453                       "a random 256-bit key for automated chain deployment on the "
454                       "%s PlanetLab slice" % ( 
455                         os.path.basename(self.sliceSSHKey), 
456                         self.slicename
457                     ) ],
458                     stdin = open("/dev/null"),
459                     stdout = subprocess.PIPE,
460                     stderr = subprocess.PIPE)
461                 out,err = proc.communicate()
462                 self.sliceSSHKeyPass = out.strip()
463         
464         if not self.sliceSSHKeyPass:
465             raise TempKeyError
466         
467         # Create temporary key files
468         prk = tempfile.NamedTemporaryFile(
469             dir = self.root_directory,
470             prefix = "pl_deploy_tmpk_",
471             suffix = "")
472
473         puk = tempfile.NamedTemporaryFile(
474             dir = self.root_directory,
475             prefix = "pl_deploy_tmpk_",
476             suffix = ".pub")
477             
478         # Create a secure 256-bit temporary passphrase
479         passphrase = os.urandom(32).encode("hex")
480                 
481         # Copy keys
482         oprk = open(self.sliceSSHKey, "rb")
483         opuk = open(self.sliceSSHKey+".pub", "rb")
484         shutil.copymode(oprk.name, prk.name)
485         shutil.copymode(opuk.name, puk.name)
486         shutil.copyfileobj(oprk, prk)
487         shutil.copyfileobj(opuk, puk)
488         prk.flush()
489         puk.flush()
490         oprk.close()
491         opuk.close()
492         
493         # A descriptive comment
494         comment = "%s#NEPI_INTERNAL@%s" % (self.authUser, self.slicename)
495         
496         # Recipher keys
497         proc = subprocess.Popen(
498             ["ssh-keygen", "-p",
499              "-f", prk.name,
500              "-P", self.sliceSSHKeyPass,
501              "-N", passphrase,
502              "-C", comment ],
503             stdout = subprocess.PIPE,
504             stderr = subprocess.PIPE,
505             stdin = subprocess.PIPE
506         )
507         out, err = proc.communicate()
508         
509         if err:
510             raise RuntimeError, "Problem generating keys: \n%s\n%r" % (
511                 out, err)
512         
513         prk.seek(0)
514         puk.seek(0)
515         
516         # Change comment on public key
517         puklines = puk.readlines()
518         puklines[0] = puklines[0].split(' ')
519         puklines[0][-1] = comment+'\n'
520         puklines[0] = ' '.join(puklines[0])
521         puk.seek(0)
522         puk.truncate()
523         puk.writelines(puklines)
524         del puklines
525         puk.flush()
526         
527         return prk, puk, passphrase
528     
529     def set(self, guid, name, value, time = TIME_NOW):
530         super(TestbedController, self).set(guid, name, value, time)
531         # TODO: take into account the schedule time for the task
532         element = self._elements[guid]
533         if element:
534             try:
535                 setattr(element, name, value)
536             except:
537                 # We ignore these errors while recovering.
538                 # Some attributes are immutable, and setting
539                 # them is necessary (to recover the state), but
540                 # some are not (they throw an exception).
541                 if not self.recovering:
542                     raise
543
544             if hasattr(element, 'refresh'):
545                 # invoke attribute refresh hook
546                 element.refresh()
547
548     def get(self, guid, name, time = TIME_NOW):
549         value = super(TestbedController, self).get(guid, name, time)
550         # TODO: take into account the schedule time for the task
551         factory_id = self._create[guid]
552         factory = self._factories[factory_id]
553         element = self._elements.get(guid)
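        # Prefer the live element's attribute; fall back to the value
        # recorded by the superclass if the element doesn't expose it.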
554         try:
555             return getattr(element, name)
556         except (KeyError, AttributeError):
557             return value
558
559     def get_address(self, guid, index, attribute='Address'):
560         index = int(index)
561
562         # try the real stuff
563         iface = self._elements.get(guid)
564         if iface and index == 0:
565             if attribute == 'Address':
566                 return iface.address
567             elif attribute == 'NetPrefix':
568                 return iface.netprefix
569             elif attribute == 'Broadcast':
570                 return iface.broadcast
571
572         # if all else fails, query box
573         return super(TestbedController, self).get_address(guid, index, attribute)
574
575     def action(self, time, guid, action):
576         raise NotImplementedError
577
578     def shutdown(self):
579         for trace in self._traces.itervalues():
580             trace.close()
581         
582         def invokeif(action, testbed, guid):
583             element = self._elements[guid]
584             if hasattr(element, action):
585                 getattr(element, action)()
586         
587         self._do_in_factory_order(
588             functools.partial(invokeif, 'cleanup'),
589             metadata.shutdown_order)
590
591         self._do_in_factory_order(
592             functools.partial(invokeif, 'destroy'),
593             metadata.shutdown_order)
594             
595         self._elements.clear()
596         self._traces.clear()
597
598     def trace(self, guid, trace_id, attribute='value'):
599         elem = self._elements[guid]
600
601         if attribute == 'value':
602             path = elem.sync_trace(self.home_directory, trace_id)
603             if path:
604                 fd = open(path, "r")
605                 content = fd.read()
606                 fd.close()
607             else:
608                 content = None
609         elif attribute == 'path':
610             content = elem.remote_trace_path(trace_id)
611         elif attribute == 'name':
612             content = elem.remote_trace_name(trace_id)
613         else:
614             content = None
615         return content
616
617     def follow_trace(self, trace_id, trace):
618         self._traces[trace_id] = trace
619
620     def recover(self):
621         try:
622             # An internal flag, so we know to behave differently in
623             # a few corner cases.
624             self.recovering = True
625             
626             # Create and connect do not perform any real tasks against
627             # the nodes, they only set up the object hierarchy,
628             # so we can run them normally
629             self.do_create()
630             self.do_connect_init()
631             self.do_connect_compl()
632             
633             # Manually recover nodes, to mark dependencies installed
634             # and clean up mutable attributes
635             self._do_in_factory_order(
636                 lambda self, guid : self._elements[guid].recover(), 
637                 [
638                     metadata.NODE,
639                 ])
640             
641             # Assign nodes - since we're working off execute XML, nodes
642             # have specific hostnames assigned and we don't need to do
643             # real assignment, only find out node ids and check liveness
644             self.do_resource_discovery(recover = True)
645             self.do_wait_nodes()
646             
647             # Pre/post configure steps, however, tend to set up tunnels.
648             # Execute configuration steps only for those object
649             # kinds that do not have side effects
650             
651             # Do the ones without side effects,
652             # including nodes that need to set up home
653             # folders and related state
654             self._do_in_factory_order(
655                 "preconfigure_function", 
656                 [
657                     metadata.INTERNET,
658                     Parallel(metadata.NODE),
659                     metadata.NODEIFACE,
660                 ])
661             
662             # Tunnels require a home path that is configured
663             # at this step. Since we cannot run the step itself,
664             # we need to inject this homepath ourselves
665             for guid, element in self._elements.iteritems():
666                 if isinstance(element, self._interfaces.TunIface):
667                     element._home_path = "tun-%s" % (guid,)
668             
669             # Manually recover tunnels, applications and
670             # netpipes, negating the side effects
671             self._do_in_factory_order(
672                 lambda self, guid : self._elements[guid].recover(), 
673                 [
674                     Parallel(metadata.TAPIFACE),
675                     Parallel(metadata.TUNIFACE),
676                     metadata.NETPIPE,
677                     Parallel(metadata.NEPIDEPENDENCY),
678                     Parallel(metadata.NS3DEPENDENCY),
679                     Parallel(metadata.DEPENDENCY),
680                     Parallel(metadata.APPLICATION),
681                 ])
682
683             # Tunnels are not harmed by configuration after
684             # recovery, and some attributes get set this way
685             # like external_iface
686             self._do_in_factory_order(
687                 "preconfigure_function", 
688                 [
689                     Parallel(metadata.TAPIFACE),
690                     Parallel(metadata.TUNIFACE),
691                 ])
692
693             # Post-do the ones without side effects
694             self._do_in_factory_order(
695                 "configure_function", 
696                 [
697                     metadata.INTERNET,
698                     Parallel(metadata.NODE),
699                     metadata.NODEIFACE,
700                     Parallel(metadata.TAPIFACE),
701                     Parallel(metadata.TUNIFACE),
702                 ])
703             
704             # There are no required prestart steps
705             # to call upon recovery, so we're done
706         finally:
707             self.recovering = False
708     
709     def _make_generic(self, parameters, kind, **kwargs):
710         args = dict({'api': self.plcapi})
711         args.update(kwargs)
712         app = kind(**args)
713         app.testbed = weakref.ref(self)
714
715         # Note: there is a 1-to-1 correspondence between parameter names and element attribute names
716         #   If that changes, this has to change as well
717         for attr,val in parameters.iteritems():
718             try:
719                 setattr(app, attr, val)
720             except:
721                 # We ignore these errors while recovering.
722                 # Some attributes are immutable, and setting
723                 # them is necessary (to recover the state), but
724                 # some are not (they throw an exception).
725                 if not self.recovering:
726                     raise
727
728         return app
729
730     def _make_node(self, parameters):
731         args = dict({'sliceapi': self.sliceapi})
732         node = self._make_generic(parameters, self._node.Node, **args)
733         node.enable_cleanup = self.dedicatedSlice
734         return node
735
736     def _make_node_iface(self, parameters):
737         return self._make_generic(parameters, self._interfaces.NodeIface)
738
739     def _make_tun_iface(self, parameters):
740         return self._make_generic(parameters, self._interfaces.TunIface)
741
742     def _make_tap_iface(self, parameters):
743         return self._make_generic(parameters, self._interfaces.TapIface)
744
745     def _make_netpipe(self, parameters):
746         return self._make_generic(parameters, self._interfaces.NetPipe)
747
748     def _make_internet(self, parameters):
749         return self._make_generic(parameters, self._interfaces.Internet)
750
751     def _make_application(self, parameters):
752         return self._make_generic(parameters, self._app.Application)
753
754     def _make_dependency(self, parameters):
755         return self._make_generic(parameters, self._app.Dependency)
756
757     def _make_nepi_dependency(self, parameters):
758         return self._make_generic(parameters, self._app.NepiDependency)
759
760     def _make_ns3_dependency(self, parameters):
761         return self._make_generic(parameters, self._app.NS3Dependency)
762
763     def _make_tun_filter(self, parameters):
764         return self._make_generic(parameters, self._interfaces.TunFilter)
765
766     def _make_class_queue_filter(self, parameters):
767         return self._make_generic(parameters, self._interfaces.ClassQueueFilter)
768
769     def _make_tos_queue_filter(self, parameters):
770         return self._make_generic(parameters, self._interfaces.ToSQueueFilter)
771
772     def _make_multicast_forwarder(self, parameters):
773         return self._make_generic(parameters, self._multicast.MulticastForwarder)
774
775     def _make_multicast_announcer(self, parameters):
776         return self._make_generic(parameters, self._multicast.MulticastAnnouncer)
777
778     def _make_multicast_router(self, parameters):
779         return self._make_generic(parameters, self._multicast.MulticastRouter)
780
781