1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
12 from Completer import Completer, CompleterTask
13 from TestSite import TestSite
14 from TestNode import TestNode, CompleterTaskNodeSsh
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
25 from TestBonding import TestBonding
27 has_sfa_cache_filename="sfa-cache"
29 # step methods must take (self) and return a boolean (options is a member of the class)
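# for illustration only - a minimal sketch of such a step method (the docstring wording here
# is hypothetical; the real steps are defined further down on the TestPlc class):
#     def plc_start(self):
#         "start the plc service"
#         return self.start_service('plc')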
31 def standby(minutes, dry_run):
32 utils.header('Entering StandBy for {:d} mn'.format(minutes))
36 time.sleep(60*minutes)
39 def standby_generic(func):
41 minutes = int(func.__name__.split("_")[1])
42 return standby(minutes, self.options.dry_run)
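# note: the duration is parsed from the method name - the token right after the first
# underscore is taken as the number of minutes, so e.g. a step named standby_10
# would sleep for 10 minutes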
45 def node_mapper(method):
46 def map_on_nodes(self, *args, **kwds):
48 node_method = TestNode.__dict__[method.__name__]
49 for test_node in self.all_nodes():
50 if not node_method(test_node, *args, **kwds):
53 # maintain __name__ for ignore_result
54 map_on_nodes.__name__ = method.__name__
55 # restore the doc text
56 map_on_nodes.__doc__ = TestNode.__dict__[method.__name__].__doc__
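# usage sketch (hedged - the decorators on the actual steps below are partly elided here):
# a TestPlc step such as
#     @node_mapper
#     def qemu_local_init(self): pass
# simply dispatches to TestNode.qemu_local_init on every node of every site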
59 def slice_mapper(method):
60 def map_on_slices(self):
62 slice_method = TestSlice.__dict__[method.__name__]
63 for slice_spec in self.plc_spec['slices']:
64 site_spec = self.locate_site (slice_spec['sitename'])
65 test_site = TestSite(self,site_spec)
66 test_slice = TestSlice(self,test_site,slice_spec)
67 if not slice_method(test_slice, self.options):
70 # maintain __name__ for ignore_result
71 map_on_slices.__name__ = method.__name__
72 # restore the doc text
73 map_on_slices.__doc__ = TestSlice.__dict__[method.__name__].__doc__
76 def bonding_redirector(method):
77 bonding_name = method.__name__.replace('bonding_', '')
79 bonding_method = TestBonding.__dict__[bonding_name]
80 return bonding_method(self.test_bonding)
81 # maintain __name__ for ignore_result
82 redirect.__name__ = method.__name__
83 # restore the doc text
84 redirect.__doc__ = TestBonding.__dict__[bonding_name].__doc__
87 # run a step but return True so that we can go on
88 def ignore_result(method):
90 # ssh_slice_ignore->ssh_slice
91 ref_name = method.__name__.replace('_ignore', '').replace('force_', '')
92 ref_method = TestPlc.__dict__[ref_name]
93 result = ref_method(self)
94 print("Actual (but ignored) result for {ref_name} is {result}".format(**locals()))
95 return Ignored(result)
96 name = method.__name__.replace('_ignore', '').replace('force_', '')
97 ignoring.__name__ = name
98 ignoring.__doc__ = "ignored version of " + name
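# usage note: step names like 'slice_fs_deleted_ignore' or 'check_vsys_defaults_ignore' in the
# step lists below are the ignore_result-wrapped variants of slice_fs_deleted and
# check_vsys_defaults: the underlying step still runs and its outcome gets printed,
# but the overall run is not marked as failed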
101 # a variant that expects the TestSlice method to return a list of CompleterTasks that
102 # are then merged into a single Completer run to avoid waiting for all the slices
103 # especially useful when a test fails, of course
104 # because we need to pass arguments we use a class instead..
105 class slice_mapper__tasks(object):
106 # could not get this to work with named arguments
107 def __init__(self, timeout_minutes, silent_minutes, period_seconds):
108 self.timeout = timedelta(minutes = timeout_minutes)
109 self.silent = timedelta(minutes = silent_minutes)
110 self.period = timedelta(seconds = period_seconds)
111 def __call__(self, method):
113 # compute augmented method name
114 method_name = method.__name__ + "__tasks"
115 # locate in TestSlice
116 slice_method = TestSlice.__dict__[ method_name ]
119 for slice_spec in self.plc_spec['slices']:
120 site_spec = self.locate_site (slice_spec['sitename'])
121 test_site = TestSite(self, site_spec)
122 test_slice = TestSlice(self, test_site, slice_spec)
123 tasks += slice_method (test_slice, self.options)
124 return Completer (tasks, message=method.__name__).\
125 run(decorator_self.timeout, decorator_self.silent, decorator_self.period)
126 # restore the doc text from the TestSlice method even if a bit odd
127 wrappee.__name__ = method.__name__
128 wrappee.__doc__ = slice_method.__doc__
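# usage sketch: the decorated TestPlc step only provides the timeouts, e.g.
#     @slice_mapper__tasks(20, 10, 15)
#     def ssh_slice(self): pass
# while the per-sliver logic lives in TestSlice.ssh_slice__tasks, which returns CompleterTasks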
131 def auth_sfa_mapper(method):
134 auth_method = TestAuthSfa.__dict__[method.__name__]
135 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
136 test_auth = TestAuthSfa(self, auth_spec)
137 if not auth_method(test_auth, self.options):
140 # restore the doc text
141 actual.__doc__ = TestAuthSfa.__dict__[method.__name__].__doc__
145 def __init__(self, result):
155 'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
156 'plc_install', 'plc_configure', 'plc_start', SEP,
157 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
158 'plcapi_urls','speed_up_slices', SEP,
159 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
160 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
161 # keep this out of the way for now
162 'check_vsys_defaults_ignore', SEP,
163 # run this first off so it's easier to re-run on another qemu box
164 'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
165 'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', 'qemu_nodefamily', SEP,
166 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
167 'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
168 'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
169 'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1',
170 'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
171 'sfa_discover@1', 'sfa_rspec@1', 'sfa_allocate@1', 'sfa_provision@1', SEPSFA,
172 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
173 'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
174 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
175 # but as the stress test might take a while, we sometimes missed the debug mode..
176 'probe_kvm_iptables',
177 'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
178 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts', SEP,
179 'ssh_slice_sfa@1', SEPSFA,
180 'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1','sfa_check_slice_plc_empty@1', SEPSFA,
181 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
182 'cross_check_tcp@1', 'check_system_slice', SEP,
183 # for inspecting the slice while it runs the first time
185 # check slices are turned off properly
186 'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
187 # check they are properly re-created with the same name
188 'fill_slices', 'ssh_slice_again', SEP,
189 'gather_logs_force', SEP,
192 'export', 'show_boxes', 'super_speed_up_slices', SEP,
193 'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
194 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
195 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
196 'delete_leases', 'list_leases', SEP,
198 'nodestate_show','nodestate_safeboot','nodestate_boot', 'nodestate_upgrade', SEP,
199 'nodefcdistro_show','nodefcdistro_f14','nodefcdistro_f18', 'nodefcdistro_f20', 'nodefcdistro_f21', SEP,
200 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
201 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
202 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
203 'sfa_get_expires', SEPSFA,
204 'plc_db_dump' , 'plc_db_restore', SEP,
205 'check_netflow','check_drl', SEP,
206 'debug_nodemanager', 'slice_fs_present', SEP,
207 'standby_1_through_20','yes','no',SEP,
208 'install_syslinux6', 'bonding_builds', 'bonding_nodes', SEP,
210 default_bonding_steps = [
211 'bonding_init_partial',
213 'bonding_install_rpms', SEP,
217 def printable_steps(steps):
218 single_line = " ".join(steps) + " "
219 return single_line.replace(" "+SEP+" ", " \\\n").replace(" "+SEPSFA+" ", " \\\n")
221 def valid_step(step):
222 return step != SEP and step != SEPSFA
224 # turn off the sfa-related steps when build has skipped SFA
225 # this was originally for centos5 but is still valid
226 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
228 def _has_sfa_cached(rpms_url):
229 if os.path.isfile(has_sfa_cache_filename):
230 with open(has_sfa_cache_filename) as cache:
231 cached = cache.read() == "yes"
232 utils.header("build provides SFA (cached):{}".format(cached))
234 # warning, we're now building 'sface' so let's be a bit more picky
235 # full builds are expected to return with 0 here
236 utils.header("Checking if build provides SFA package...")
237 retcod = utils.system("curl --silent {}/ | grep -q sfa-".format(rpms_url)) == 0
238 encoded = 'yes' if retcod else 'no'
239 with open(has_sfa_cache_filename,'w') as cache:
244 def check_whether_build_has_sfa(rpms_url):
245 has_sfa = TestPlc._has_sfa_cached(rpms_url)
247 utils.header("build does provide SFA")
249 # move all steps containing 'sfa' from default_steps to other_steps
250 utils.header("SFA package not found - removing steps with sfa or sfi")
251 sfa_steps = [ step for step in TestPlc.default_steps
252 if step.find('sfa') >= 0 or step.find("sfi") >= 0 ]
253 TestPlc.other_steps += sfa_steps
254 for step in sfa_steps:
255 TestPlc.default_steps.remove(step)
257 def __init__(self, plc_spec, options):
258 self.plc_spec = plc_spec
259 self.options = options
260 self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
261 self.vserverip = plc_spec['vserverip']
262 self.vservername = plc_spec['vservername']
263 self.vplchostname = self.vservername.split('-')[-1]
264 self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
265 self.apiserver = TestApiserver(self.url, options.dry_run)
266 (self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
267 (self.ssh_node_debug_timeout, self.ssh_node_debug_silent) = plc_spec['ssh_node_debug_timers']
269 def has_addresses_api(self):
270 return self.apiserver.has_method('AddIpAddress')
273 name = self.plc_spec['name']
274 return "{}.{}".format(name,self.vservername)
277 return self.plc_spec['host_box']
280 return self.test_ssh.is_local()
282 # define the API methods on this object through xmlrpc
283 # would help, but not strictly necessary
287 def actual_command_in_guest(self,command, backslash=False):
288 raw1 = self.host_to_guest(command)
289 raw2 = self.test_ssh.actual_command(raw1, dry_run=self.options.dry_run, backslash=backslash)
292 def start_guest(self):
293 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),
294 dry_run=self.options.dry_run))
296 def stop_guest(self):
297 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),
298 dry_run=self.options.dry_run))
300 def run_in_guest(self, command, backslash=False):
301 raw = self.actual_command_in_guest(command, backslash)
302 return utils.system(raw)
304 def run_in_host(self,command):
305 return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
307 # backslashing turned out so awful at some point that I've turned off auto-backslashing
308 # see e.g. plc_start esp. the version for f14
309 # command gets run in the plc's vm
310 def host_to_guest(self, command):
311 ssh_leg = TestSsh(self.vplchostname)
312 return ssh_leg.actual_command(command, keep_stdin=True)
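# note: the resulting command has two ssh legs - test_ssh reaches the host box from wherever
# the tests run, and the TestSsh(self.vplchostname) leg above hops from there into the plc VM;
# run_in_guest() below chains both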
314 # this /vservers thing is legacy...
315 def vm_root_in_host(self):
316 return "/vservers/{}/".format(self.vservername)
318 def vm_timestamp_path(self):
319 return "/vservers/{}/{}.timestamp".format(self.vservername, self.vservername)
321 #start/stop the vserver
322 def start_guest_in_host(self):
323 return "virsh -c lxc:/// start {}".format(self.vservername)
325 def stop_guest_in_host(self):
326 return "virsh -c lxc:/// destroy {}".format(self.vservername)
329 def run_in_guest_piped(self,local,remote):
330 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),
333 def yum_check_installed(self, rpms):
334 if isinstance(rpms, list):
336 return self.run_in_guest("rpm -q {}".format(rpms)) == 0
338 # does a yum install in the vs, ignore yum retcod, check with rpm
339 def yum_install(self, rpms):
340 if isinstance(rpms, list):
342 self.run_in_guest("yum -y install {}".format(rpms))
343 # yum-complete-transaction comes with yum-utils, which is in vtest.pkgs
344 self.run_in_guest("yum-complete-transaction -y")
345 return self.yum_check_installed(rpms)
348 return {'Username' : self.plc_spec['settings']['PLC_ROOT_USER'],
349 'AuthMethod' : 'password',
350 'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
351 'Role' : self.plc_spec['role'],
354 def locate_site(self,sitename):
355 for site in self.plc_spec['sites']:
356 if site['site_fields']['name'] == sitename:
358 if site['site_fields']['login_base'] == sitename:
360 raise Exception("Cannot locate site {}".format(sitename))
362 def locate_node(self, nodename):
363 for site in self.plc_spec['sites']:
364 for node in site['nodes']:
365 if node['name'] == nodename:
367 raise Exception("Cannot locate node {}".format(nodename))
369 def locate_hostname(self, hostname):
370 for site in self.plc_spec['sites']:
371 for node in site['nodes']:
372 if node['node_fields']['hostname'] == hostname:
374 raise Exception("Cannot locate hostname {}".format(hostname))
376 def locate_key(self, key_name):
377 for key in self.plc_spec['keys']:
378 if key['key_name'] == key_name:
380 raise Exception("Cannot locate key {}".format(key_name))
382 def locate_private_key_from_key_names(self, key_names):
383 # locate the first avail. key
385 for key_name in key_names:
386 key_spec = self.locate_key(key_name)
387 test_key = TestKey(self,key_spec)
388 publickey = test_key.publicpath()
389 privatekey = test_key.privatepath()
390 if os.path.isfile(publickey) and os.path.isfile(privatekey):
397 def locate_slice(self, slicename):
398 for slice in self.plc_spec['slices']:
399 if slice['slice_fields']['name'] == slicename:
401 raise Exception("Cannot locate slice {}".format(slicename))
403 def all_sliver_objs(self):
405 for slice_spec in self.plc_spec['slices']:
406 slicename = slice_spec['slice_fields']['name']
407 for nodename in slice_spec['nodenames']:
408 result.append(self.locate_sliver_obj(nodename, slicename))
411 def locate_sliver_obj(self, nodename, slicename):
412 site,node = self.locate_node(nodename)
413 slice = self.locate_slice(slicename)
415 test_site = TestSite(self, site)
416 test_node = TestNode(self, test_site, node)
417 # xxx the slice site is assumed to be the node site - mhh - probably harmless
418 test_slice = TestSlice(self, test_site, slice)
419 return TestSliver(self, test_node, test_slice)
421 def locate_first_node(self):
422 nodename = self.plc_spec['slices'][0]['nodenames'][0]
423 site,node = self.locate_node(nodename)
424 test_site = TestSite(self, site)
425 test_node = TestNode(self, test_site, node)
428 def locate_first_sliver(self):
429 slice_spec = self.plc_spec['slices'][0]
430 slicename = slice_spec['slice_fields']['name']
431 nodename = slice_spec['nodenames'][0]
432 return self.locate_sliver_obj(nodename,slicename)
434 # all different hostboxes used in this plc
435 def get_BoxNodes(self):
436 # maps on sites and nodes, return [ (host_box,test_node) ]
438 for site_spec in self.plc_spec['sites']:
439 test_site = TestSite(self,site_spec)
440 for node_spec in site_spec['nodes']:
441 test_node = TestNode(self, test_site, node_spec)
442 if not test_node.is_real():
443 tuples.append( (test_node.host_box(),test_node) )
444 # transform into a dict { 'host_box' -> [ test_node .. ] }
446 for (box,node) in tuples:
447 if box not in result:
450 result[box].append(node)
453 # a step for checking this stuff
454 def show_boxes(self):
455 'print summary of nodes location'
456 for box,nodes in self.get_BoxNodes().items():
457 print(box,":"," + ".join( [ node.name() for node in nodes ] ))
460 # make this a valid step
461 def qemu_kill_all(self):
462 'kill all qemu instances on the qemu boxes involved by this setup'
463 # this is the brute force version, kill all qemus on that host box
464 for (box,nodes) in self.get_BoxNodes().items():
465 # pass the first nodename, as we don't push template-qemu on testboxes
466 nodedir = nodes[0].nodedir()
467 TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
470 # make this a valid step
471 def qemu_list_all(self):
472 'list all qemu instances on the qemu boxes involved by this setup'
473 for box,nodes in self.get_BoxNodes().items():
474 # this is the brute force version - list all qemus on that host box
475 TestBoxQemu(box, self.options.buildname).qemu_list_all()
478 # list only the qemus related to this test
479 def qemu_list_mine(self):
480 'list qemu instances for our nodes'
481 for (box,nodes) in self.get_BoxNodes().items():
482 # the fine-grain version
487 # clean up only the qemus related to this test
488 def qemu_clean_mine(self):
489 'cleanup (rm -rf) qemu instances for our nodes'
490 for box,nodes in self.get_BoxNodes().items():
491 # the fine-grain version
496 # kill only the right qemus
497 def qemu_kill_mine(self):
498 'kill the qemu instances for our nodes'
499 for box,nodes in self.get_BoxNodes().items():
500 # the fine-grain version
505 #################### display config
507 "show test configuration after localization"
512 # ugly hack to make sure 'run export' only reports about the 1st plc
513 # to avoid confusion - also we use 'inri_slice1' in various aliases..
516 "print cut'n paste-able stuff to export env variables to your shell"
517 # guess local domain from hostname
518 if TestPlc.exported_id > 1:
519 print("export GUESTHOSTNAME{:d}={}".format(TestPlc.exported_id, self.plc_spec['vservername']))
521 TestPlc.exported_id += 1
522 domain = socket.gethostname().split('.',1)[1]
523 fqdn = "{}.{}".format(self.plc_spec['host_box'], domain)
524 print("export BUILD={}".format(self.options.buildname))
525 print("export PLCHOSTLXC={}".format(fqdn))
526 print("export GUESTNAME={}".format(self.vservername))
527 print("export GUESTHOSTNAME={}.{}".format(self.vplchostname, domain))
528 # find hostname of first node
529 hostname, qemubox = self.all_node_infos()[0]
530 print("export KVMHOST={}.{}".format(qemubox, domain))
531 print("export NODE={}".format(hostname))
535 always_display_keys=['PLC_WWW_HOST', 'nodes', 'sites']
536 def show_pass(self, passno):
537 for (key,val) in self.plc_spec.items():
538 if not self.options.verbose and key not in TestPlc.always_display_keys:
543 self.display_site_spec(site)
544 for node in site['nodes']:
545 self.display_node_spec(node)
546 elif key == 'initscripts':
547 for initscript in val:
548 self.display_initscript_spec(initscript)
549 elif key == 'slices':
551 self.display_slice_spec(slice)
554 self.display_key_spec(key)
556 if key not in ['sites', 'initscripts', 'slices', 'keys']:
557 print('+ ', key, ':', val)
559 def display_site_spec(self, site):
560 print('+ ======== site', site['site_fields']['name'])
561 for k,v in site.items():
562 if not self.options.verbose and k not in TestPlc.always_display_keys:
566 print('+ ','nodes : ', end=' ')
568 print(node['node_fields']['hostname'],'', end=' ')
572 print('+ users : ', end=' ')
574 print(user['name'],'', end=' ')
576 elif k == 'site_fields':
577 print('+ login_base', ':', v['login_base'])
578 elif k == 'address_fields':
584 def display_initscript_spec(self, initscript):
585 print('+ ======== initscript', initscript['initscript_fields']['name'])
587 def display_key_spec(self, key):
588 print('+ ======== key', key['key_name'])
590 def display_slice_spec(self, slice):
591 print('+ ======== slice', slice['slice_fields']['name'])
592 for k,v in slice.items():
595 print('+ nodes : ', end=' ')
597 print(nodename,'', end=' ')
599 elif k == 'usernames':
601 print('+ users : ', end=' ')
603 print(username,'', end=' ')
605 elif k == 'slice_fields':
606 print('+ fields',':', end=' ')
607 print('max_nodes=',v['max_nodes'], end=' ')
612 def display_node_spec(self, node):
613 print("+ node={} host_box={}".format(node['name'], node['host_box']), end=' ')
614 print("hostname=", node['node_fields']['hostname'], end=' ')
615 print("ip=", node['interface_fields']['ip'])
616 if self.options.verbose:
617 utils.pprint("node details", node, depth=3)
619 # another entry point for just showing the boxes involved
620 def display_mapping(self):
621 TestPlc.display_mapping_plc(self.plc_spec)
625 def display_mapping_plc(plc_spec):
626 print('+ MyPLC',plc_spec['name'])
627 # WARNING this would not be right for lxc-based PLC's - should be harmless though
628 print('+\tvserver address = root@{}:/vservers/{}'.format(plc_spec['host_box'], plc_spec['vservername']))
629 print('+\tIP = {}/{}'.format(plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip']))
630 for site_spec in plc_spec['sites']:
631 for node_spec in site_spec['nodes']:
632 TestPlc.display_mapping_node(node_spec)
635 def display_mapping_node(node_spec):
636 print('+ NODE {}'.format(node_spec['name']))
637 print('+\tqemu box {}'.format(node_spec['host_box']))
638 print('+\thostname={}'.format(node_spec['node_fields']['hostname']))
640 # write a timestamp in /vservers/<>.timestamp
641 # cannot be inside the vserver, that causes vserver .. build to cough
642 def plcvm_timestamp(self):
643 "Create a timestamp to remember creation date for this plc"
644 now = int(time.time())
645 # TODO-lxc check this one
646 # a first approx. is to store the timestamp close to the VM root like vs does
647 stamp_path = self.vm_timestamp_path()
648 stamp_dir = os.path.dirname(stamp_path)
649 utils.system(self.test_ssh.actual_command("mkdir -p {}".format(stamp_dir)))
650 return utils.system(self.test_ssh.actual_command("echo {:d} > {}".format(now, stamp_path))) == 0
652 # this is called unconditionally at the beginning of the test sequence
653 # just in case this is a rerun, so if the vm is not running it's fine
654 def plcvm_delete(self):
655 "vserver delete the test myplc"
656 stamp_path = self.vm_timestamp_path()
657 self.run_in_host("rm -f {}".format(stamp_path))
658 self.run_in_host("virsh -c lxc:// destroy {}".format(self.vservername))
659 self.run_in_host("virsh -c lxc:// undefine {}".format(self.vservername))
660 self.run_in_host("rm -fr /vservers/{}".format(self.vservername))
664 # historically the build was being fetched by the tests
665 # now the build pushes itself as a subdir of the tests workdir
666 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
667 def plcvm_create(self):
668 "vserver creation (no install done)"
669 # push the local build/ dir to the testplc box
671 # a full path for the local calls
672 build_dir = os.path.dirname(sys.argv[0])
673 # sometimes this is empty - set to "." in such a case
676 build_dir += "/build"
678 # use a standard name - will be relative to remote buildname
680 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
681 self.test_ssh.rmdir(build_dir)
682 self.test_ssh.copy(build_dir, recursive=True)
683 # the repo url is taken from arch-rpms-url
684 # with the last step (i386) removed
685 repo_url = self.options.arch_rpms_url
686 for level in [ 'arch' ]:
687 repo_url = os.path.dirname(repo_url)
689 # invoke initvm (drop support for vs)
690 script = "lbuild-initvm.sh"
692 # pass the vbuild-nightly options to [lv]test-initvm
693 script_options += " -p {}".format(self.options.personality)
694 script_options += " -d {}".format(self.options.pldistro)
695 script_options += " -f {}".format(self.options.fcdistro)
696 script_options += " -r {}".format(repo_url)
697 vserver_name = self.vservername
699 vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
700 script_options += " -n {}".format(vserver_hostname)
702 print("Cannot reverse lookup {}".format(self.vserverip))
703 print("This is considered fatal, as this might pollute the test results")
705 create_vserver="{build_dir}/{script} {script_options} {vserver_name}".format(**locals())
706 return self.run_in_host(create_vserver) == 0
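# for illustration only - with the options shown above, the command that gets run on the host
# box looks roughly like:
#     <build_dir>/lbuild-initvm.sh -p <personality> -d <pldistro> -f <fcdistro> -r <repo_url> -n <vserver_hostname> <vservername>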
709 def plc_install(self):
711 yum install myplc, noderepo
712 plain bootstrapfs is not installed anymore
716 if self.options.personality == "linux32":
718 elif self.options.personality == "linux64":
721 raise Exception("Unsupported personality {}".format(self.options.personality))
722 nodefamily = "{}-{}-{}".format(self.options.pldistro, self.options.fcdistro, arch)
725 pkgs_list.append("slicerepo-{}".format(nodefamily))
726 pkgs_list.append("myplc")
727 pkgs_list.append("noderepo-{}".format(nodefamily))
728 pkgs_list.append("nodeimage-{}-plain".format(nodefamily))
729 pkgs_string=" ".join(pkgs_list)
730 return self.yum_install(pkgs_list)
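# for example, with a hypothetical nodefamily like onelab-f20-x86_64, the packages handed to
# yum_install would include myplc, noderepo-onelab-f20-x86_64 and
# nodeimage-onelab-f20-x86_64-plain (plus the matching slicerepo- package when requested)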
732 def install_syslinux6(self):
734 install syslinux6 from the fedora21 release
736 key = 'http://mirror.onelab.eu/keys/RPM-GPG-KEY-fedora-21-primary'
739 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-6.03-1.fc21.x86_64.rpm',
740 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-nonlinux-6.03-1.fc21.noarch.rpm',
741 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-perl-6.03-1.fc21.x86_64.rpm',
743 # this can be done several times
744 self.run_in_guest("rpm --import {key}".format(**locals()))
745 return self.run_in_guest("yum -y localinstall {}".format(" ".join(rpms))) == 0
747 def bonding_builds(self):
749 list /etc/yum.repos.d on the myplc side
751 self.run_in_guest("ls /etc/yum.repos.d/*partial.repo")
754 def bonding_nodes(self):
756 List nodes known to the myplc together with their nodefamily
758 print("---------------------------------------- nodes")
759 for node in self.apiserver.GetNodes(self.auth_root()):
760 print("{} -> {}".format(node['hostname'],
761 self.apiserver.GetNodeFlavour(self.auth_root(),node['hostname'])['nodefamily']))
762 print("---------------------------------------- nodes")
766 def mod_python(self):
767 """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
768 return self.yum_install( ['mod_python'] )
771 def plc_configure(self):
773 tmpname = '{}.plc-config-tty'.format(self.name())
774 with open(tmpname,'w') as fileconf:
775 for (var,value) in self.plc_spec['settings'].items():
776 fileconf.write('e {}\n{}\n'.format(var, value))
777 fileconf.write('w\n')
778 fileconf.write('q\n')
779 utils.system('cat {}'.format(tmpname))
780 self.run_in_guest_piped('cat {}'.format(tmpname), 'plc-config-tty')
781 utils.system('rm {}'.format(tmpname))
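# the generated file is fed to plc-config-tty; with hypothetical settings it reads roughly:
#     e PLC_NAME
#     TestLab
#     e PLC_ROOT_USER
#     root@example.org
#     w
#     q
# i.e. one 'e <var>' / '<value>' pair per setting, then write and quit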
784 # f14 is a bit odd in this respect, although this worked fine in guests up to f18
785 # however using a vplc guest under f20 requires this trick
786 # the symptom is this: service plc start
787 # Starting plc (via systemctl): Failed to get D-Bus connection: \
788 # Failed to connect to socket /org/freedesktop/systemd1/private: Connection refused
789 # weird thing is the doc says f14 uses upstart by default and not systemd
790 # so this sounds kind of harmless
791 def start_service(self, service):
792 return self.start_stop_service(service, 'start')
793 def stop_service(self, service):
794 return self.start_stop_service(service, 'stop')
796 def start_stop_service(self, service, start_or_stop):
797 "utility to start/stop a service with the special trick for f14"
798 if self.options.fcdistro != 'f14':
799 return self.run_in_guest("service {} {}".format(service, start_or_stop)) == 0
801 # patch /sbin/service so it does not reset environment
802 self.run_in_guest('sed -i -e \\"s,env -i,env,\\" /sbin/service')
803 # this is because our own scripts in turn call service
804 return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service {} {}"\
805 .format(service, start_or_stop)) == 0
809 return self.start_service('plc')
813 return self.stop_service('plc')
815 def plcvm_start(self):
816 "start the PLC vserver"
820 def plcvm_stop(self):
821 "stop the PLC vserver"
825 # stores the keys from the config for further use
826 def keys_store(self):
827 "stores test users ssh keys in keys/"
828 for key_spec in self.plc_spec['keys']:
829 TestKey(self,key_spec).store_key()
832 def keys_clean(self):
833 "removes keys cached in keys/"
834 utils.system("rm -rf ./keys")
837 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
838 # for later direct access to the nodes
839 def keys_fetch(self):
840 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
842 if not os.path.isdir(dir):
844 vservername = self.vservername
845 vm_root = self.vm_root_in_host()
847 prefix = 'debug_ssh_key'
848 for ext in ['pub', 'rsa'] :
849 src = "{vm_root}/etc/planetlab/{prefix}.{ext}".format(**locals())
850 dst = "keys/{vservername}-debug.{ext}".format(**locals())
851 if self.test_ssh.fetch(src, dst) != 0:
856 "create sites with PLCAPI"
857 return self.do_sites()
859 def delete_sites(self):
860 "delete sites with PLCAPI"
861 return self.do_sites(action="delete")
863 def do_sites(self, action="add"):
864 for site_spec in self.plc_spec['sites']:
865 test_site = TestSite(self,site_spec)
866 if (action != "add"):
867 utils.header("Deleting site {} in {}".format(test_site.name(), self.name()))
868 test_site.delete_site()
869 # deleted with the site
870 #test_site.delete_users()
873 utils.header("Creating site {} & users in {}".format(test_site.name(), self.name()))
874 test_site.create_site()
875 test_site.create_users()
878 def delete_all_sites(self):
879 "Delete all sites in PLC, and related objects"
880 print('auth_root', self.auth_root())
881 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
883 # keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
884 if site['login_base'] == self.plc_spec['settings']['PLC_SLICE_PREFIX']:
886 site_id = site['site_id']
887 print('Deleting site_id', site_id)
888 self.apiserver.DeleteSite(self.auth_root(), site_id)
892 "create nodes with PLCAPI"
893 return self.do_nodes()
894 def delete_nodes(self):
895 "delete nodes with PLCAPI"
896 return self.do_nodes(action="delete")
898 def do_nodes(self, action="add"):
899 for site_spec in self.plc_spec['sites']:
900 test_site = TestSite(self, site_spec)
902 utils.header("Deleting nodes in site {}".format(test_site.name()))
903 for node_spec in site_spec['nodes']:
904 test_node = TestNode(self, test_site, node_spec)
905 utils.header("Deleting {}".format(test_node.name()))
906 test_node.delete_node()
908 utils.header("Creating nodes for site {} in {}".format(test_site.name(), self.name()))
909 for node_spec in site_spec['nodes']:
910 utils.pprint('Creating node {}'.format(node_spec), node_spec)
911 test_node = TestNode(self, test_site, node_spec)
912 test_node.create_node()
915 def nodegroups(self):
916 "create nodegroups with PLCAPI"
917 return self.do_nodegroups("add")
918 def delete_nodegroups(self):
919 "delete nodegroups with PLCAPI"
920 return self.do_nodegroups("delete")
924 def translate_timestamp(start, grain, timestamp):
925 if timestamp < TestPlc.YEAR:
926 return start + timestamp*grain
931 def timestamp_printable(timestamp):
932 return time.strftime('%m-%d %H:%M:%S UTC', time.gmtime(timestamp))
935 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
936 now = int(time.time())
937 grain = self.apiserver.GetLeaseGranularity(self.auth_root())
938 print('API answered grain=', grain)
939 start = (now//grain)*grain
941 # find out all nodes that are reservable
942 nodes = self.all_reservable_nodenames()
944 utils.header("No reservable node found - proceeding without leases")
947 # attach them to the leases as specified in plc_specs
948 # this is where the 'leases' field gets interpreted as relative or absolute
949 for lease_spec in self.plc_spec['leases']:
950 # skip the ones that come with a null slice id
951 if not lease_spec['slice']:
953 lease_spec['t_from'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_from'])
954 lease_spec['t_until'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_until'])
955 lease_addition = self.apiserver.AddLeases(self.auth_root(), nodes, lease_spec['slice'],
956 lease_spec['t_from'], lease_spec['t_until'])
957 if lease_addition['errors']:
958 utils.header("Cannot create leases, {}".format(lease_addition['errors']))
961 utils.header('Leases on nodes {} for {} from {:d} ({}) until {:d} ({})'\
962 .format(nodes, lease_spec['slice'],
963 lease_spec['t_from'], TestPlc.timestamp_printable(lease_spec['t_from']),
964 lease_spec['t_until'], TestPlc.timestamp_printable(lease_spec['t_until'])))
968 def delete_leases(self):
969 "remove all leases in the myplc side"
970 lease_ids = [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
971 utils.header("Cleaning leases {}".format(lease_ids))
972 self.apiserver.DeleteLeases(self.auth_root(), lease_ids)
975 def list_leases(self):
976 "list all leases known to the myplc"
977 leases = self.apiserver.GetLeases(self.auth_root())
978 now = int(time.time())
980 current = l['t_until'] >= now
981 if self.options.verbose or current:
982 utils.header("{} {} from {} until {}"\
983 .format(l['hostname'], l['name'],
984 TestPlc.timestamp_printable(l['t_from']),
985 TestPlc.timestamp_printable(l['t_until'])))
988 # create nodegroups if needed, and populate
989 def do_nodegroups(self, action="add"):
990 # 1st pass to scan contents
992 for site_spec in self.plc_spec['sites']:
993 test_site = TestSite(self,site_spec)
994 for node_spec in site_spec['nodes']:
995 test_node = TestNode(self, test_site, node_spec)
996 if 'nodegroups' in node_spec:
997 nodegroupnames = node_spec['nodegroups']
998 if isinstance(nodegroupnames, str):
999 nodegroupnames = [ nodegroupnames ]
1000 for nodegroupname in nodegroupnames:
1001 if nodegroupname not in groups_dict:
1002 groups_dict[nodegroupname] = []
1003 groups_dict[nodegroupname].append(test_node.name())
1004 auth = self.auth_root()
1006 for (nodegroupname,group_nodes) in groups_dict.items():
1008 print('nodegroups:', 'dealing with nodegroup',\
1009 nodegroupname, 'on nodes', group_nodes)
1010 # first, check if the nodetagtype is here
1011 tag_types = self.apiserver.GetTagTypes(auth, {'tagname':nodegroupname})
1013 tag_type_id = tag_types[0]['tag_type_id']
1015 tag_type_id = self.apiserver.AddTagType(auth,
1016 {'tagname' : nodegroupname,
1017 'description' : 'for nodegroup {}'.format(nodegroupname),
1018 'category' : 'test'})
1019 print('located tag (type)', nodegroupname, 'as', tag_type_id)
1021 nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname' : nodegroupname})
1023 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
1024 print('created nodegroup', nodegroupname, \
1025 'from tagname', nodegroupname, 'and value', 'yes')
1026 # set node tag on all nodes, value='yes'
1027 for nodename in group_nodes:
1029 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
1031 traceback.print_exc()
1032 print('node', nodename, 'seems to already have tag', nodegroupname)
1035 expect_yes = self.apiserver.GetNodeTags(auth,
1036 {'hostname' : nodename,
1037 'tagname' : nodegroupname},
1038 ['value'])[0]['value']
1039 if expect_yes != "yes":
1040 print('Mismatch node tag on node',nodename,'got',expect_yes)
1043 if not self.options.dry_run:
1044 print('Cannot find tag', nodegroupname, 'on node', nodename)
1048 print('cleaning nodegroup', nodegroupname)
1049 self.apiserver.DeleteNodeGroup(auth, nodegroupname)
1051 traceback.print_exc()
1055 # a list of TestNode objs
1056 def all_nodes(self):
1058 for site_spec in self.plc_spec['sites']:
1059 test_site = TestSite(self,site_spec)
1060 for node_spec in site_spec['nodes']:
1061 nodes.append(TestNode(self, test_site, node_spec))
1064 # return a list of tuples (nodename,qemuname)
1065 def all_node_infos(self) :
1067 for site_spec in self.plc_spec['sites']:
1068 node_infos += [ (node_spec['node_fields']['hostname'], node_spec['host_box']) \
1069 for node_spec in site_spec['nodes'] ]
1072 def all_nodenames(self):
1073 return [ x[0] for x in self.all_node_infos() ]
1074 def all_reservable_nodenames(self):
1076 for site_spec in self.plc_spec['sites']:
1077 for node_spec in site_spec['nodes']:
1078 node_fields = node_spec['node_fields']
1079 if 'node_type' in node_fields and node_fields['node_type'] == 'reservable':
1080 res.append(node_fields['hostname'])
1083 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
1084 def nodes_check_boot_state(self, target_boot_state, timeout_minutes,
1085 silent_minutes, period_seconds = 15):
1086 if self.options.dry_run:
1090 class CompleterTaskBootState(CompleterTask):
1091 def __init__(self, test_plc, hostname):
1092 self.test_plc = test_plc
1093 self.hostname = hostname
1094 self.last_boot_state = 'undef'
1095 def actual_run(self):
1097 node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(),
1100 self.last_boot_state = node['boot_state']
1101 return self.last_boot_state == target_boot_state
1105 return "CompleterTaskBootState with node {}".format(self.hostname)
1106 def failure_epilogue(self):
1107 print("node {} in state {} - expected {}"\
1108 .format(self.hostname, self.last_boot_state, target_boot_state))
1110 timeout = timedelta(minutes=timeout_minutes)
1111 graceout = timedelta(minutes=silent_minutes)
1112 period = timedelta(seconds=period_seconds)
1113 # the nodes that haven't checked yet - start with a full list and shrink over time
1114 utils.header("checking nodes boot state (expected {})".format(target_boot_state))
1115 tasks = [ CompleterTaskBootState(self,hostname) \
1116 for (hostname,_) in self.all_node_infos() ]
1117 message = 'check_boot_state={}'.format(target_boot_state)
1118 return Completer(tasks, message=message).run(timeout, graceout, period)
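# a note on the Completer pattern used here and in the other check_* steps: the tasks are
# polled every 'period' until 'timeout', and failures are not printed during the initial
# silent/graceout window, which keeps the logs readable while nodes are still booting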
1120 def nodes_booted(self):
1121 return self.nodes_check_boot_state('boot', timeout_minutes=30, silent_minutes=28)
1123 def probe_kvm_iptables(self):
1124 (_,kvmbox) = self.all_node_infos()[0]
1125 TestSsh(kvmbox).run("iptables-save")
1129 def check_nodes_ping(self, timeout_seconds=30, period_seconds=10):
1130 class CompleterTaskPingNode(CompleterTask):
1131 def __init__(self, hostname):
1132 self.hostname = hostname
1133 def run(self, silent):
1134 command="ping -c 1 -w 1 {} >& /dev/null".format(self.hostname)
1135 return utils.system(command, silent=silent) == 0
1136 def failure_epilogue(self):
1137 print("Cannot ping node with name {}".format(self.hostname))
1138 timeout = timedelta(seconds = timeout_seconds)
1140 period = timedelta(seconds = period_seconds)
1141 node_infos = self.all_node_infos()
1142 tasks = [ CompleterTaskPingNode(h) for (h,_) in node_infos ]
1143 return Completer(tasks, message='ping_node').run(timeout, graceout, period)
1145 # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1146 def ping_node(self):
1148 return self.check_nodes_ping()
1150 def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period_seconds=15):
1152 timeout = timedelta(minutes=timeout_minutes)
1153 graceout = timedelta(minutes=silent_minutes)
1154 period = timedelta(seconds=period_seconds)
1155 vservername = self.vservername
1158 completer_message = 'ssh_node_debug'
1159 local_key = "keys/{vservername}-debug.rsa".format(**locals())
1162 completer_message = 'ssh_node_boot'
1163 local_key = "keys/key_admin.rsa"
1164 utils.header("checking ssh access to nodes (expected in {} mode)".format(message))
1165 node_infos = self.all_node_infos()
1166 tasks = [ CompleterTaskNodeSsh(nodename, qemuname, local_key,
1167 boot_state=message, dry_run=self.options.dry_run) \
1168 for (nodename, qemuname) in node_infos ]
1169 return Completer(tasks, message=completer_message).run(timeout, graceout, period)
1171 def ssh_node_debug(self):
1172 "Tries to ssh into nodes in debug mode with the debug ssh key"
1173 return self.check_nodes_ssh(debug = True,
1174 timeout_minutes = self.ssh_node_debug_timeout,
1175 silent_minutes = self.ssh_node_debug_silent)
1177 def ssh_node_boot(self):
1178 "Tries to ssh into nodes in production mode with the root ssh key"
1179 return self.check_nodes_ssh(debug = False,
1180 timeout_minutes = self.ssh_node_boot_timeout,
1181 silent_minutes = self.ssh_node_boot_silent)
1183 def node_bmlogs(self):
1184 "Checks that there's a non-empty dir. /var/log/bm/raw"
1185 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw")) == 0
1188 def qemu_local_init(self): pass
1190 def bootcd(self): pass
1192 def qemu_local_config(self): pass
1194 def qemu_export(self): pass
1196 def nodestate_reinstall(self): pass
1198 def nodestate_upgrade(self): pass
1200 def nodestate_safeboot(self): pass
1202 def nodestate_boot(self): pass
1204 def nodestate_show(self): pass
1206 def nodefcdistro_f14(self): pass
1208 def nodefcdistro_f18(self): pass
1210 def nodefcdistro_f20(self): pass
1212 def nodefcdistro_f21(self): pass
1214 def nodefcdistro_show(self): pass
1216 ### check hooks : invoke scripts from hooks/{node,slice}
1217 def check_hooks_node(self):
1218 return self.locate_first_node().check_hooks()
1219 def check_hooks_sliver(self) :
1220 return self.locate_first_sliver().check_hooks()
1222 def check_hooks(self):
1223 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1224 return self.check_hooks_node() and self.check_hooks_sliver()
1227 def do_check_initscripts(self):
1228 class CompleterTaskInitscript(CompleterTask):
1229 def __init__(self, test_sliver, stamp):
1230 self.test_sliver = test_sliver
1232 def actual_run(self):
1233 return self.test_sliver.check_initscript_stamp(self.stamp)
1235 return "initscript checker for {}".format(self.test_sliver.name())
1236 def failure_epilogue(self):
1237 print("initscript stamp {} not found in sliver {}"\
1238 .format(self.stamp, self.test_sliver.name()))
1241 for slice_spec in self.plc_spec['slices']:
1242 if 'initscriptstamp' not in slice_spec:
1244 stamp = slice_spec['initscriptstamp']
1245 slicename = slice_spec['slice_fields']['name']
1246 for nodename in slice_spec['nodenames']:
1247 print('nodename', nodename, 'slicename', slicename, 'stamp', stamp)
1248 site,node = self.locate_node(nodename)
1249 # xxx - passing the wrong site - probably harmless
1250 test_site = TestSite(self, site)
1251 test_slice = TestSlice(self, test_site, slice_spec)
1252 test_node = TestNode(self, test_site, node)
1253 test_sliver = TestSliver(self, test_node, test_slice)
1254 tasks.append(CompleterTaskInitscript(test_sliver, stamp))
1255 return Completer(tasks, message='check_initscripts').\
1256 run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1258 def check_initscripts(self):
1259 "check that the initscripts have triggered"
1260 return self.do_check_initscripts()
1262 def initscripts(self):
1263 "create initscripts with PLCAPI"
1264 for initscript in self.plc_spec['initscripts']:
1265 utils.pprint('Adding Initscript in plc {}'.format(self.plc_spec['name']), initscript)
1266 self.apiserver.AddInitScript(self.auth_root(), initscript['initscript_fields'])
1269 def delete_initscripts(self):
1270 "delete initscripts with PLCAPI"
1271 for initscript in self.plc_spec['initscripts']:
1272 initscript_name = initscript['initscript_fields']['name']
1273 print(('Attempting to delete {} in plc {}'.format(initscript_name, self.plc_spec['name'])))
1275 self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
1276 print(initscript_name, 'deleted')
1278 print('deletion went wrong - probably did not exist')
1283 "create slices with PLCAPI"
1284 return self.do_slices(action="add")
1286 def delete_slices(self):
1287 "delete slices with PLCAPI"
1288 return self.do_slices(action="delete")
1290 def fill_slices(self):
1291 "add nodes in slices with PLCAPI"
1292 return self.do_slices(action="fill")
1294 def empty_slices(self):
1295 "remove nodes from slices with PLCAPI"
1296 return self.do_slices(action="empty")
1298 def do_slices(self, action="add"):
1299 for slice in self.plc_spec['slices']:
1300 site_spec = self.locate_site(slice['sitename'])
1301 test_site = TestSite(self,site_spec)
1302 test_slice=TestSlice(self,test_site,slice)
1303 if action == "delete":
1304 test_slice.delete_slice()
1305 elif action == "fill":
1306 test_slice.add_nodes()
1307 elif action == "empty":
1308 test_slice.delete_nodes()
1310 test_slice.create_slice()
1313 @slice_mapper__tasks(20, 10, 15)
1314 def ssh_slice(self): pass
1315 @slice_mapper__tasks(20, 19, 15)
1316 def ssh_slice_off(self): pass
1317 @slice_mapper__tasks(1, 1, 15)
1318 def slice_fs_present(self): pass
1319 @slice_mapper__tasks(1, 1, 15)
1320 def slice_fs_deleted(self): pass
1322 # use another name so we can exclude/ignore it from the tests on the nightly command line
1323 def ssh_slice_again(self): return self.ssh_slice()
1324 # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1325 # but for some reason the ignore-wrapping thing would not
1328 def ssh_slice_basics(self): pass
1330 def check_vsys_defaults(self): pass
1333 def keys_clear_known_hosts(self): pass
1335 def plcapi_urls(self):
1337 attempts to reach the PLCAPI with various forms for the URL
1339 return PlcapiUrlScanner(self.auth_root(), ip=self.vserverip).scan()
1341 def speed_up_slices(self):
1342 "tweak nodemanager cycle (wait time) to 30+/-10 s"
1343 return self._speed_up_slices (30, 10)
1344 def super_speed_up_slices(self):
1345 "dev mode: tweak nodemanager cycle (wait time) to 5+/-1 s"
1346 return self._speed_up_slices(5, 1)
1348 def _speed_up_slices(self, p, r):
1349 # create the template on the server-side
1350 template = "{}.nodemanager".format(self.name())
1351 with open(template,"w") as template_file:
1352 template_file.write('OPTIONS="-p {} -r {} -d"\n'.format(p, r))
1353 in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1354 remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1355 self.test_ssh.copy_abs(template, remote)
1357 if not self.apiserver.GetConfFiles(self.auth_root(),
1358 {'dest' : '/etc/sysconfig/nodemanager'}):
1359 self.apiserver.AddConfFile(self.auth_root(),
1360 {'dest' : '/etc/sysconfig/nodemanager',
1361 'source' : 'PlanetLabConf/nodemanager',
1362 'postinstall_cmd' : 'service nm restart',})
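# in short: this drops a nodemanager sysconfig template under the guest's PlanetLabConf/ area
# and registers it as a ConfFile for /etc/sysconfig/nodemanager, so nodes pick up the shorter
# polling cycle at their next update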
1365 def debug_nodemanager(self):
1366 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1367 template = "{}.nodemanager".format(self.name())
1368 with open(template,"w") as template_file:
1369 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1370 in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1371 remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1372 self.test_ssh.copy_abs(template, remote)
1376 def qemu_start(self) : pass
1379 def qemu_timestamp(self) : pass
1382 def qemu_nodefamily(self): pass
1384 # when a spec refers to a node possibly on another plc
1385 def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
1386 for plc in [ self ] + other_plcs:
1388 return plc.locate_sliver_obj(nodename, slicename)
1391 raise Exception("Cannot locate sliver {}@{} among all PLCs".format(nodename, slicename))
1393 # implement this one as a cross step so that we can take advantage of different nodes
1394 # in multi-plcs mode
1395 def cross_check_tcp(self, other_plcs):
1396 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1397 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1398 utils.header("check_tcp: no/empty config found")
1400 specs = self.plc_spec['tcp_specs']
1403 # first wait for the network to be up and ready from within the slices
1404 class CompleterTaskNetworkReadyInSliver(CompleterTask):
1405 def __init__(self, test_sliver):
1406 self.test_sliver = test_sliver
1407 def actual_run(self):
1408 return self.test_sliver.check_tcp_ready(port = 9999)
1410 return "network ready checker for {}".format(self.test_sliver.name())
1411 def failure_epilogue(self):
1412 print("could not bind port from sliver {}".format(self.test_sliver.name()))
1416 managed_sliver_names = set()
1418 # locate the TestSliver instances involved, and cache them in the spec instance
1419 spec['s_sliver'] = self.locate_sliver_obj_cross(spec['server_node'], spec['server_slice'], other_plcs)
1420 spec['c_sliver'] = self.locate_sliver_obj_cross(spec['client_node'], spec['client_slice'], other_plcs)
1421 message = "Will check TCP between s={} and c={}"\
1422 .format(spec['s_sliver'].name(), spec['c_sliver'].name())
1423 if 'client_connect' in spec:
1424 message += " (using {})".format(spec['client_connect'])
1425 utils.header(message)
1426 # we need to check network presence in both slivers, but also
1427 # avoid inserting a sliver several times
1428 for sliver in [ spec['s_sliver'], spec['c_sliver'] ]:
1429 if sliver.name() not in managed_sliver_names:
1430 tasks.append(CompleterTaskNetworkReadyInSliver(sliver))
1431 # add this sliver's name in the set
1432 managed_sliver_names .update( {sliver.name()} )
1434 # wait for the network to be OK on all server sides
1435 if not Completer(tasks, message='check for network readiness in slivers').\
1436 run(timedelta(seconds=30), timedelta(seconds=24), period=timedelta(seconds=5)):
1439 # run server and client
1443 # the issue here is that we have the server run in background
1444 # and so we have no clue if it took off properly or not
1445 # looks like in some cases it does not
1446 if not spec['s_sliver'].run_tcp_server(port, timeout=20):
1450 # idem for the client side
1451 # use nodename from located sliver, unless 'client_connect' is set
1452 if 'client_connect' in spec:
1453 destination = spec['client_connect']
1455 destination = spec['s_sliver'].test_node.name()
1456 if not spec['c_sliver'].run_tcp_client(destination, port):
1460 # painfully enough, we need to allow for some time as netflow might show up last
1461 def check_system_slice(self):
1462 "all nodes: check that a system slice is alive"
1463 # netflow currently not working in the lxc distro
1464 # drl not built at all in the wtx distro
1465 # if we find either of them we're happy
1466 return self.check_netflow() or self.check_drl()
1469 def check_netflow(self): return self._check_system_slice('netflow')
1470 def check_drl(self): return self._check_system_slice('drl')
1472 # we have the slices up already here, so it should not take too long
1473 def _check_system_slice(self, slicename, timeout_minutes=5, period_seconds=15):
1474 class CompleterTaskSystemSlice(CompleterTask):
1475 def __init__(self, test_node, dry_run):
1476 self.test_node = test_node
1477 self.dry_run = dry_run
1478 def actual_run(self):
1479 return self.test_node._check_system_slice(slicename, dry_run=self.dry_run)
1481 return "System slice {} @ {}".format(slicename, self.test_node.name())
1482 def failure_epilogue(self):
1483 print("COULD not find system slice {} @ {}".format(slicename, self.test_node.name()))
1484 timeout = timedelta(minutes=timeout_minutes)
1485 silent = timedelta(0)
1486 period = timedelta(seconds=period_seconds)
1487 tasks = [ CompleterTaskSystemSlice(test_node, self.options.dry_run) \
1488 for test_node in self.all_nodes() ]
1489 return Completer(tasks, message='_check_system_slice').run(timeout, silent, period)
1491 def plcsh_stress_test(self):
1492 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1493 # install the stress-test in the plc image
1494 location = "/usr/share/plc_api/plcsh_stress_test.py"
1495 remote = "{}/{}".format(self.vm_root_in_host(), location)
1496 self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1498 command += " -- --check"
1499 if self.options.size == 1:
1500 command += " --tiny"
1501 return self.run_in_guest(command) == 0
1503 # populate runs the same utility with slightly different options
1504 # in particular runs with --preserve (don't cleanup) and without --check
1505 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1507 def sfa_install_all(self):
1508 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1509 return self.yum_install("sfa sfa-plc sfa-sfatables sfa-client")
1511 def sfa_install_core(self):
1513 return self.yum_install("sfa")
1515 def sfa_install_plc(self):
1516 "yum install sfa-plc"
1517 return self.yum_install("sfa-plc")
1519 def sfa_install_sfatables(self):
1520 "yum install sfa-sfatables"
1521 return self.yum_install("sfa-sfatables")
1523 # for some very odd reason, this sometimes fails with the following symptom
1524 # # yum install sfa-client
1525 # Setting up Install Process
1527 # Downloading Packages:
1528 # Running rpm_check_debug
1529 # Running Transaction Test
1530 # Transaction Test Succeeded
1531 # Running Transaction
1532 # Transaction couldn't start:
1533 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1534 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1535 # even though in the same context I have
1536 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1537 # Filesystem Size Used Avail Use% Mounted on
1538 # /dev/hdv1 806G 264G 501G 35% /
1539 # none 16M 36K 16M 1% /tmp
1541 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1542 def sfa_install_client(self):
1543 "yum install sfa-client"
1544 first_try = self.yum_install("sfa-client")
1547 utils.header("********** Regular yum failed - special workaround in place, 2nd chance")
1548 code, cached_rpm_path = \
1549 utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1550 utils.header("cached_rpm_path=<<{}>>".format(cached_rpm_path))
1552 self.run_in_guest("rpm -i {}".format(cached_rpm_path))
1553 return self.yum_check_installed("sfa-client")
1555 def sfa_dbclean(self):
1556 "thoroughly wipes off the SFA database"
1557 return self.run_in_guest("sfaadmin reg nuke") == 0 or \
1558 self.run_in_guest("sfa-nuke.py") == 0 or \
1559 self.run_in_guest("sfa-nuke-plc.py") == 0 or \
1560 self.run_in_guest("sfaadmin registry nuke") == 0
1562 def sfa_fsclean(self):
1563 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1564 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1567 def sfa_plcclean(self):
1568 "cleans the PLC entries that were created as a side effect of running the script"
1570 sfa_spec = self.plc_spec['sfa']
1572 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1573 login_base = auth_sfa_spec['login_base']
1575 self.apiserver.DeleteSite(self.auth_root(),login_base)
1577 print("Site {} already absent from PLC db".format(login_base))
1579 for spec_name in ['pi_spec','user_spec']:
1580 user_spec = auth_sfa_spec[spec_name]
1581 username = user_spec['email']
1583 self.apiserver.DeletePerson(self.auth_root(),username)
1585 # this in fact is expected as sites delete their members
1586 #print "User {} already absent from PLC db".format(username)
1589 print("REMEMBER TO RUN sfa_import AGAIN")
1592 def sfa_uninstall(self):
1593 "uses rpm to uninstall sfa - ignore result"
1594 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1595 self.run_in_guest("rm -rf /var/lib/sfa")
1596 self.run_in_guest("rm -rf /etc/sfa")
1597 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1599 self.run_in_guest("rpm -e --noscripts sfa-plc")
1602 ### run unit tests for SFA
1603 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1604 # Running Transaction
1605 # Transaction couldn't start:
1606 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1607 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1608 # no matter how many Gbs are available on the testplc
1609 # could not figure out what's wrong, so...
1610 # if the yum install phase fails, consider the test is successful
1611 # other combinations will eventually run it hopefully
1612 def sfa_utest(self):
1613 "yum install sfa-tests and run SFA unittests"
1614 self.run_in_guest("yum -y install sfa-tests")
1615 # failed to install - forget it
1616 if self.run_in_guest("rpm -q sfa-tests") != 0:
1617 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1619 return self.run_in_guest("/usr/share/sfa/tests/testAll.py") == 0
1623 dirname = "conf.{}".format(self.plc_spec['name'])
1624 if not os.path.isdir(dirname):
1625 utils.system("mkdir -p {}".format(dirname))
1626 if not os.path.isdir(dirname):
1627 raise Exception("Cannot create config dir for plc {}".format(self.name()))
1630 def conffile(self, filename):
1631 return "{}/{}".format(self.confdir(), filename)
1632 def confsubdir(self, dirname, clean, dry_run=False):
1633 subdirname = "{}/{}".format(self.confdir(), dirname)
1634 if clean:
1635 utils.system("rm -rf {}".format(subdirname))
1636 if not os.path.isdir(subdirname):
1637 utils.system("mkdir -p {}".format(subdirname))
1638 if not dry_run and not os.path.isdir(subdirname):
1639 raise Exception("Cannot create config subdir {} for plc {}".format(dirname, self.name()))
1640 return subdirname
1642 def conffile_clean(self, filename):
1643 filename=self.conffile(filename)
1644 return utils.system("rm -rf {}".format(filename))==0
1647 def sfa_configure(self):
1648 "run sfa-config-tty"
1649 tmpname = self.conffile("sfa-config-tty")
1650 with open(tmpname,'w') as fileconf:
1651 for (var,value) in self.plc_spec['sfa']['settings'].items():
1652 fileconf.write('e {}\n{}\n'.format(var, value))
1653 fileconf.write('w\n')
1654 fileconf.write('R\n')
1655 fileconf.write('q\n')
1656 utils.system('cat {}'.format(tmpname))
1657 self.run_in_guest_piped('cat {}'.format(tmpname), 'sfa-config-tty')
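# For illustration only (assumed value): a settings entry SFA_REGISTRY_ROOT_AUTH = 'plc1'
# results in the following sfa-config-tty input script:
#   e SFA_REGISTRY_ROOT_AUTH
#   plc1
#   w
#   R
#   q
# i.e. one 'e <variable>' / value pair per setting, followed by the write ('w') and quit commands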
1660 def aggregate_xml_line(self):
1661 port = self.plc_spec['sfa']['neighbours-port']
1662 return '<aggregate addr="{}" hrn="{}" port="{}"/>'\
1663 .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'], port)
1665 def registry_xml_line(self):
1666 return '<registry addr="{}" hrn="{}" port="12345"/>'\
1667 .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
1670 # a cross step that takes all other plcs in argument
1671 def cross_sfa_configure(self, other_plcs):
1672 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1673 # of course with a single plc, other_plcs is an empty list
1674 if not other_plcs:
1675 return True
1676 agg_fname = self.conffile("agg.xml")
1677 with open(agg_fname,"w") as out:
1678 out.write("<aggregates>{}</aggregates>\n"\
1679 .format(" ".join([ plc.aggregate_xml_line() for plc in other_plcs ])))
1680 utils.header("(Over)wrote {}".format(agg_fname))
1681 reg_fname=self.conffile("reg.xml")
1682 with open(reg_fname,"w") as out:
1683 out.write("<registries>{}</registries>\n"\
1684 .format(" ".join([ plc.registry_xml_line() for plc in other_plcs ])))
1685 utils.header("(Over)wrote {}".format(reg_fname))
1686 return self.test_ssh.copy_abs(agg_fname,
1687 '/{}/etc/sfa/aggregates.xml'.format(self.vm_root_in_host())) == 0 \
1688 and self.test_ssh.copy_abs(reg_fname,
1689 '/{}/etc/sfa/registries.xml'.format(self.vm_root_in_host())) == 0
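# Sketch of the generated files, with assumed values: for one peer plc whose vserverip is
# 192.168.0.2, whose SFA_REGISTRY_ROOT_AUTH is 'plc2' and whose neighbours-port is 12346,
# agg.xml would contain
#   <aggregates><aggregate addr="192.168.0.2" hrn="plc2" port="12346"/></aggregates>
# and reg.xml the matching
#   <registries><registry addr="192.168.0.2" hrn="plc2" port="12345"/></registries>
# both files are then pushed under /etc/sfa/ inside the guest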
1691 def sfa_import(self):
1692 "use sfaadmin to import from plc"
1693 auth = self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']
1694 return self.run_in_guest('sfaadmin reg import_registry') == 0
1696 def sfa_start(self):
1698 return self.start_service('sfa')
1701 def sfi_configure(self):
1702 "Create /root/sfi on the plc side for sfi client configuration"
1703 if self.options.dry_run:
1704 utils.header("DRY RUN - skipping step")
1705 return True
1706 sfa_spec = self.plc_spec['sfa']
1707 # cannot use auth_sfa_mapper to pass dir_name
1708 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1709 test_slice = TestAuthSfa(self, slice_spec)
1710 dir_basename = os.path.basename(test_slice.sfi_path())
1711 dir_name = self.confsubdir("dot-sfi/{}".format(dir_basename),
1712 clean=True, dry_run=self.options.dry_run)
1713 test_slice.sfi_configure(dir_name)
1714 # push into the remote /root/sfi area
1715 location = test_slice.sfi_path()
1716 remote = "{}/{}".format(self.vm_root_in_host(), location)
1717 self.test_ssh.mkdir(remote, abs=True)
1718 # need to strip the last level of remote, otherwise we end up with an extra dir level
1719 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1720 return True
1723 def sfi_clean(self):
1724 "clean up /root/sfi on the plc side"
1725 self.run_in_guest("rm -rf /root/sfi")
1726 return True
1728 def sfa_rspec_empty(self):
1729 "expose a static empty rspec (ships with the tests module) in the sfi directory"
1730 filename = "empty-rspec.xml"
1732 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1733 test_slice = TestAuthSfa(self, slice_spec)
1734 in_vm = test_slice.sfi_path()
1735 remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1736 if self.test_ssh.copy_abs(filename, remote) != 0:
1737 return False
1738 return True
1741 def sfa_register_site(self): pass
1743 def sfa_register_pi(self): pass
1745 def sfa_register_user(self): pass
1747 def sfa_update_user(self): pass
1749 def sfa_register_slice(self): pass
1751 def sfa_renew_slice(self): pass
1753 def sfa_get_expires(self): pass
1755 def sfa_discover(self): pass
1757 def sfa_rspec(self): pass
1759 def sfa_allocate(self): pass
1761 def sfa_allocate_empty(self): pass
1763 def sfa_provision(self): pass
1765 def sfa_provision_empty(self): pass
1767 def sfa_check_slice_plc(self): pass
1769 def sfa_check_slice_plc_empty(self): pass
1771 def sfa_update_slice(self): pass
1773 def sfa_remove_user_from_slice(self): pass
1775 def sfa_insert_user_in_slice(self): pass
1777 def sfi_list(self): pass
1779 def sfi_show_site(self): pass
1781 def sfi_show_slice(self): pass
1783 def sfi_show_slice_researchers(self): pass
1785 def ssh_slice_sfa(self): pass
1787 def sfa_delete_user(self): pass
1789 def sfa_delete_slice(self): pass
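# assumption: each of the sfa_* / sfi_* / ssh_slice_sfa step stubs above is wrapped by a
# mapper decorator (not visible in this extract) that redirects the call to the method of
# the same name on TestAuthSfa; the bonding_* steps below use the same pattern with
# bonding_redirector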
1791 def sfa_stop(self):
1792 "service sfa stop"
1793 return self.stop_service('sfa')
1796 "creates random entries in the PLCAPI"
1797 # install the stress-test in the plc image
1798 location = "/usr/share/plc_api/plcsh_stress_test.py"
1799 remote = "{}/{}".format(self.vm_root_in_host(), location)
1800 self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1801 command = location
1802 command += " -- --preserve --short-names"
1803 local_ok = self.run_in_guest(command) == 0
1804 # second run with --foreign
1805 command += ' --foreign'
1806 foreign_ok = self.run_in_guest(command) == 0
1807 return local_ok and foreign_ok
1810 ####################
1811 @bonding_redirector
1812 def bonding_init_partial(self): pass
1814 @bonding_redirector
1815 def bonding_add_yum(self): pass
1817 @bonding_redirector
1818 def bonding_install_rpms(self): pass
1820 ####################
1822 def gather_logs(self):
1823 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1824 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1825 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1826 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1827 # (2) get each node's qemu log and store it as logs/node.qemu.<node>.log
1828 # (3) get each node's /var/log and store it as logs/node.var-log.<node>/*
1829 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1831 print("-------------------- TestPlc.gather_logs : PLC's /var/log")
1832 self.gather_var_logs()
1834 print("-------------------- TestPlc.gather_logs : PLC's /var/lib/pgsql/data/pg_log/")
1835 self.gather_pgsql_logs()
1837 print("-------------------- TestPlc.gather_logs : PLC's /root/sfi/")
1838 self.gather_root_sfi()
1840 print("-------------------- TestPlc.gather_logs : nodes' QEMU logs")
1841 for site_spec in self.plc_spec['sites']:
1842 test_site = TestSite(self,site_spec)
1843 for node_spec in site_spec['nodes']:
1844 test_node = TestNode(self, test_site, node_spec)
1845 test_node.gather_qemu_logs()
1847 print("-------------------- TestPlc.gather_logs : nodes' /var/log")
1848 self.gather_nodes_var_logs()
1850 print("-------------------- TestPlc.gather_logs : sample slivers' /var/log")
1851 self.gather_slivers_var_logs()
1852 return True
1854 def gather_slivers_var_logs(self):
1855 for test_sliver in self.all_sliver_objs():
1856 remote = test_sliver.tar_var_logs()
1857 utils.system("mkdir -p logs/sliver.var-log.{}".format(test_sliver.name()))
1858 command = remote + " | tar -C logs/sliver.var-log.{} -xf -".format(test_sliver.name())
1859 utils.system(command)
1862 def gather_var_logs(self):
1863 utils.system("mkdir -p logs/myplc.var-log.{}".format(self.name()))
1864 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1865 command = to_plc + "| tar -C logs/myplc.var-log.{} -xf -".format(self.name())
1866 utils.system(command)
1867 command = "chmod a+r,a+x logs/myplc.var-log.{}/httpd".format(self.name())
1868 utils.system(command)
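# roughly speaking, and assuming actual_command_in_guest wraps its argument in an ssh
# invocation, the pipeline above amounts to something like
#   ssh root@<plc> 'tar -C /var/log/ -cf - .' | tar -C logs/myplc.var-log.<plcname> -xf -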
1870 def gather_pgsql_logs(self):
1871 utils.system("mkdir -p logs/myplc.pgsql-log.{}".format(self.name()))
1872 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1873 command = to_plc + "| tar -C logs/myplc.pgsql-log.{} -xf -".format(self.name())
1874 utils.system(command)
1876 def gather_root_sfi(self):
1877 utils.system("mkdir -p logs/sfi.{}".format(self.name()))
1878 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1879 command = to_plc + "| tar -C logs/sfi.{} -xf -".format(self.name())
1880 utils.system(command)
1882 def gather_nodes_var_logs(self):
1883 for site_spec in self.plc_spec['sites']:
1884 test_site = TestSite(self, site_spec)
1885 for node_spec in site_spec['nodes']:
1886 test_node = TestNode(self, test_site, node_spec)
1887 test_ssh = TestSsh(test_node.name(), key="keys/key_admin.rsa")
1888 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1889 command = command + "| tar -C logs/node.var-log.{} -xf -".format(test_node.name())
1890 utils.system("mkdir -p logs/node.var-log.{}".format(test_node.name()))
1891 utils.system(command)
1894 # returns the filename to use for sql dump/restore, using options.dbname if set
1895 def dbfile(self, database):
1896 # uses options.dbname if it is found
1897 try:
1898 name = self.options.dbname
1899 if not isinstance(name, str):
1900 raise Exception
1901 except:
1902 # no usable dbname was provided - fall back on today's date
1903 name = str(datetime.now().date())
1905 return "/root/{}-{}.sql".format(database, name)
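# for instance (made-up values): dbfile("planetlab5") returns "/root/planetlab5-nightly.sql"
# when options.dbname is set to "nightly", and something like "/root/planetlab5-2015-06-01.sql"
# otherwise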
1907 def plc_db_dump(self):
1908 'dump the planetlab5 DB in /root in the PLC - filename has time'
1909 dump = self.dbfile("planetlab5")
1910 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f ' + dump)
1911 utils.header('Dumped planetlab5 database in {}'.format(dump))
1912 return True
1914 def plc_db_restore(self):
1915 'restore the planetlab5 DB - looks broken, but run -n might help'
1916 dump = self.dbfile("planetlab5")
1917 ##stop httpd service
1918 self.run_in_guest('service httpd stop')
1919 # xxx - need another wrapper
1920 self.run_in_guest_piped('echo drop database planetlab5', 'psql --user=pgsqluser template1')
1921 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1922 self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump)
1923 ##starting httpd service
1924 self.run_in_guest('service httpd start')
1926 utils.header('Database restored from ' + dump)
1929 def create_ignore_steps():
1930 for step in TestPlc.default_steps + TestPlc.other_steps:
1931 # default step can have a plc qualifier
1932 if '@' in step:
1933 step, qualifier = step.split('@')
1934 # or be defined as forced or ignored by default
1935 for keyword in ['_ignore','_force']:
1936 if step.endswith(keyword):
1937 step=step.replace(keyword,'')
1938 if step == SEP or step == SEPSFA:
1939 continue
1940 method = getattr(TestPlc,step)
1941 name = step + '_ignore'
1942 wrapped = ignore_result(method)
1943 # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
1944 setattr(TestPlc, name, wrapped)
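# for instance, from the 'sfa_plcclean' step this creates a new 'sfa_plcclean_ignore' step
# that runs the same code but whose (possibly failed) result is ignored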
1947 # def ssh_slice_again_ignore (self): pass
1949 # def check_initscripts_ignore (self): pass
1951 def standby_1_through_20(self):
1952 """convenience function to wait for a specified number of minutes"""
1955 def standby_1(): pass
1957 def standby_2(): pass
1959 def standby_3(): pass
1961 def standby_4(): pass
1963 def standby_5(): pass
1965 def standby_6(): pass
1967 def standby_7(): pass
1969 def standby_8(): pass
1971 def standby_9(): pass
1973 def standby_10(): pass
1975 def standby_11(): pass
1977 def standby_12(): pass
1979 def standby_13(): pass
1981 def standby_14(): pass
1983 def standby_15(): pass
1985 def standby_16(): pass
1987 def standby_17(): pass
1989 def standby_18(): pass
1991 def standby_19(): pass
1993 def standby_20(): pass
1995 # convenience for debugging the test logic
1996 def yes(self): return True
1997 def no(self): return False
1998 def fail(self): return False