1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
12 from Completer import Completer, CompleterTask
13 from TestSite import TestSite
14 from TestNode import TestNode, CompleterTaskNodeSsh
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
25 from TestBonding import TestBonding
27 has_sfa_cache_filename="sfa-cache"
29 # step methods must take (self) and return a boolean (options is a member of the class)
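# as an illustration only - a hypothetical step could look like this
# (the name and the command are made up, not part of the actual test suite):
#
#     def my_step(self):
#         "one-line description, picked up when listing steps"
#         return self.run_in_host("true") == 0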
31 def standby(minutes, dry_run):
32 utils.header('Entering StandBy for {:d} minutes'.format(minutes))
36 time.sleep(60*minutes)
39 def standby_generic(func):
41 minutes = int(func.__name__.split("_")[1])
42 return standby(minutes, self.options.dry_run)
45 def node_mapper(method):
46 def map_on_nodes(self, *args, **kwds):
48 node_method = TestNode.__dict__[method.__name__]
49 for test_node in self.all_nodes():
50 if not node_method(test_node, *args, **kwds):
53 # maintain __name__ for ignore_result
54 map_on_nodes.__name__ = method.__name__
55 # restore the doc text
56 map_on_nodes.__doc__ = TestNode.__dict__[method.__name__].__doc__
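# plausible usage sketch: a TestPlc step decorated this way simply delegates
# to the TestNode method of the same name, e.g. (hypothetical illustration)
#
#     @node_mapper
#     def qemu_local_init(self): pass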
59 def slice_mapper(method):
60 def map_on_slices(self):
62 slice_method = TestSlice.__dict__[method.__name__]
63 for slice_spec in self.plc_spec['slices']:
64 site_spec = self.locate_site (slice_spec['sitename'])
65 test_site = TestSite(self,site_spec)
66 test_slice = TestSlice(self,test_site,slice_spec)
67 if not slice_method(test_slice, self.options):
70 # maintain __name__ for ignore_result
71 map_on_slices.__name__ = method.__name__
72 # restore the doc text
73 map_on_slices.__doc__ = TestSlice.__dict__[method.__name__].__doc__
76 def bonding_redirector(method):
77 bonding_name = method.__name__.replace('bonding_', '')
79 bonding_method = TestBonding.__dict__[bonding_name]
80 return bonding_method(self.test_bonding)
81 # maintain __name__ for ignore_result
82 redirect.__name__ = method.__name__
83 # restore the doc text
84 redirect.__doc__ = TestBonding.__dict__[bonding_name].__doc__
87 # run a step but return True so that we can go on
88 def ignore_result(method):
90 # ssh_slice_ignore->ssh_slice
91 ref_name = method.__name__.replace('_ignore', '').replace('force_', '')
92 ref_method = TestPlc.__dict__[ref_name]
93 result = ref_method(self)
94 print("Actual (but ignored) result for {ref_name} is {result}".format(**locals()))
95 return Ignored(result)
96 name = method.__name__.replace('_ignore', '').replace('force_', '')
97 ignoring.__name__ = name
98 ignoring.__doc__ = "ignored version of " + name
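# plausible usage sketch, based on the name-mangling above: a step named
# xxx_ignore runs the regular xxx step but lets the sequence go on regardless
#
#     @ignore_result
#     def slice_fs_deleted_ignore(self): pass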
101 # a variant that expects the TestSlice method to return a list of CompleterTasks that
102 # are then merged into a single Completer run to avoid waiting for all the slices
103 # especially useful, of course, when a test fails
104 # because we need to pass arguments we use a class instead..
105 class slice_mapper__tasks(object):
106 # could not get this to work with named arguments
107 def __init__(self, timeout_minutes, silent_minutes, period_seconds):
108 self.timeout = timedelta(minutes = timeout_minutes)
109 self.silent = timedelta(minutes = silent_minutes)
110 self.period = timedelta(seconds = period_seconds)
111 def __call__(self, method):
113 # compute augmented method name
114 method_name = method.__name__ + "__tasks"
115 # locate in TestSlice
116 slice_method = TestSlice.__dict__[ method_name ]
119 for slice_spec in self.plc_spec['slices']:
120 site_spec = self.locate_site (slice_spec['sitename'])
121 test_site = TestSite(self, site_spec)
122 test_slice = TestSlice(self, test_site, slice_spec)
123 tasks += slice_method (test_slice, self.options)
124 return Completer (tasks, message=method.__name__).\
125 run(decorator_self.timeout, decorator_self.silent, decorator_self.period)
126 # restore the doc text from the TestSlice method even if a bit odd
127 wrappee.__name__ = method.__name__
128 wrappee.__doc__ = slice_method.__doc__
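# usage sketch - mirrors the real declarations further down, e.g.
#
#     @slice_mapper__tasks(20, 10, 15)
#     def ssh_slice(self): pass
#
# the TestSlice counterpart is then expected to be named ssh_slice__tasks
# and to return a list of CompleterTask objects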
131 def auth_sfa_mapper(method):
134 auth_method = TestAuthSfa.__dict__[method.__name__]
135 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
136 test_auth = TestAuthSfa(self, auth_spec)
137 if not auth_method(test_auth, self.options):
140 # restore the doc text
141 actual.__doc__ = TestAuthSfa.__dict__[method.__name__].__doc__
145 def __init__(self, result):
155 'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
156 'plc_install', 'plc_configure', 'plc_start', SEP,
157 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
158 'plcapi_urls','speed_up_slices', SEP,
159 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
160 # slices created interactively under plcsh seem to be fine, but these ones don't have the tags
161 # keep this out of the way for now
162 'check_vsys_defaults_ignore', SEP,
163 # run this first off so it's easier to re-run on another qemu box
164 'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
165 'qemu_clean_mine', 'qemu_export', 'qemu_cleanlog', SEP,
166 'qemu_start', 'qemu_timestamp', 'qemu_nodefamily', SEP,
167 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
168 'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
169 'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
170 'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1',
171 'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
172 'sfa_discover@1', 'sfa_rspec@1', SEPSFA,
173 'sfa_allocate@1', 'sfa_provision@1', 'sfa_describe@1', SEPSFA,
174 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
175 'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
176 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
177 # but as the stress test might take a while, we sometimes missed the debug mode..
178 'probe_kvm_iptables',
179 'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
180 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts', SEP,
181 'ssh_slice_sfa@1', SEPSFA,
182 'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1','sfa_check_slice_plc_empty@1', SEPSFA,
183 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
184 'cross_check_tcp@1', 'check_system_slice', SEP,
185 # for inspecting the slice while it runs the first time
187 # check slices are turned off properly
189 'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
190 # check they are properly re-created with the same name
191 'fill_slices', 'ssh_slice_again', SEP,
192 'gather_logs_force', SEP,
195 'export', 'show_boxes', 'super_speed_up_slices', SEP,
196 'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
197 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
198 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
199 'delete_leases', 'list_leases', SEP,
201 'nodestate_show','nodestate_safeboot','nodestate_boot', 'nodestate_upgrade', SEP,
202 'nodedistro_show','nodedistro_f14','nodedistro_f18', SEP,
203 'nodedistro_f20', 'nodedistro_f21','nodedistro_f22', SEP,
204 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
205 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
206 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
207 'sfa_get_expires', SEPSFA,
208 'plc_db_dump' , 'plc_db_restore', SEP,
209 'check_netflow','check_drl', SEP,
210 'slice_fs_present', SEP,
211 'standby_1_through_20','yes','no',SEP,
212 'install_syslinux6', 'bonding_builds', 'bonding_nodes', SEP,
214 default_bonding_steps = [
215 'bonding_init_partial',
217 'bonding_install_rpms', SEP,
221 def printable_steps(list):
222 single_line = " ".join(list) + " "
223 return single_line.replace(" "+SEP+" ", " \\\n").replace(" "+SEPSFA+" ", " \\\n")
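# e.g. printable_steps(['plc_install', SEP, 'plc_start']) renders the two step
# names on separate lines, each SEP/SEPSFA marker becoming a trailing ' \' + newline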
225 def valid_step(step):
226 return step != SEP and step != SEPSFA
228 # turn off the sfa-related steps when build has skipped SFA
229 # this was originally for centos5 but is still valid
230 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
232 def _has_sfa_cached(rpms_url):
233 if os.path.isfile(has_sfa_cache_filename):
234 with open(has_sfa_cache_filename) as cache:
235 cached = cache.read() == "yes"
236 utils.header("build provides SFA (cached):{}".format(cached))
238 # warning, we're now building 'sface' so let's be a bit more picky
239 # full builds are expected to return with 0 here
240 utils.header("Checking if build provides SFA package...")
241 retcod = utils.system("curl --silent {}/ | grep -q sfa-".format(rpms_url)) == 0
242 encoded = 'yes' if retcod else 'no'
243 with open(has_sfa_cache_filename,'w') as cache:
248 def check_whether_build_has_sfa(rpms_url):
249 has_sfa = TestPlc._has_sfa_cached(rpms_url)
251 utils.header("build does provide SFA")
253 # move all steps containing 'sfa' from default_steps to other_steps
254 utils.header("SFA package not found - removing steps with sfa or sfi")
255 sfa_steps = [ step for step in TestPlc.default_steps
256 if step.find('sfa') >= 0 or step.find("sfi") >= 0 ]
257 TestPlc.other_steps += sfa_steps
258 for step in sfa_steps:
259 TestPlc.default_steps.remove(step)
261 def __init__(self, plc_spec, options):
262 self.plc_spec = plc_spec
263 self.options = options
264 self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
265 self.vserverip = plc_spec['vserverip']
266 self.vservername = plc_spec['vservername']
267 self.vplchostname = self.vservername.split('-')[-1]
268 self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
269 self.apiserver = TestApiserver(self.url, options.dry_run)
270 (self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
271 (self.ssh_node_debug_timeout, self.ssh_node_debug_silent) = plc_spec['ssh_node_debug_timers']
273 def has_addresses_api(self):
274 return self.apiserver.has_method('AddIpAddress')
277 name = self.plc_spec['name']
278 return "{}.{}".format(name,self.vservername)
281 return self.plc_spec['host_box']
284 return self.test_ssh.is_local()
286 # define the API methods on this object through xmlrpc
287 # would help, but not strictly necessary
291 def actual_command_in_guest(self,command, backslash=False):
292 raw1 = self.host_to_guest(command)
293 raw2 = self.test_ssh.actual_command(raw1, dry_run=self.options.dry_run, backslash=backslash)
296 def start_guest(self):
297 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),
298 dry_run=self.options.dry_run))
300 def stop_guest(self):
301 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),
302 dry_run=self.options.dry_run))
304 def run_in_guest(self, command, backslash=False):
305 raw = self.actual_command_in_guest(command, backslash)
306 return utils.system(raw)
308 def run_in_host(self,command):
309 return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
311 # backslashing turned out so awful at some point that I've turned off auto-backslashing
312 # see e.g. plc_start esp. the version for f14
313 # command gets run in the plc's vm
314 def host_to_guest(self, command):
315 ssh_leg = TestSsh(self.vplchostname)
316 return ssh_leg.actual_command(command, keep_stdin=True)
318 # this /vservers thing is legacy...
319 def vm_root_in_host(self):
320 return "/vservers/{}/".format(self.vservername)
322 def vm_timestamp_path(self):
323 return "/vservers/{}/{}.timestamp".format(self.vservername, self.vservername)
325 # start/stop the vserver
326 def start_guest_in_host(self):
327 return "virsh -c lxc:/// start {}".format(self.vservername)
329 def stop_guest_in_host(self):
330 return "virsh -c lxc:/// destroy {}".format(self.vservername)
333 def run_in_guest_piped(self,local,remote):
334 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),
337 def yum_check_installed(self, rpms):
338 if isinstance(rpms, list):
340 return self.run_in_guest("rpm -q {}".format(rpms)) == 0
342 # does a yum install in the vs, ignore yum retcod, check with rpm
343 def yum_install(self, rpms):
344 if isinstance(rpms, list):
346 yum_mode = self.run_in_guest("yum -y install {}".format(rpms))
348 self.run_in_guest("dnf -y install --allowerasing {}".format(rpms))
349 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
350 self.run_in_guest("yum-complete-transaction -y")
351 return self.yum_check_installed(rpms)
354 return {'Username' : self.plc_spec['settings']['PLC_ROOT_USER'],
355 'AuthMethod' : 'password',
356 'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
357 'Role' : self.plc_spec['role'],
360 def locate_site(self,sitename):
361 for site in self.plc_spec['sites']:
362 if site['site_fields']['name'] == sitename:
364 if site['site_fields']['login_base'] == sitename:
366 raise Exception("Cannot locate site {}".format(sitename))
368 def locate_node(self, nodename):
369 for site in self.plc_spec['sites']:
370 for node in site['nodes']:
371 if node['name'] == nodename:
373 raise Exception("Cannot locate node {}".format(nodename))
375 def locate_hostname(self, hostname):
376 for site in self.plc_spec['sites']:
377 for node in site['nodes']:
378 if node['node_fields']['hostname'] == hostname:
380 raise Exception("Cannot locate hostname {}".format(hostname))
382 def locate_key(self, key_name):
383 for key in self.plc_spec['keys']:
384 if key['key_name'] == key_name:
386 raise Exception("Cannot locate key {}".format(key_name))
388 def locate_private_key_from_key_names(self, key_names):
389 # locate the first avail. key
391 for key_name in key_names:
392 key_spec = self.locate_key(key_name)
393 test_key = TestKey(self,key_spec)
394 publickey = test_key.publicpath()
395 privatekey = test_key.privatepath()
396 if os.path.isfile(publickey) and os.path.isfile(privatekey):
403 def locate_slice(self, slicename):
404 for slice in self.plc_spec['slices']:
405 if slice['slice_fields']['name'] == slicename:
407 raise Exception("Cannot locate slice {}".format(slicename))
409 def all_sliver_objs(self):
411 for slice_spec in self.plc_spec['slices']:
412 slicename = slice_spec['slice_fields']['name']
413 for nodename in slice_spec['nodenames']:
414 result.append(self.locate_sliver_obj(nodename, slicename))
417 def locate_sliver_obj(self, nodename, slicename):
418 site,node = self.locate_node(nodename)
419 slice = self.locate_slice(slicename)
421 test_site = TestSite(self, site)
422 test_node = TestNode(self, test_site, node)
423 # xxx the slice site is assumed to be the node site - mhh - probably harmless
424 test_slice = TestSlice(self, test_site, slice)
425 return TestSliver(self, test_node, test_slice)
427 def locate_first_node(self):
428 nodename = self.plc_spec['slices'][0]['nodenames'][0]
429 site,node = self.locate_node(nodename)
430 test_site = TestSite(self, site)
431 test_node = TestNode(self, test_site, node)
434 def locate_first_sliver(self):
435 slice_spec = self.plc_spec['slices'][0]
436 slicename = slice_spec['slice_fields']['name']
437 nodename = slice_spec['nodenames'][0]
438 return self.locate_sliver_obj(nodename,slicename)
440 # all different hostboxes used in this plc
441 def get_BoxNodes(self):
442 # maps on sites and nodes, return [ (host_box,test_node) ]
444 for site_spec in self.plc_spec['sites']:
445 test_site = TestSite(self,site_spec)
446 for node_spec in site_spec['nodes']:
447 test_node = TestNode(self, test_site, node_spec)
448 if not test_node.is_real():
449 tuples.append( (test_node.host_box(),test_node) )
450 # transform into a dict { 'host_box' -> [ test_node .. ] }
452 for (box,node) in tuples:
453 if box not in result:
456 result[box].append(node)
459 # a step for checking this stuff
460 def show_boxes(self):
461 'print summary of nodes location'
462 for box,nodes in self.get_BoxNodes().items():
463 print(box,":"," + ".join( [ node.name() for node in nodes ] ))
466 # make this a valid step
467 def qemu_kill_all(self):
468 'kill all qemu instances on the qemu boxes involved by this setup'
469 # this is the brute force version, kill all qemus on that host box
470 for (box,nodes) in self.get_BoxNodes().items():
471 # pass the first nodename, as we don't push template-qemu on testboxes
472 nodedir = nodes[0].nodedir()
473 TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
476 # make this a valid step
477 def qemu_list_all(self):
478 'list all qemu instances on the qemu boxes involved by this setup'
479 for box,nodes in self.get_BoxNodes().items():
480 # this is the brute force version, kill all qemus on that host box
481 TestBoxQemu(box, self.options.buildname).qemu_list_all()
484 # kill only the qemus related to this test
485 def qemu_list_mine(self):
486 'list qemu instances for our nodes'
487 for (box,nodes) in self.get_BoxNodes().items():
488 # the fine-grain version
493 # kill only the qemus related to this test
494 def qemu_clean_mine(self):
495 'cleanup (rm -rf) qemu instances for our nodes'
496 for box,nodes in self.get_BoxNodes().items():
497 # the fine-grain version
502 # kill only the right qemus
503 def qemu_kill_mine(self):
504 'kill the qemu instances for our nodes'
505 for box,nodes in self.get_BoxNodes().items():
506 # the fine-grain version
511 #################### display config
513 "show test configuration after localization"
518 # ugly hack to make sure 'run export' only reports about the 1st plc
519 # to avoid confusion - also we use 'inri_slice1' in various aliases..
522 "print cut'n paste-able stuff to export env variables to your shell"
523 # guess local domain from hostname
524 if TestPlc.exported_id > 1:
525 print("export GUESTHOSTNAME{:d}={}".format(TestPlc.exported_id, self.plc_spec['vservername']))
527 TestPlc.exported_id += 1
528 domain = socket.gethostname().split('.',1)[1]
529 fqdn = "{}.{}".format(self.plc_spec['host_box'], domain)
530 print("export BUILD={}".format(self.options.buildname))
531 print("export PLCHOSTLXC={}".format(fqdn))
532 print("export GUESTNAME={}".format(self.vservername))
533 print("export GUESTHOSTNAME={}.{}".format(self.vplchostname, domain))
534 # find hostname of first node
535 hostname, qemubox = self.all_node_infos()[0]
536 print("export KVMHOST={}.{}".format(qemubox, domain))
537 print("export NODE={}".format(hostname))
541 always_display_keys=['PLC_WWW_HOST', 'nodes', 'sites']
542 def show_pass(self, passno):
543 for (key,val) in self.plc_spec.items():
544 if not self.options.verbose and key not in TestPlc.always_display_keys:
549 self.display_site_spec(site)
550 for node in site['nodes']:
551 self.display_node_spec(node)
552 elif key == 'initscripts':
553 for initscript in val:
554 self.display_initscript_spec(initscript)
555 elif key == 'slices':
557 self.display_slice_spec(slice)
560 self.display_key_spec(key)
562 if key not in ['sites', 'initscripts', 'slices', 'keys']:
563 print('+ ', key, ':', val)
565 def display_site_spec(self, site):
566 print('+ ======== site', site['site_fields']['name'])
567 for k,v in site.items():
568 if not self.options.verbose and k not in TestPlc.always_display_keys:
572 print('+ ','nodes : ', end=' ')
574 print(node['node_fields']['hostname'],'', end=' ')
578 print('+ users : ', end=' ')
580 print(user['name'],'', end=' ')
582 elif k == 'site_fields':
583 print('+ login_base', ':', v['login_base'])
584 elif k == 'address_fields':
590 def display_initscript_spec(self, initscript):
591 print('+ ======== initscript', initscript['initscript_fields']['name'])
593 def display_key_spec(self, key):
594 print('+ ======== key', key['key_name'])
596 def display_slice_spec(self, slice):
597 print('+ ======== slice', slice['slice_fields']['name'])
598 for k,v in slice.items():
601 print('+ nodes : ', end=' ')
603 print(nodename,'', end=' ')
605 elif k == 'usernames':
607 print('+ users : ', end=' ')
609 print(username,'', end=' ')
611 elif k == 'slice_fields':
612 print('+ fields',':', end=' ')
613 print('max_nodes=',v['max_nodes'], end=' ')
618 def display_node_spec(self, node):
619 print("+ node={} host_box={}".format(node['name'], node['host_box']), end=' ')
620 print("hostname=", node['node_fields']['hostname'], end=' ')
621 print("ip=", node['interface_fields']['ip'])
622 if self.options.verbose:
623 utils.pprint("node details", node, depth=3)
625 # another entry point for just showing the boxes involved
626 def display_mapping(self):
627 TestPlc.display_mapping_plc(self.plc_spec)
631 def display_mapping_plc(plc_spec):
632 print('+ MyPLC',plc_spec['name'])
633 # WARNING this would not be right for lxc-based PLC's - should be harmless though
634 print('+\tvserver address = root@{}:/vservers/{}'.format(plc_spec['host_box'], plc_spec['vservername']))
635 print('+\tIP = {}/{}'.format(plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip']))
636 for site_spec in plc_spec['sites']:
637 for node_spec in site_spec['nodes']:
638 TestPlc.display_mapping_node(node_spec)
641 def display_mapping_node(node_spec):
642 print('+ NODE {}'.format(node_spec['name']))
643 print('+\tqemu box {}'.format(node_spec['host_box']))
644 print('+\thostname={}'.format(node_spec['node_fields']['hostname']))
646 # write a timestamp in /vservers/<>.timestamp
647 # cannot be inside the vserver, that causes vserver .. build to cough
648 def plcvm_timestamp(self):
649 "Create a timestamp to remember creation date for this plc"
650 now = int(time.time())
651 # TODO-lxc check this one
652 # a first approx. is to store the timestamp close to the VM root like vs does
653 stamp_path = self.vm_timestamp_path()
654 stamp_dir = os.path.dirname(stamp_path)
655 utils.system(self.test_ssh.actual_command("mkdir -p {}".format(stamp_dir)))
656 return utils.system(self.test_ssh.actual_command("echo {:d} > {}".format(now, stamp_path))) == 0
658 # this is called unconditionally at the beginning of the test sequence
659 # just in case this is a rerun; it is fine if the vm is not running
660 def plcvm_delete(self):
661 "vserver delete the test myplc"
662 stamp_path = self.vm_timestamp_path()
663 self.run_in_host("rm -f {}".format(stamp_path))
664 self.run_in_host("virsh -c lxc:// destroy {}".format(self.vservername))
665 self.run_in_host("virsh -c lxc:// undefine {}".format(self.vservername))
666 self.run_in_host("rm -fr /vservers/{}".format(self.vservername))
670 # historically the build was being fetched by the tests
671 # now the build pushes itself as a subdir of the tests workdir
672 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
673 def plcvm_create(self):
674 "vserver creation (no install done)"
675 # push the local build/ dir to the testplc box
677 # a full path for the local calls
678 build_dir = os.path.dirname(sys.argv[0])
679 # sometimes this is empty - set to "." in such a case
682 build_dir += "/build"
684 # use a standard name - will be relative to remote buildname
686 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
687 self.test_ssh.rmdir(build_dir)
688 self.test_ssh.copy(build_dir, recursive=True)
689 # the repo url is taken from arch-rpms-url
690 # with the last step (i386) removed
691 repo_url = self.options.arch_rpms_url
692 for level in [ 'arch' ]:
693 repo_url = os.path.dirname(repo_url)
695 # invoke initvm (drop support for vs)
696 script = "lbuild-initvm.sh"
698 # pass the vbuild-nightly options to [lv]test-initvm
699 script_options += " -p {}".format(self.options.personality)
700 script_options += " -d {}".format(self.options.pldistro)
701 script_options += " -f {}".format(self.options.fcdistro)
702 script_options += " -r {}".format(repo_url)
703 vserver_name = self.vservername
705 vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
706 script_options += " -n {}".format(vserver_hostname)
708 print("Cannot reverse lookup {}".format(self.vserverip))
709 print("This is considered fatal, as this might pollute the test results")
711 create_vserver="{build_dir}/{script} {script_options} {vserver_name}".format(**locals())
712 return self.run_in_host(create_vserver) == 0
715 def plc_install(self):
717 yum install myplc, noderepo
721 if self.options.personality == "linux32":
723 elif self.options.personality == "linux64":
726 raise Exception("Unsupported personality {}".format(self.options.personality))
727 nodefamily = "{}-{}-{}".format(self.options.pldistro, self.options.fcdistro, arch)
730 pkgs_list.append("slicerepo-{}".format(nodefamily))
731 pkgs_list.append("myplc")
732 pkgs_list.append("noderepo-{}".format(nodefamily))
733 pkgs_string=" ".join(pkgs_list)
734 return self.yum_install(pkgs_list)
736 def install_syslinux6(self):
738 install syslinux6 from the fedora21 release
740 key = 'http://mirror.onelab.eu/keys/RPM-GPG-KEY-fedora-21-primary'
743 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-6.03-1.fc21.x86_64.rpm',
744 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-nonlinux-6.03-1.fc21.noarch.rpm',
745 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-perl-6.03-1.fc21.x86_64.rpm',
747 # this can be done several times
748 self.run_in_guest("rpm --import {key}".format(**locals()))
749 return self.run_in_guest("yum -y localinstall {}".format(" ".join(rpms))) == 0
751 def bonding_builds(self):
753 list /etc/yum.repos.d on the myplc side
755 self.run_in_guest("ls /etc/yum.repos.d/*partial.repo")
758 def bonding_nodes(self):
760 List nodes known to the myplc together with their nodefamily
762 print("---------------------------------------- nodes")
763 for node in self.apiserver.GetNodes(self.auth_root()):
764 print("{} -> {}".format(node['hostname'],
765 self.apiserver.GetNodeFlavour(self.auth_root(),node['hostname'])['nodefamily']))
766 print("---------------------------------------- nodes")
770 def mod_python(self):
771 """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
772 return self.yum_install( ['mod_python'] )
775 def plc_configure(self):
777 tmpname = '{}.plc-config-tty'.format(self.name())
778 with open(tmpname,'w') as fileconf:
779 for var, value in self.plc_spec['settings'].items():
780 fileconf.write('e {}\n{}\n'.format(var, value))
781 fileconf.write('w\n')
782 fileconf.write('q\n')
783 utils.system('cat {}'.format(tmpname))
784 self.run_in_guest_piped('cat {}'.format(tmpname), 'plc-config-tty')
785 utils.system('rm {}'.format(tmpname))
788 # f14 is a bit odd in this respect, although this worked fine in guests up to f18
789 # however using a vplc guest under f20 requires this trick
790 # the symptom is this: service plc start
791 # Starting plc (via systemctl): Failed to get D-Bus connection: \
792 # Failed to connect to socket /org/freedesktop/systemd1/private: Connection refused
793 # weird thing is the doc says f14 uses upstart by default and not systemd
794 # so this sounds kind of harmless
795 def start_service(self, service):
796 return self.start_stop_service(service, 'start')
797 def stop_service(self, service):
798 return self.start_stop_service(service, 'stop')
800 def start_stop_service(self, service, start_or_stop):
801 "utility to start/stop a service with the special trick starting with f14"
802 has_systemctl = False
803 if self.options.fcdistro[0] == 'f':
804 number = int(self.options.fcdistro[1:])
807 if not has_systemctl:
808 return self.run_in_guest("service {} {}".format(service, start_or_stop)) == 0
810 # patch /sbin/service so it does not reset environment
811 self.run_in_guest('sed -i -e \\"s,env -i,env,\\" /sbin/service')
812 # this is because our own scripts in turn call service
813 return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service {} {}"\
814 .format(service, start_or_stop)) == 0
818 return self.start_service('plc')
822 return self.stop_service('plc')
824 def plcvm_start(self):
825 "start the PLC vserver"
829 def plcvm_stop(self):
830 "stop the PLC vserver"
834 # stores the keys from the config for further use
835 def keys_store(self):
836 "stores test users ssh keys in keys/"
837 for key_spec in self.plc_spec['keys']:
838 TestKey(self,key_spec).store_key()
841 def keys_clean(self):
842 "removes keys cached in keys/"
843 utils.system("rm -rf ./keys")
846 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
847 # for later direct access to the nodes
848 def keys_fetch(self):
849 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
851 if not os.path.isdir(dir):
853 vservername = self.vservername
854 vm_root = self.vm_root_in_host()
856 prefix = 'debug_ssh_key'
857 for ext in ['pub', 'rsa'] :
858 src = "{vm_root}/etc/planetlab/{prefix}.{ext}".format(**locals())
859 dst = "keys/{vservername}-debug.{ext}".format(**locals())
860 if self.test_ssh.fetch(src, dst) != 0:
865 "create sites with PLCAPI"
866 return self.do_sites()
868 def delete_sites(self):
869 "delete sites with PLCAPI"
870 return self.do_sites(action="delete")
872 def do_sites(self, action="add"):
873 for site_spec in self.plc_spec['sites']:
874 test_site = TestSite(self,site_spec)
875 if (action != "add"):
876 utils.header("Deleting site {} in {}".format(test_site.name(), self.name()))
877 test_site.delete_site()
878 # deleted with the site
879 #test_site.delete_users()
882 utils.header("Creating site {} & users in {}".format(test_site.name(), self.name()))
883 test_site.create_site()
884 test_site.create_users()
887 def delete_all_sites(self):
888 "Delete all sites in PLC, and related objects"
889 print('auth_root', self.auth_root())
890 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
892 # keep the automatic site - otherwise we shoot ourselves in the foot, as root_auth would no longer be valid
893 if site['login_base'] == self.plc_spec['settings']['PLC_SLICE_PREFIX']:
895 site_id = site['site_id']
896 print('Deleting site_id', site_id)
897 self.apiserver.DeleteSite(self.auth_root(), site_id)
901 "create nodes with PLCAPI"
902 return self.do_nodes()
903 def delete_nodes(self):
904 "delete nodes with PLCAPI"
905 return self.do_nodes(action="delete")
907 def do_nodes(self, action="add"):
908 for site_spec in self.plc_spec['sites']:
909 test_site = TestSite(self, site_spec)
911 utils.header("Deleting nodes in site {}".format(test_site.name()))
912 for node_spec in site_spec['nodes']:
913 test_node = TestNode(self, test_site, node_spec)
914 utils.header("Deleting {}".format(test_node.name()))
915 test_node.delete_node()
917 utils.header("Creating nodes for site {} in {}".format(test_site.name(), self.name()))
918 for node_spec in site_spec['nodes']:
919 utils.pprint('Creating node {}'.format(node_spec), node_spec)
920 test_node = TestNode(self, test_site, node_spec)
921 test_node.create_node()
924 def nodegroups(self):
925 "create nodegroups with PLCAPI"
926 return self.do_nodegroups("add")
927 def delete_nodegroups(self):
928 "delete nodegroups with PLCAPI"
929 return self.do_nodegroups("delete")
933 def translate_timestamp(start, grain, timestamp):
934 if timestamp < TestPlc.YEAR:
935 return start + timestamp*grain
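# worked example (values are illustrative): with start=1600000000 and grain=1800,
# a relative spec of 2 yields 1600003600, i.e. two grains after 'start';
# values above TestPlc.YEAR are presumably absolute timestamps and left untouched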
940 def timestamp_printable(timestamp):
941 return time.strftime('%m-%d %H:%M:%S UTC', time.gmtime(timestamp))
944 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
945 now = int(time.time())
946 grain = self.apiserver.GetLeaseGranularity(self.auth_root())
947 print('API answered grain=', grain)
948 start = (now//grain)*grain
950 # find out all nodes that are reservable
951 nodes = self.all_reservable_nodenames()
953 utils.header("No reservable node found - proceeding without leases")
956 # attach them to the leases as specified in plc_specs
957 # this is where the 'leases' field gets interpreted as relative or absolute
958 for lease_spec in self.plc_spec['leases']:
959 # skip the ones that come with a null slice id
960 if not lease_spec['slice']:
962 lease_spec['t_from'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_from'])
963 lease_spec['t_until'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_until'])
964 lease_addition = self.apiserver.AddLeases(self.auth_root(), nodes, lease_spec['slice'],
965 lease_spec['t_from'], lease_spec['t_until'])
966 if lease_addition['errors']:
967 utils.header("Cannot create leases, {}".format(lease_addition['errors']))
970 utils.header('Leases on nodes {} for {} from {:d} ({}) until {:d} ({})'\
971 .format(nodes, lease_spec['slice'],
972 lease_spec['t_from'], TestPlc.timestamp_printable(lease_spec['t_from']),
973 lease_spec['t_until'], TestPlc.timestamp_printable(lease_spec['t_until'])))
977 def delete_leases(self):
978 "remove all leases in the myplc side"
979 lease_ids = [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
980 utils.header("Cleaning leases {}".format(lease_ids))
981 self.apiserver.DeleteLeases(self.auth_root(), lease_ids)
984 def list_leases(self):
985 "list all leases known to the myplc"
986 leases = self.apiserver.GetLeases(self.auth_root())
987 now = int(time.time())
989 current = l['t_until'] >= now
990 if self.options.verbose or current:
991 utils.header("{} {} from {} until {}"\
992 .format(l['hostname'], l['name'],
993 TestPlc.timestamp_printable(l['t_from']),
994 TestPlc.timestamp_printable(l['t_until'])))
997 # create nodegroups if needed, and populate
998 def do_nodegroups(self, action="add"):
999 # 1st pass to scan contents
1001 for site_spec in self.plc_spec['sites']:
1002 test_site = TestSite(self,site_spec)
1003 for node_spec in site_spec['nodes']:
1004 test_node = TestNode(self, test_site, node_spec)
1005 if 'nodegroups' in node_spec:
1006 nodegroupnames = node_spec['nodegroups']
1007 if isinstance(nodegroupnames, str):
1008 nodegroupnames = [ nodegroupnames ]
1009 for nodegroupname in nodegroupnames:
1010 if nodegroupname not in groups_dict:
1011 groups_dict[nodegroupname] = []
1012 groups_dict[nodegroupname].append(test_node.name())
1013 auth = self.auth_root()
1015 for (nodegroupname,group_nodes) in groups_dict.items():
1017 print('nodegroups:', 'dealing with nodegroup',\
1018 nodegroupname, 'on nodes', group_nodes)
1019 # first, check if the nodetagtype is here
1020 tag_types = self.apiserver.GetTagTypes(auth, {'tagname':nodegroupname})
1022 tag_type_id = tag_types[0]['tag_type_id']
1024 tag_type_id = self.apiserver.AddTagType(auth,
1025 {'tagname' : nodegroupname,
1026 'description' : 'for nodegroup {}'.format(nodegroupname),
1027 'category' : 'test'})
1028 print('located tag (type)', nodegroupname, 'as', tag_type_id)
1030 nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname' : nodegroupname})
1032 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
1033 print('created nodegroup', nodegroupname, \
1034 'from tagname', nodegroupname, 'and value', 'yes')
1035 # set node tag on all nodes, value='yes'
1036 for nodename in group_nodes:
1038 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
1040 traceback.print_exc()
1041 print('node', nodename, 'seems to already have tag', nodegroupname)
1044 expect_yes = self.apiserver.GetNodeTags(auth,
1045 {'hostname' : nodename,
1046 'tagname' : nodegroupname},
1047 ['value'])[0]['value']
1048 if expect_yes != "yes":
1049 print('Mismatch node tag on node',nodename,'got',expect_yes)
1052 if not self.options.dry_run:
1053 print('Cannot find tag', nodegroupname, 'on node', nodename)
1057 print('cleaning nodegroup', nodegroupname)
1058 self.apiserver.DeleteNodeGroup(auth, nodegroupname)
1060 traceback.print_exc()
1064 # a list of TestNode objs
1065 def all_nodes(self):
1067 for site_spec in self.plc_spec['sites']:
1068 test_site = TestSite(self,site_spec)
1069 for node_spec in site_spec['nodes']:
1070 nodes.append(TestNode(self, test_site, node_spec))
1073 # return a list of tuples (nodename,qemuname)
1074 def all_node_infos(self) :
1076 for site_spec in self.plc_spec['sites']:
1077 node_infos += [ (node_spec['node_fields']['hostname'], node_spec['host_box']) \
1078 for node_spec in site_spec['nodes'] ]
1081 def all_nodenames(self):
1082 return [ x[0] for x in self.all_node_infos() ]
1083 def all_reservable_nodenames(self):
1085 for site_spec in self.plc_spec['sites']:
1086 for node_spec in site_spec['nodes']:
1087 node_fields = node_spec['node_fields']
1088 if 'node_type' in node_fields and node_fields['node_type'] == 'reservable':
1089 res.append(node_fields['hostname'])
1092 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
1093 def nodes_check_boot_state(self, target_boot_state, timeout_minutes,
1094 silent_minutes, period_seconds = 15):
1095 if self.options.dry_run:
1099 class CompleterTaskBootState(CompleterTask):
1100 def __init__(self, test_plc, hostname):
1101 self.test_plc = test_plc
1102 self.hostname = hostname
1103 self.last_boot_state = 'undef'
1104 def actual_run(self):
1106 node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(),
1109 self.last_boot_state = node['boot_state']
1110 return self.last_boot_state == target_boot_state
1114 return "CompleterTaskBootState with node {}".format(self.hostname)
1115 def failure_epilogue(self):
1116 print("node {} in state {} - expected {}"\
1117 .format(self.hostname, self.last_boot_state, target_boot_state))
1119 timeout = timedelta(minutes=timeout_minutes)
1120 graceout = timedelta(minutes=silent_minutes)
1121 period = timedelta(seconds=period_seconds)
1122 # the nodes that haven't checked yet - start with a full list and shrink over time
1123 utils.header("checking nodes boot state (expected {})".format(target_boot_state))
1124 tasks = [ CompleterTaskBootState(self,hostname) \
1125 for (hostname,_) in self.all_node_infos() ]
1126 message = 'check_boot_state={}'.format(target_boot_state)
1127 return Completer(tasks, message=message).run(timeout, graceout, period)
1129 def nodes_booted(self):
1130 return self.nodes_check_boot_state('boot', timeout_minutes=30, silent_minutes=28)
1132 def probe_kvm_iptables(self):
1133 (_,kvmbox) = self.all_node_infos()[0]
1134 TestSsh(kvmbox).run("iptables-save")
1138 def check_nodes_ping(self, timeout_seconds=60, period_seconds=10):
1139 class CompleterTaskPingNode(CompleterTask):
1140 def __init__(self, hostname):
1141 self.hostname = hostname
1142 def run(self, silent):
1143 command="ping -c 1 -w 1 {} >& /dev/null".format(self.hostname)
1144 return utils.system(command, silent=silent) == 0
1145 def failure_epilogue(self):
1146 print("Cannot ping node with name {}".format(self.hostname))
1147 timeout = timedelta(seconds = timeout_seconds)
1149 period = timedelta(seconds = period_seconds)
1150 node_infos = self.all_node_infos()
1151 tasks = [ CompleterTaskPingNode(h) for (h,_) in node_infos ]
1152 return Completer(tasks, message='ping_node').run(timeout, graceout, period)
1154 # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1155 def ping_node(self):
1157 return self.check_nodes_ping()
1159 def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period_seconds=15):
1161 timeout = timedelta(minutes=timeout_minutes)
1162 graceout = timedelta(minutes=silent_minutes)
1163 period = timedelta(seconds=period_seconds)
1164 vservername = self.vservername
1167 completer_message = 'ssh_node_debug'
1168 local_key = "keys/{vservername}-debug.rsa".format(**locals())
1171 completer_message = 'ssh_node_boot'
1172 local_key = "keys/key_admin.rsa"
1173 utils.header("checking ssh access to nodes (expected in {} mode)".format(message))
1174 node_infos = self.all_node_infos()
1175 tasks = [ CompleterTaskNodeSsh(nodename, qemuname, local_key,
1176 boot_state=message, dry_run=self.options.dry_run) \
1177 for (nodename, qemuname) in node_infos ]
1178 return Completer(tasks, message=completer_message).run(timeout, graceout, period)
1180 def ssh_node_debug(self):
1181 "Tries to ssh into nodes in debug mode with the debug ssh key"
1182 return self.check_nodes_ssh(debug = True,
1183 timeout_minutes = self.ssh_node_debug_timeout,
1184 silent_minutes = self.ssh_node_debug_silent)
1186 def ssh_node_boot(self):
1187 "Tries to ssh into nodes in production mode with the root ssh key"
1188 return self.check_nodes_ssh(debug = False,
1189 timeout_minutes = self.ssh_node_boot_timeout,
1190 silent_minutes = self.ssh_node_boot_silent)
1192 def node_bmlogs(self):
1193 "Checks that there's a non-empty dir. /var/log/bm/raw"
1194 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw")) == 0
1197 def qemu_local_init(self): pass
1199 def bootcd(self): pass
1201 def qemu_local_config(self): pass
1203 def qemu_export(self): pass
1205 def qemu_cleanlog(self): pass
1207 def nodestate_reinstall(self): pass
1209 def nodestate_upgrade(self): pass
1211 def nodestate_safeboot(self): pass
1213 def nodestate_boot(self): pass
1215 def nodestate_show(self): pass
1217 def nodedistro_f14(self): pass
1219 def nodedistro_f18(self): pass
1221 def nodedistro_f20(self): pass
1223 def nodedistro_f21(self): pass
1225 def nodedistro_f22(self): pass
1227 def nodedistro_show(self): pass
1229 ### check hooks : invoke scripts from hooks/{node,slice}
1230 def check_hooks_node(self):
1231 return self.locate_first_node().check_hooks()
1232 def check_hooks_sliver(self) :
1233 return self.locate_first_sliver().check_hooks()
1235 def check_hooks(self):
1236 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1237 return self.check_hooks_node() and self.check_hooks_sliver()
1240 def do_check_initscripts(self):
1241 class CompleterTaskInitscript(CompleterTask):
1242 def __init__(self, test_sliver, stamp):
1243 self.test_sliver = test_sliver
1245 def actual_run(self):
1246 return self.test_sliver.check_initscript_stamp(self.stamp)
1248 return "initscript checker for {}".format(self.test_sliver.name())
1249 def failure_epilogue(self):
1250 print("initscript stamp {} not found in sliver {}"\
1251 .format(self.stamp, self.test_sliver.name()))
1254 for slice_spec in self.plc_spec['slices']:
1255 if 'initscriptstamp' not in slice_spec:
1257 stamp = slice_spec['initscriptstamp']
1258 slicename = slice_spec['slice_fields']['name']
1259 for nodename in slice_spec['nodenames']:
1260 print('nodename', nodename, 'slicename', slicename, 'stamp', stamp)
1261 site,node = self.locate_node(nodename)
1262 # xxx - passing the wrong site - probably harmless
1263 test_site = TestSite(self, site)
1264 test_slice = TestSlice(self, test_site, slice_spec)
1265 test_node = TestNode(self, test_site, node)
1266 test_sliver = TestSliver(self, test_node, test_slice)
1267 tasks.append(CompleterTaskInitscript(test_sliver, stamp))
1268 return Completer(tasks, message='check_initscripts').\
1269 run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1271 def check_initscripts(self):
1272 "check that the initscripts have triggered"
1273 return self.do_check_initscripts()
1275 def initscripts(self):
1276 "create initscripts with PLCAPI"
1277 for initscript in self.plc_spec['initscripts']:
1278 utils.pprint('Adding Initscript in plc {}'.format(self.plc_spec['name']), initscript)
1279 self.apiserver.AddInitScript(self.auth_root(), initscript['initscript_fields'])
1282 def delete_initscripts(self):
1283 "delete initscripts with PLCAPI"
1284 for initscript in self.plc_spec['initscripts']:
1285 initscript_name = initscript['initscript_fields']['name']
1286 print(('Attempting to delete {} in plc {}'.format(initscript_name, self.plc_spec['name'])))
1288 self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
1289 print(initscript_name, 'deleted')
1291 print('deletion went wrong - probably did not exist')
1296 "create slices with PLCAPI"
1297 return self.do_slices(action="add")
1299 def delete_slices(self):
1300 "delete slices with PLCAPI"
1301 return self.do_slices(action="delete")
1303 def fill_slices(self):
1304 "add nodes in slices with PLCAPI"
1305 return self.do_slices(action="fill")
1307 def empty_slices(self):
1308 "remove nodes from slices with PLCAPI"
1309 return self.do_slices(action="empty")
1311 def do_slices(self, action="add"):
1312 for slice in self.plc_spec['slices']:
1313 site_spec = self.locate_site(slice['sitename'])
1314 test_site = TestSite(self,site_spec)
1315 test_slice=TestSlice(self,test_site,slice)
1316 if action == "delete":
1317 test_slice.delete_slice()
1318 elif action == "fill":
1319 test_slice.add_nodes()
1320 elif action == "empty":
1321 test_slice.delete_nodes()
1323 test_slice.create_slice()
1326 @slice_mapper__tasks(20, 10, 15)
1327 def ssh_slice(self): pass
1328 @slice_mapper__tasks(20, 19, 15)
1329 def ssh_slice_off(self): pass
1330 @slice_mapper__tasks(1, 1, 15)
1331 def slice_fs_present(self): pass
1332 @slice_mapper__tasks(1, 1, 15)
1333 def slice_fs_deleted(self): pass
1335 # use another name so we can exclude/ignore it from the tests on the nightly command line
1336 def ssh_slice_again(self): return self.ssh_slice()
1337 # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1338 # but for some reason the ignore-wrapping thing would not
1341 def ssh_slice_basics(self): pass
1343 def check_vsys_defaults(self): pass
1346 def keys_clear_known_hosts(self): pass
1348 def plcapi_urls(self):
1350 attempts to reach the PLCAPI with various forms for the URL
1352 return PlcapiUrlScanner(self.auth_root(), ip=self.vserverip).scan()
1354 def speed_up_slices(self):
1355 "tweak nodemanager cycle (wait time) to 30+/-10 s"
1356 return self._speed_up_slices (30, 10)
1357 def super_speed_up_slices(self):
1358 "dev mode: tweak nodemanager cycle (wait time) to 5+/-1 s"
1359 return self._speed_up_slices(5, 1)
1361 def _speed_up_slices(self, p, r):
1362 # create the template on the server-side
1363 template = "{}.nodemanager".format(self.name())
1364 with open(template,"w") as template_file:
1365 template_file.write('OPTIONS="-p {} -r {} -d"\n'.format(p, r))
1366 in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1367 remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1368 self.test_ssh.copy_abs(template, remote)
1370 if not self.apiserver.GetConfFiles(self.auth_root(),
1371 {'dest' : '/etc/sysconfig/nodemanager'}):
1372 self.apiserver.AddConfFile(self.auth_root(),
1373 {'dest' : '/etc/sysconfig/nodemanager',
1374 'source' : 'PlanetLabConf/nodemanager',
1375 'postinstall_cmd' : 'service nm restart',})
1378 def debug_nodemanager(self):
1379 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1380 template = "{}.nodemanager".format(self.name())
1381 with open(template,"w") as template_file:
1382 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1383 in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1384 remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1385 self.test_ssh.copy_abs(template, remote)
1389 def qemu_start(self) : pass
1392 def qemu_timestamp(self) : pass
1395 def qemu_nodefamily(self): pass
1397 # when a spec refers to a node possibly on another plc
1398 def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
1399 for plc in [ self ] + other_plcs:
1401 return plc.locate_sliver_obj(nodename, slicename)
1404 raise Exception("Cannot locate sliver {}@{} among all PLCs".format(nodename, slicename))
1406 # implement this one as a cross step so that we can take advantage of different nodes
1407 # in multi-plcs mode
1408 def cross_check_tcp(self, other_plcs):
1409 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1410 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1411 utils.header("check_tcp: no/empty config found")
1413 specs = self.plc_spec['tcp_specs']
1416 # first wait for the network to be up and ready from the slices
1417 class CompleterTaskNetworkReadyInSliver(CompleterTask):
1418 def __init__(self, test_sliver):
1419 self.test_sliver = test_sliver
1420 def actual_run(self):
1421 return self.test_sliver.check_tcp_ready(port = 9999)
1423 return "network ready checker for {}".format(self.test_sliver.name())
1424 def failure_epilogue(self):
1425 print("could not bind port from sliver {}".format(self.test_sliver.name()))
1429 managed_sliver_names = set()
1431 # locate the TestSliver instances involved, and cache them in the spec instance
1432 spec['s_sliver'] = self.locate_sliver_obj_cross(spec['server_node'], spec['server_slice'], other_plcs)
1433 spec['c_sliver'] = self.locate_sliver_obj_cross(spec['client_node'], spec['client_slice'], other_plcs)
1434 message = "Will check TCP between s={} and c={}"\
1435 .format(spec['s_sliver'].name(), spec['c_sliver'].name())
1436 if 'client_connect' in spec:
1437 message += " (using {})".format(spec['client_connect'])
1438 utils.header(message)
1439 # we need to check network presence in both slivers, but also
1440 # avoid inserting a sliver several times
1441 for sliver in [ spec['s_sliver'], spec['c_sliver'] ]:
1442 if sliver.name() not in managed_sliver_names:
1443 tasks.append(CompleterTaskNetworkReadyInSliver(sliver))
1444 # add this sliver's name in the set
1445 managed_sliver_names .update( {sliver.name()} )
1447 # wait for the network to be OK on all server sides
1448 if not Completer(tasks, message='check for network readiness in slivers').\
1449 run(timedelta(seconds=30), timedelta(seconds=24), period=timedelta(seconds=5)):
1452 # run server and client
1456 # the issue here is that we have the server run in background
1457 # and so we have no clue if it took off properly or not
1458 # looks like in some cases it does not
1459 if not spec['s_sliver'].run_tcp_server(port, timeout=20):
1463 # idem for the client side
1464 # use nodename from located sliver, unless 'client_connect' is set
1465 if 'client_connect' in spec:
1466 destination = spec['client_connect']
1468 destination = spec['s_sliver'].test_node.name()
1469 if not spec['c_sliver'].run_tcp_client(destination, port):
1473 # painfully enough, we need to allow for some time as netflow might show up last
1474 def check_system_slice(self):
1475 "all nodes: check that a system slice is alive"
1476 # netflow currently not working in the lxc distro
1477 # drl not built at all in the wtx distro
1478 # if we find either of them we're happy
1479 return self.check_netflow() or self.check_drl()
1482 def check_netflow(self): return self._check_system_slice('netflow')
1483 def check_drl(self): return self._check_system_slice('drl')
1485 # we have the slices up already here, so it should not take too long
1486 def _check_system_slice(self, slicename, timeout_minutes=5, period_seconds=15):
1487 class CompleterTaskSystemSlice(CompleterTask):
1488 def __init__(self, test_node, dry_run):
1489 self.test_node = test_node
1490 self.dry_run = dry_run
1491 def actual_run(self):
1492 return self.test_node._check_system_slice(slicename, dry_run=self.dry_run)
1494 return "System slice {} @ {}".format(slicename, self.test_node.name())
1495 def failure_epilogue(self):
1496 print("COULD not find system slice {} @ {}".format(slicename, self.test_node.name()))
1497 timeout = timedelta(minutes=timeout_minutes)
1498 silent = timedelta(0)
1499 period = timedelta(seconds=period_seconds)
1500 tasks = [ CompleterTaskSystemSlice(test_node, self.options.dry_run) \
1501 for test_node in self.all_nodes() ]
1502 return Completer(tasks, message='_check_system_slice').run(timeout, silent, period)
1504 def plcsh_stress_test(self):
1505 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1506 # install the stress-test in the plc image
1507 location = "/usr/share/plc_api/plcsh_stress_test.py"
1508 remote = "{}/{}".format(self.vm_root_in_host(), location)
1509 self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1511 command += " -- --check"
1512 if self.options.size == 1:
1513 command += " --tiny"
1514 return self.run_in_guest(command) == 0
1516 # populate runs the same utility with slightly different options
1517 # in particular it runs with --preserve (don't cleanup) and without --check
1518 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1520 def sfa_install_all(self):
1521 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1522 return self.yum_install("sfa sfa-plc sfa-sfatables sfa-client")
1524 def sfa_install_core(self):
1526 return self.yum_install("sfa")
1528 def sfa_install_plc(self):
1529 "yum install sfa-plc"
1530 return self.yum_install("sfa-plc")
1532 def sfa_install_sfatables(self):
1533 "yum install sfa-sfatables"
1534 return self.yum_install("sfa-sfatables")
1536 # for some very odd reason, this sometimes fails with the following symptom
1537 # # yum install sfa-client
1538 # Setting up Install Process
1540 # Downloading Packages:
1541 # Running rpm_check_debug
1542 # Running Transaction Test
1543 # Transaction Test Succeeded
1544 # Running Transaction
1545 # Transaction couldn't start:
1546 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1547 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1548 # even though in the same context I have
1549 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1550 # Filesystem Size Used Avail Use% Mounted on
1551 # /dev/hdv1 806G 264G 501G 35% /
1552 # none 16M 36K 16M 1% /tmp
1554 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1555 def sfa_install_client(self):
1556 "yum install sfa-client"
1557 first_try = self.yum_install("sfa-client")
1560 utils.header("********** Regular yum failed - special workaround in place, 2nd chance")
1561 code, cached_rpm_path = \
1562 utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1563 utils.header("cached_rpm_path=<<{}>>".format(cached_rpm_path))
1565 self.run_in_guest("rpm -i {}".format(cached_rpm_path))
1566 return self.yum_check_installed("sfa-client")
1568 def sfa_dbclean(self):
1569 "thoroughly wipes off the SFA database"
1570 return self.run_in_guest("sfaadmin reg nuke") == 0 or \
1571 self.run_in_guest("sfa-nuke.py") == 0 or \
1572 self.run_in_guest("sfa-nuke-plc.py") == 0 or \
1573 self.run_in_guest("sfaadmin registry nuke") == 0
1575 def sfa_fsclean(self):
1576 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1577 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1580 def sfa_plcclean(self):
1581 "cleans the PLC entries that were created as a side effect of running the script"
1583 sfa_spec = self.plc_spec['sfa']
1585 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1586 login_base = auth_sfa_spec['login_base']
1588 self.apiserver.DeleteSite(self.auth_root(),login_base)
1590 print("Site {} already absent from PLC db".format(login_base))
1592 for spec_name in ['pi_spec','user_spec']:
1593 user_spec = auth_sfa_spec[spec_name]
1594 username = user_spec['email']
1596 self.apiserver.DeletePerson(self.auth_root(),username)
1598 # this in fact is expected as sites delete their members
1599 #print "User {} already absent from PLC db".format(username)
1602 print("REMEMBER TO RUN sfa_import AGAIN")
1605 def sfa_uninstall(self):
1606 "uses rpm to uninstall sfa - ignore result"
1607 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1608 self.run_in_guest("rm -rf /var/lib/sfa")
1609 self.run_in_guest("rm -rf /etc/sfa")
1610 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1612 self.run_in_guest("rpm -e --noscripts sfa-plc")
1615 ### run unit tests for SFA
1616 # NOTE: on f14/i386, yum install sfa-tests sometimes fails for no apparent reason
1617 # Running Transaction
1618 # Transaction couldn't start:
1619 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1620 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1621 # no matter how many Gbs are available on the testplc
1622 # could not figure out what's wrong, so...
1623 # if the yum install phase fails, consider the test is successful
1624 # other combinations will eventually run it hopefully
1625 def sfa_utest(self):
1626 "yum install sfa-tests and run SFA unittests"
1627 self.run_in_guest("yum -y install sfa-tests")
1628 # failed to install - forget it
1629 if self.run_in_guest("rpm -q sfa-tests") != 0:
1630 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1632 return self.run_in_guest("/usr/share/sfa/tests/testAll.py") == 0
1636 dirname = "conf.{}".format(self.plc_spec['name'])
1637 if not os.path.isdir(dirname):
1638 utils.system("mkdir -p {}".format(dirname))
1639 if not os.path.isdir(dirname):
1640 raise Exception("Cannot create config dir for plc {}".format(self.name()))
1643 def conffile(self, filename):
1644 return "{}/{}".format(self.confdir(), filename)
1645 def confsubdir(self, dirname, clean, dry_run=False):
1646 subdirname = "{}/{}".format(self.confdir(), dirname)
1647 if clean:
1648 utils.system("rm -rf {}".format(subdirname))
1649 if not os.path.isdir(subdirname):
1650 utils.system("mkdir -p {}".format(subdirname))
1651 if not dry_run and not os.path.isdir(subdirname):
1652 raise Exception("Cannot create config subdir {} for plc {}".format(dirname, self.name()))
1653 return subdirname
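# Illustration (hypothetical names): for a plc named 'mylab', confdir() is 'conf.mylab',
# conffile('agg.xml') is 'conf.mylab/agg.xml', and confsubdir('dot-sfi/inria', clean=True)
# wipes and recreates 'conf.mylab/dot-sfi/inria' before returning that path.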
1655 def conffile_clean(self, filename):
1656 filename = self.conffile(filename)
1657 return utils.system("rm -rf {}".format(filename)) == 0
1660 def sfa_configure(self):
1661 "run sfa-config-tty"
1662 tmpname = self.conffile("sfa-config-tty")
1663 with open(tmpname,'w') as fileconf:
1664 for var, value in self.plc_spec['sfa']['settings'].items():
1665 fileconf.write('e {}\n{}\n'.format(var, value))
1666 fileconf.write('w\n')
1667 fileconf.write('R\n')
1668 fileconf.write('q\n')
1669 utils.system('cat {}'.format(tmpname))
1670 self.run_in_guest_piped('cat {}'.format(tmpname), 'sfa-config-tty')
1671 return True
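# Illustration: with a single (hypothetical) setting SFA_REGISTRY_ROOT_AUTH=ple, the
# generated script piped into sfa-config-tty would read:
#   e SFA_REGISTRY_ROOT_AUTH
#   ple
#   w
#   R
#   q
# i.e. one 'e <var>' / '<value>' pair per entry in plc_spec['sfa']['settings'], followed
# by the fixed trailing w/R/q commands.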
1673 def aggregate_xml_line(self):
1674 port = self.plc_spec['sfa']['neighbours-port']
1675 return '<aggregate addr="{}" hrn="{}" port="{}"/>'\
1676 .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'], port)
1678 def registry_xml_line(self):
1679 return '<registry addr="{}" hrn="{}" port="12345"/>'\
1680 .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
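# Illustration (hypothetical address and authority): these helpers produce one-liners like
#   <aggregate addr="192.168.0.10" hrn="ple" port="12346"/>
#   <registry addr="192.168.0.10" hrn="ple" port="12345"/>
# with addr taken from self.vserverip and hrn from SFA_REGISTRY_ROOT_AUTH; only the
# aggregate port comes from plc_spec['sfa']['neighbours-port'], the registry port is fixed.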
1683 # a cross step that takes all other plcs as argument
1684 def cross_sfa_configure(self, other_plcs):
1685 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1686 # of course with a single plc, other_plcs is an empty list
1687 if not other_plcs:
1688 return True
1689 agg_fname = self.conffile("agg.xml")
1690 with open(agg_fname,"w") as out:
1691 out.write("<aggregates>{}</aggregates>\n"\
1692 .format(" ".join([ plc.aggregate_xml_line() for plc in other_plcs ])))
1693 utils.header("(Over)wrote {}".format(agg_fname))
1694 reg_fname=self.conffile("reg.xml")
1695 with open(reg_fname,"w") as out:
1696 out.write("<registries>{}</registries>\n"\
1697 .format(" ".join([ plc.registry_xml_line() for plc in other_plcs ])))
1698 utils.header("(Over)wrote {}".format(reg_fname))
1699 return self.test_ssh.copy_abs(agg_fname,
1700 '/{}/etc/sfa/aggregates.xml'.format(self.vm_root_in_host())) == 0 \
1701 and self.test_ssh.copy_abs(reg_fname,
1702 '/{}/etc/sfa/registries.xml'.format(self.vm_root_in_host())) == 0
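# Note: each plc only lists the *other* plcs here, so in a two-plc test A's
# aggregates.xml/registries.xml point at B and vice versa; the files are written locally
# under confdir() and then pushed into the guest's /etc/sfa with copy_abs.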
1704 def sfa_import(self):
1705 "use sfaadmin to import from plc"
1706 auth = self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']  # note: currently unused below
1707 return self.run_in_guest('sfaadmin reg import_registry') == 0
1709 def sfa_start(self):
1710 "start the sfa service"
1711 return self.start_service('sfa')
1714 def sfi_configure(self):
1715 "Create /root/sfi on the plc side for sfi client configuration"
1716 if self.options.dry_run:
1717 utils.header("DRY RUN - skipping step")
1718 return True
1719 sfa_spec = self.plc_spec['sfa']
1720 # cannot use auth_sfa_mapper to pass dir_name
1721 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1722 test_slice = TestAuthSfa(self, slice_spec)
1723 dir_basename = os.path.basename(test_slice.sfi_path())
1724 dir_name = self.confsubdir("dot-sfi/{}".format(dir_basename),
1725 clean=True, dry_run=self.options.dry_run)
1726 test_slice.sfi_configure(dir_name)
1727 # push into the remote /root/sfi area
1728 location = test_slice.sfi_path()
1729 remote = "{}/{}".format(self.vm_root_in_host(), location)
1730 self.test_ssh.mkdir(remote, abs=True)
1731 # need to strip the last level of remote, otherwise we get an extra dir level
1732 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1733 return True
1736 def sfi_clean(self):
1737 "clean up /root/sfi on the plc side"
1738 self.run_in_guest("rm -rf /root/sfi")
1739 return True
1741 def sfa_rspec_empty(self):
1742 "expose a static empty rspec (ships with the tests module) in the sfi directory"
1743 filename = "empty-rspec.xml"
1744 overall = True
1745 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1746 test_slice = TestAuthSfa(self, slice_spec)
1747 in_vm = test_slice.sfi_path()
1748 remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1749 if self.test_ssh.copy_abs(filename, remote) != 0:
1750 overall = False
1751 return overall
1753 @auth_sfa_mapper
1754 def sfa_register_site(self): pass
1755 @auth_sfa_mapper
1756 def sfa_register_pi(self): pass
1757 @auth_sfa_mapper
1758 def sfa_register_user(self): pass
1759 @auth_sfa_mapper
1760 def sfa_update_user(self): pass
1761 @auth_sfa_mapper
1762 def sfa_register_slice(self): pass
1763 @auth_sfa_mapper
1764 def sfa_renew_slice(self): pass
1765 @auth_sfa_mapper
1766 def sfa_get_expires(self): pass
1767 @auth_sfa_mapper
1768 def sfa_discover(self): pass
1769 @auth_sfa_mapper
1770 def sfa_rspec(self): pass
1771 @auth_sfa_mapper
1772 def sfa_allocate(self): pass
1773 @auth_sfa_mapper
1774 def sfa_allocate_empty(self): pass
1775 @auth_sfa_mapper
1776 def sfa_provision(self): pass
1777 @auth_sfa_mapper
1778 def sfa_provision_empty(self): pass
1779 @auth_sfa_mapper
1780 def sfa_describe(self): pass
1781 @auth_sfa_mapper
1782 def sfa_check_slice_plc(self): pass
1783 @auth_sfa_mapper
1784 def sfa_check_slice_plc_empty(self): pass
1785 @auth_sfa_mapper
1786 def sfa_update_slice(self): pass
1787 @auth_sfa_mapper
1788 def sfa_remove_user_from_slice(self): pass
1789 @auth_sfa_mapper
1790 def sfa_insert_user_in_slice(self): pass
1791 @auth_sfa_mapper
1792 def sfi_list(self): pass
1793 @auth_sfa_mapper
1794 def sfi_show_site(self): pass
1795 @auth_sfa_mapper
1796 def sfi_show_slice(self): pass
1797 @auth_sfa_mapper
1798 def sfi_show_slice_researchers(self): pass
1799 @auth_sfa_mapper
1800 def ssh_slice_sfa(self): pass
1801 @auth_sfa_mapper
1802 def sfa_delete_user(self): pass
1803 @auth_sfa_mapper
1804 def sfa_delete_slice(self): pass
1806 def sfa_stop(self):
1807 "stop the sfa service"
1808 return self.stop_service('sfa')
1811 "creates random entries in the PLCAPI"
1812 # install the stress-test in the plc image
1813 location = "/usr/share/plc_api/plcsh_stress_test.py"
1814 remote = "{}/{}".format(self.vm_root_in_host(), location)
1815 self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1816 command = location
1817 command += " -- --preserve --short-names"
1818 local = (self.run_in_guest(command) == 0)
1819 # second run with --foreign
1820 command += ' --foreign'
1821 remote = (self.run_in_guest(command) == 0)
1822 return local and remote
1825 ####################
1826 @bonding_redirector
1827 def bonding_init_partial(self): pass
1829 @bonding_redirector
1830 def bonding_add_yum(self): pass
1832 @bonding_redirector
1833 def bonding_install_rpms(self): pass
1835 ####################
1837 def gather_logs(self):
1838 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1839 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1840 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1841 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1842 # (2) get each node's qemu log and store it as logs/node.qemu.<node>.log
1843 # (3) get each node's /var/log and store it as logs/node.var-log.<node>/*
1844 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1846 print("-------------------- TestPlc.gather_logs : PLC's /var/log")
1847 self.gather_var_logs()
1849 print("-------------------- TestPlc.gather_logs : PLC's /var/lib/pgsql/data/pg_log/")
1850 self.gather_pgsql_logs()
1852 print("-------------------- TestPlc.gather_logs : PLC's /root/sfi/")
1853 self.gather_root_sfi()
1855 print("-------------------- TestPlc.gather_logs : nodes' QEMU logs")
1856 for site_spec in self.plc_spec['sites']:
1857 test_site = TestSite(self,site_spec)
1858 for node_spec in site_spec['nodes']:
1859 test_node = TestNode(self, test_site, node_spec)
1860 test_node.gather_qemu_logs()
1862 print("-------------------- TestPlc.gather_logs : nodes' /var/log")
1863 self.gather_nodes_var_logs()
1865 print("-------------------- TestPlc.gather_logs : sample sliver's /var/log")
1866 self.gather_slivers_var_logs()
1867 return True
1869 def gather_slivers_var_logs(self):
1870 for test_sliver in self.all_sliver_objs():
1871 remote = test_sliver.tar_var_logs()
1872 utils.system("mkdir -p logs/sliver.var-log.{}".format(test_sliver.name()))
1873 command = remote + " | tar -C logs/sliver.var-log.{} -xf -".format(test_sliver.name())
1874 utils.system(command)
1877 def gather_var_logs(self):
1878 utils.system("mkdir -p logs/myplc.var-log.{}".format(self.name()))
1879 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1880 command = to_plc + "| tar -C logs/myplc.var-log.{} -xf -".format(self.name())
1881 utils.system(command)
1882 command = "chmod a+r,a+x logs/myplc.var-log.{}/httpd".format(self.name())
1883 utils.system(command)
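# Note on the pattern shared by the gather_* helpers: actual_command_in_guest() builds the
# ssh invocation that runs 'tar -cf - .' inside the guest, and its stdout is piped into a
# local 'tar -xf -' under logs/.  Roughly (the exact ssh options depend on the wrapper):
#   ssh root@<plc> tar -C /var/log -cf - . | tar -C logs/myplc.var-log.<plc> -xf -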
1885 def gather_pgsql_logs(self):
1886 utils.system("mkdir -p logs/myplc.pgsql-log.{}".format(self.name()))
1887 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1888 command = to_plc + "| tar -C logs/myplc.pgsql-log.{} -xf -".format(self.name())
1889 utils.system(command)
1891 def gather_root_sfi(self):
1892 utils.system("mkdir -p logs/sfi.{}".format(self.name()))
1893 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1894 command = to_plc + "| tar -C logs/sfi.{} -xf -".format(self.name())
1895 utils.system(command)
1897 def gather_nodes_var_logs(self):
1898 for site_spec in self.plc_spec['sites']:
1899 test_site = TestSite(self, site_spec)
1900 for node_spec in site_spec['nodes']:
1901 test_node = TestNode(self, test_site, node_spec)
1902 test_ssh = TestSsh(test_node.name(), key="keys/key_admin.rsa")
1903 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1904 command = command + "| tar -C logs/node.var-log.{} -xf -".format(test_node.name())
1905 utils.system("mkdir -p logs/node.var-log.{}".format(test_node.name()))
1906 utils.system(command)
1909 # returns the filename to use for sql dump/restore, using options.dbname if set
1910 def dbfile(self, database):
1911 # uses options.dbname if it is found
1912 try:
1913 name = self.options.dbname
1914 if not isinstance(name, str):
1915 raise Exception
1916 except:
1917 # fall back on a date-based name when options.dbname is not usable
1918 name = str(datetime.now().date())
1920 return "/root/{}-{}.sql".format(database, name)
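# Illustration (hypothetical values): dbfile("planetlab5") yields /root/planetlab5-nightly.sql
# when options.dbname is set to 'nightly', and something like /root/planetlab5-2024-01-15.sql
# otherwise, via the date-based fallback coded above.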
1922 def plc_db_dump(self):
1923 'dump the planetlab5 DB in /root in the PLC - filename has time'
1924 dump = self.dbfile("planetlab5")
1925 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1926 utils.header('Dumped planetlab5 database in {}'.format(dump))
1927 return True
1929 def plc_db_restore(self):
1930 'restore the planetlab5 DB - looks broken, but run -n might help'
1931 dump = self.dbfile("planetlab5")
1932 ##stop httpd service
1933 self.run_in_guest('service httpd stop')
1934 # xxx - need another wrapper
1935 self.run_in_guest_piped('echo drop database planetlab5', 'psql --user=pgsqluser template1')
1936 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1937 self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump)
1938 ##starting httpd service
1939 self.run_in_guest('service httpd start')
1941 utils.header('Database restored from ' + dump)
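# Note: httpd is stopped first, presumably so the PLC API releases its database connections
# and the 'drop database planetlab5' can proceed; the database is then recreated, reloaded
# from the dump, and httpd is restarted.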
1943 @staticmethod
1944 def create_ignore_steps():
1945 for step in TestPlc.default_steps + TestPlc.other_steps:
1946 # default step can have a plc qualifier
1947 if '@' in step:
1948 step, qualifier = step.split('@')
1949 # or be defined as forced or ignored by default
1950 for keyword in ['_ignore','_force']:
1951 if step.endswith(keyword):
1952 step=step.replace(keyword,'')
1953 if step == SEP or step == SEPSFA:
1954 continue
1955 method = getattr(TestPlc,step)
1956 name = step + '_ignore'
1957 wrapped = ignore_result(method)
1958 # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
1959 setattr(TestPlc, name, wrapped)
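# Note: this installs, for every regular step 'foo', a companion 'foo_ignore' method on
# TestPlc (e.g. sfa_import -> sfa_import_ignore), skipping the SEP/SEPSFA separators and
# names already carrying an _ignore/_force suffix, so a run can request the _ignore
# variant and keep going even when the underlying step fails.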
1962 # def ssh_slice_again_ignore (self): pass
1964 # def check_initscripts_ignore (self): pass
1966 def standby_1_through_20(self):
1967 """convenience function to wait for a specified number of minutes"""
1968 pass
1969 @standby_generic
1970 def standby_1(): pass
1971 @standby_generic
1972 def standby_2(): pass
1973 @standby_generic
1974 def standby_3(): pass
1975 @standby_generic
1976 def standby_4(): pass
1977 @standby_generic
1978 def standby_5(): pass
1979 @standby_generic
1980 def standby_6(): pass
1981 @standby_generic
1982 def standby_7(): pass
1983 @standby_generic
1984 def standby_8(): pass
1985 @standby_generic
1986 def standby_9(): pass
1987 @standby_generic
1988 def standby_10(): pass
1989 @standby_generic
1990 def standby_11(): pass
1991 @standby_generic
1992 def standby_12(): pass
1993 @standby_generic
1994 def standby_13(): pass
1995 @standby_generic
1996 def standby_14(): pass
1997 @standby_generic
1998 def standby_15(): pass
1999 @standby_generic
2000 def standby_16(): pass
2001 @standby_generic
2002 def standby_17(): pass
2003 @standby_generic
2004 def standby_18(): pass
2005 @standby_generic
2006 def standby_19(): pass
2007 @standby_generic
2008 def standby_20(): pass
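# Usage note: together with the standby_generic wrapper these stubs provide steps
# standby_1 through standby_20; requesting e.g. standby_10 in a run simply pauses the run
# for 10 minutes.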
2010 # convenience for debugging the test logic
2011 def yes(self): return True
2012 def no(self): return False
2013 def fail(self): return False