1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
12 from Completer import Completer, CompleterTask
13 from TestSite import TestSite
14 from TestNode import TestNode, CompleterTaskNodeSsh
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
25 from TestBonding import TestBonding
27 has_sfa_cache_filename="sfa-cache"
29 # step methods must take (self) and return a boolean (options is a member of the class)
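# for illustration only - a hypothetical step (not part of the actual sequence) would look like:
#     def my_step(self):
#         "one-line description shown when listing steps"
#         return self.run_in_host("some command") == 0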
31 def standby(minutes, dry_run):
32 utils.header('Entering StandBy for {:d} mn'.format(minutes))
36 time.sleep(60*minutes)
39 def standby_generic(func):
41 minutes = int(func.__name__.split("_")[1])
42 return standby(minutes, self.options.dry_run)
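# the duration is encoded in the step name, e.g. a step named 'standby_5'
# would sleep for 5 minutes - see 'standby_1_through_20' in the steps below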
45 def node_mapper(method):
46 def map_on_nodes(self, *args, **kwds):
48 node_method = TestNode.__dict__[method.__name__]
49 for test_node in self.all_nodes():
50 if not node_method(test_node, *args, **kwds):
53 # maintain __name__ for ignore_result
54 map_on_nodes.__name__ = method.__name__
55 # restore the doc text
56 map_on_nodes.__doc__ = TestNode.__dict__[method.__name__].__doc__
59 def slice_mapper(method):
60 def map_on_slices(self):
62 slice_method = TestSlice.__dict__[method.__name__]
63 for slice_spec in self.plc_spec['slices']:
64 site_spec = self.locate_site (slice_spec['sitename'])
65 test_site = TestSite(self,site_spec)
66 test_slice = TestSlice(self,test_site,slice_spec)
67 if not slice_method(test_slice, self.options):
70 # maintain __name__ for ignore_result
71 map_on_slices.__name__ = method.__name__
72 # restore the doc text
73 map_on_slices.__doc__ = TestSlice.__dict__[method.__name__].__doc__
76 def bonding_redirector(method):
77 bonding_name = method.__name__.replace('bonding_', '')
79 bonding_method = TestBonding.__dict__[bonding_name]
80 return bonding_method(self.test_bonding)
81 # maintain __name__ for ignore_result
82 redirect.__name__ = method.__name__
83 # restore the doc text
84 redirect.__doc__ = TestBonding.__dict__[bonding_name].__doc__
87 # run a step but return True so that we can go on
88 def ignore_result(method):
90 # ssh_slice_ignore->ssh_slice
91 ref_name = method.__name__.replace('_ignore', '').replace('force_', '')
92 ref_method = TestPlc.__dict__[ref_name]
93 result = ref_method(self)
94 print("Actual (but ignored) result for {ref_name} is {result}".format(**locals()))
95 return Ignored(result)
96 name = method.__name__.replace('_ignore', '').replace('force_', '')
97 ignoring.__name__ = name
98 ignoring.__doc__ = "ignored version of " + name
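# e.g. a step named 'check_vsys_defaults_ignore' runs check_vsys_defaults and
# wraps its outcome in Ignored(), so the overall sequence can go on regardless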
101 # a variant that expects the TestSlice method to return a list of CompleterTasks that
102 # are then merged into a single Completer run to avoid waiting for all the slices
103 # especially useful, of course, when a test fails
104 # because we need to pass arguments we use a class instead..
105 class slice_mapper__tasks(object):
106 # could not get this to work with named arguments
107 def __init__(self, timeout_minutes, silent_minutes, period_seconds):
108 self.timeout = timedelta(minutes = timeout_minutes)
109 self.silent = timedelta(minutes = silent_minutes)
110 self.period = timedelta(seconds = period_seconds)
111 def __call__(self, method):
113 # compute augmented method name
114 method_name = method.__name__ + "__tasks"
115 # locate in TestSlice
116 slice_method = TestSlice.__dict__[ method_name ]
119 for slice_spec in self.plc_spec['slices']:
120 site_spec = self.locate_site (slice_spec['sitename'])
121 test_site = TestSite(self, site_spec)
122 test_slice = TestSlice(self, test_site, slice_spec)
123 tasks += slice_method (test_slice, self.options)
124 return Completer (tasks, message=method.__name__).\
125 run(decorator_self.timeout, decorator_self.silent, decorator_self.period)
126 # restore the doc text from the TestSlice method even if a bit odd
127 wrappee.__name__ = method.__name__
128 wrappee.__doc__ = slice_method.__doc__
131 def auth_sfa_mapper(method):
134 auth_method = TestAuthSfa.__dict__[method.__name__]
135 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
136 test_auth = TestAuthSfa(self, auth_spec)
137 if not auth_method(test_auth, self.options):
140 # restore the doc text
141 actual.__doc__ = TestAuthSfa.__dict__[method.__name__].__doc__
145 def __init__(self, result):
155 'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
156 'plc_install', 'plc_configure', 'plc_start', SEP,
157 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
158 'plcapi_urls','speed_up_slices', SEP,
159 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
160 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
161 # keep this out of the way for now
162 'check_vsys_defaults_ignore', SEP,
163 # run this first off so it's easier to re-run on another qemu box
164 'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
165 'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', 'qemu_nodefamily', SEP,
166 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
167 'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
168 'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
169 'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1',
170 'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
171 'sfa_discover@1', 'sfa_rspec@1', SEPSFA,
172 'sfa_allocate@1', 'sfa_provision@1', 'sfa_describe@1', SEPSFA,
173 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
174 'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
175 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
176 # but as the stress test might take a while, we sometimes missed the debug mode..
177 'probe_kvm_iptables',
178 'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
179 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts', SEP,
180 'ssh_slice_sfa@1', SEPSFA,
181 'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1','sfa_check_slice_plc_empty@1', SEPSFA,
182 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
183 'cross_check_tcp@1', 'check_system_slice', SEP,
184 # for inspecting the slice while it runs the first time
186 # check slices are turned off properly
187 'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
188 # check they are properly re-created with the same name
189 'fill_slices', 'ssh_slice_again', SEP,
190 'gather_logs_force', SEP,
193 'export', 'show_boxes', 'super_speed_up_slices', SEP,
194 'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
195 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
196 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
197 'delete_leases', 'list_leases', SEP,
199 'nodestate_show','nodestate_safeboot','nodestate_boot', 'nodestate_upgrade', SEP,
200 'nodeflavour_show','nodedistro_f14','nodedistro_f18', 'nodedistro_f20', 'nodedistro_f21', SEP,
201 'nodeplain_on','nodeplain_off','nodeplain_show', SEP,
202 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
203 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
204 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
205 'sfa_get_expires', SEPSFA,
206 'plc_db_dump' , 'plc_db_restore', SEP,
207 'check_netflow','check_drl', SEP,
208 'debug_nodemanager', 'slice_fs_present', SEP,
209 'standby_1_through_20','yes','no',SEP,
210 'install_syslinux6', 'bonding_builds', 'bonding_nodes', SEP,
212 default_bonding_steps = [
213 'bonding_init_partial',
215 'bonding_install_rpms', SEP,
219 def printable_steps(list):
220 single_line = " ".join(list) + " "
221 return single_line.replace(" "+SEP+" ", " \\\n").replace(" "+SEPSFA+" ", " \\\n")
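# i.e. step names get joined on one line, and each SEP/SEPSFA marker becomes
# a backslash-newline so the list prints as a shell-style continuation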
223 def valid_step(step):
224 return step != SEP and step != SEPSFA
226 # turn off the sfa-related steps when build has skipped SFA
227 # this was originally for centos5 but is still valid
228 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
230 def _has_sfa_cached(rpms_url):
231 if os.path.isfile(has_sfa_cache_filename):
232 with open(has_sfa_cache_filename) as cache:
233 cached = cache.read() == "yes"
234 utils.header("build provides SFA (cached):{}".format(cached))
236 # warning, we're now building 'sface' so let's be a bit more picky
237 # full builds are expected to return with 0 here
238 utils.header("Checking if build provides SFA package...")
239 retcod = utils.system("curl --silent {}/ | grep -q sfa-".format(rpms_url)) == 0
240 encoded = 'yes' if retcod else 'no'
241 with open(has_sfa_cache_filename,'w') as cache:
246 def check_whether_build_has_sfa(rpms_url):
247 has_sfa = TestPlc._has_sfa_cached(rpms_url)
249 utils.header("build does provide SFA")
251 # move all steps containing 'sfa' from default_steps to other_steps
252 utils.header("SFA package not found - removing steps with sfa or sfi")
253 sfa_steps = [ step for step in TestPlc.default_steps
254 if step.find('sfa') >= 0 or step.find("sfi") >= 0 ]
255 TestPlc.other_steps += sfa_steps
256 for step in sfa_steps:
257 TestPlc.default_steps.remove(step)
259 def __init__(self, plc_spec, options):
260 self.plc_spec = plc_spec
261 self.options = options
262 self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
263 self.vserverip = plc_spec['vserverip']
264 self.vservername = plc_spec['vservername']
265 self.vplchostname = self.vservername.split('-')[-1]
266 self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
267 self.apiserver = TestApiserver(self.url, options.dry_run)
268 (self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
269 (self.ssh_node_debug_timeout, self.ssh_node_debug_silent) = plc_spec['ssh_node_debug_timers']
271 def has_addresses_api(self):
272 return self.apiserver.has_method('AddIpAddress')
275 name = self.plc_spec['name']
276 return "{}.{}".format(name,self.vservername)
279 return self.plc_spec['host_box']
282 return self.test_ssh.is_local()
284 # define the API methods on this object through xmlrpc
285 # would help, but not strictly necessary
289 def actual_command_in_guest(self,command, backslash=False):
290 raw1 = self.host_to_guest(command)
291 raw2 = self.test_ssh.actual_command(raw1, dry_run=self.options.dry_run, backslash=backslash)
294 def start_guest(self):
295 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),
296 dry_run=self.options.dry_run))
298 def stop_guest(self):
299 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),
300 dry_run=self.options.dry_run))
302 def run_in_guest(self, command, backslash=False):
303 raw = self.actual_command_in_guest(command, backslash)
304 return utils.system(raw)
306 def run_in_host(self,command):
307 return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
309 # backslashing turned out so awful at some point that I've turned off auto-backslashing
310 # see e.g. plc_start esp. the version for f14
311 #command gets run in the plc's vm
312 def host_to_guest(self, command):
313 ssh_leg = TestSsh(self.vplchostname)
314 return ssh_leg.actual_command(command, keep_stdin=True)
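# so, roughly speaking, run_in_guest(command) amounts to
#   ssh <host_box> ssh <vplchostname> <command>
# (or a local run when the host box is the local machine),
# with quoting/backslashing handled by TestSsh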
316 # this /vservers thing is legacy...
317 def vm_root_in_host(self):
318 return "/vservers/{}/".format(self.vservername)
320 def vm_timestamp_path(self):
321 return "/vservers/{}/{}.timestamp".format(self.vservername, self.vservername)
323 #start/stop the vserver
324 def start_guest_in_host(self):
325 return "virsh -c lxc:/// start {}".format(self.vservername)
327 def stop_guest_in_host(self):
328 return "virsh -c lxc:/// destroy {}".format(self.vservername)
331 def run_in_guest_piped(self,local,remote):
332 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),
335 def yum_check_installed(self, rpms):
336 if isinstance(rpms, list):
338 return self.run_in_guest("rpm -q {}".format(rpms)) == 0
340 # does a yum install in the vs, ignores the yum return code, and checks with rpm
341 def yum_install(self, rpms):
342 if isinstance(rpms, list):
344 self.run_in_guest("yum -y install {}".format(rpms))
345 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
346 self.run_in_guest("yum-complete-transaction -y")
347 return self.yum_check_installed(rpms)
350 return {'Username' : self.plc_spec['settings']['PLC_ROOT_USER'],
351 'AuthMethod' : 'password',
352 'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
353 'Role' : self.plc_spec['role'],
356 def locate_site(self,sitename):
357 for site in self.plc_spec['sites']:
358 if site['site_fields']['name'] == sitename:
360 if site['site_fields']['login_base'] == sitename:
362 raise Exception("Cannot locate site {}".format(sitename))
364 def locate_node(self, nodename):
365 for site in self.plc_spec['sites']:
366 for node in site['nodes']:
367 if node['name'] == nodename:
369 raise Exception("Cannot locate node {}".format(nodename))
371 def locate_hostname(self, hostname):
372 for site in self.plc_spec['sites']:
373 for node in site['nodes']:
374 if node['node_fields']['hostname'] == hostname:
376 raise Exception("Cannot locate hostname {}".format(hostname))
378 def locate_key(self, key_name):
379 for key in self.plc_spec['keys']:
380 if key['key_name'] == key_name:
382 raise Exception("Cannot locate key {}".format(key_name))
384 def locate_private_key_from_key_names(self, key_names):
385 # locate the first avail. key
387 for key_name in key_names:
388 key_spec = self.locate_key(key_name)
389 test_key = TestKey(self,key_spec)
390 publickey = test_key.publicpath()
391 privatekey = test_key.privatepath()
392 if os.path.isfile(publickey) and os.path.isfile(privatekey):
399 def locate_slice(self, slicename):
400 for slice in self.plc_spec['slices']:
401 if slice['slice_fields']['name'] == slicename:
403 raise Exception("Cannot locate slice {}".format(slicename))
405 def all_sliver_objs(self):
407 for slice_spec in self.plc_spec['slices']:
408 slicename = slice_spec['slice_fields']['name']
409 for nodename in slice_spec['nodenames']:
410 result.append(self.locate_sliver_obj(nodename, slicename))
413 def locate_sliver_obj(self, nodename, slicename):
414 site,node = self.locate_node(nodename)
415 slice = self.locate_slice(slicename)
417 test_site = TestSite(self, site)
418 test_node = TestNode(self, test_site, node)
419 # xxx the slice site is assumed to be the node site - mhh - probably harmless
420 test_slice = TestSlice(self, test_site, slice)
421 return TestSliver(self, test_node, test_slice)
423 def locate_first_node(self):
424 nodename = self.plc_spec['slices'][0]['nodenames'][0]
425 site,node = self.locate_node(nodename)
426 test_site = TestSite(self, site)
427 test_node = TestNode(self, test_site, node)
430 def locate_first_sliver(self):
431 slice_spec = self.plc_spec['slices'][0]
432 slicename = slice_spec['slice_fields']['name']
433 nodename = slice_spec['nodenames'][0]
434 return self.locate_sliver_obj(nodename,slicename)
436 # all different hostboxes used in this plc
437 def get_BoxNodes(self):
438 # maps on sites and nodes, return [ (host_box,test_node) ]
440 for site_spec in self.plc_spec['sites']:
441 test_site = TestSite(self,site_spec)
442 for node_spec in site_spec['nodes']:
443 test_node = TestNode(self, test_site, node_spec)
444 if not test_node.is_real():
445 tuples.append( (test_node.host_box(),test_node) )
446 # transform into a dict { 'host_box' -> [ test_node .. ] }
448 for (box,node) in tuples:
449 if box not in result:
452 result[box].append(node)
455 # a step for checking this stuff
456 def show_boxes(self):
457 'print summary of nodes location'
458 for box,nodes in self.get_BoxNodes().items():
459 print(box,":"," + ".join( [ node.name() for node in nodes ] ))
462 # make this a valid step
463 def qemu_kill_all(self):
464 'kill all qemu instances on the qemu boxes involved in this setup'
465 # this is the brute force version, kill all qemus on that host box
466 for (box,nodes) in self.get_BoxNodes().items():
467 # pass the first nodename, as we don't push template-qemu on testboxes
468 nodedir = nodes[0].nodedir()
469 TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
472 # make this a valid step
473 def qemu_list_all(self):
474 'list all qemu instances on the qemu boxes involved in this setup'
475 for box,nodes in self.get_BoxNodes().items():
476 # this is the brute force version, list all qemus on that host box
477 TestBoxQemu(box, self.options.buildname).qemu_list_all()
480 # kill only the qemus related to this test
481 def qemu_list_mine(self):
482 'list qemu instances for our nodes'
483 for (box,nodes) in self.get_BoxNodes().items():
484 # the fine-grain version
489 # kill only the qemus related to this test
490 def qemu_clean_mine(self):
491 'cleanup (rm -rf) qemu instances for our nodes'
492 for box,nodes in self.get_BoxNodes().items():
493 # the fine-grain version
498 # kill only the right qemus
499 def qemu_kill_mine(self):
500 'kill the qemu instances for our nodes'
501 for box,nodes in self.get_BoxNodes().items():
502 # the fine-grain version
507 #################### display config
509 "show test configuration after localization"
514 # ugly hack to make sure 'run export' only reports about the 1st plc
515 # to avoid confusion - also we use 'inri_slice1' in various aliases..
518 "print cut'n paste-able stuff to export env variables to your shell"
519 # guess local domain from hostname
520 if TestPlc.exported_id > 1:
521 print("export GUESTHOSTNAME{:d}={}".format(TestPlc.exported_id, self.plc_spec['vservername']))
523 TestPlc.exported_id += 1
524 domain = socket.gethostname().split('.',1)[1]
525 fqdn = "{}.{}".format(self.plc_spec['host_box'], domain)
526 print("export BUILD={}".format(self.options.buildname))
527 print("export PLCHOSTLXC={}".format(fqdn))
528 print("export GUESTNAME={}".format(self.vservername))
529 print("export GUESTHOSTNAME={}.{}".format(self.vplchostname, domain))
530 # find hostname of first node
531 hostname, qemubox = self.all_node_infos()[0]
532 print("export KVMHOST={}.{}".format(qemubox, domain))
533 print("export NODE={}".format(hostname))
537 always_display_keys=['PLC_WWW_HOST', 'nodes', 'sites']
538 def show_pass(self, passno):
539 for (key,val) in self.plc_spec.items():
540 if not self.options.verbose and key not in TestPlc.always_display_keys:
545 self.display_site_spec(site)
546 for node in site['nodes']:
547 self.display_node_spec(node)
548 elif key == 'initscripts':
549 for initscript in val:
550 self.display_initscript_spec(initscript)
551 elif key == 'slices':
553 self.display_slice_spec(slice)
556 self.display_key_spec(key)
558 if key not in ['sites', 'initscripts', 'slices', 'keys']:
559 print('+ ', key, ':', val)
561 def display_site_spec(self, site):
562 print('+ ======== site', site['site_fields']['name'])
563 for k,v in site.items():
564 if not self.options.verbose and k not in TestPlc.always_display_keys:
568 print('+ ','nodes : ', end=' ')
570 print(node['node_fields']['hostname'],'', end=' ')
574 print('+ users : ', end=' ')
576 print(user['name'],'', end=' ')
578 elif k == 'site_fields':
579 print('+ login_base', ':', v['login_base'])
580 elif k == 'address_fields':
586 def display_initscript_spec(self, initscript):
587 print('+ ======== initscript', initscript['initscript_fields']['name'])
589 def display_key_spec(self, key):
590 print('+ ======== key', key['key_name'])
592 def display_slice_spec(self, slice):
593 print('+ ======== slice', slice['slice_fields']['name'])
594 for k,v in slice.items():
597 print('+ nodes : ', end=' ')
599 print(nodename,'', end=' ')
601 elif k == 'usernames':
603 print('+ users : ', end=' ')
605 print(username,'', end=' ')
607 elif k == 'slice_fields':
608 print('+ fields',':', end=' ')
609 print('max_nodes=',v['max_nodes'], end=' ')
614 def display_node_spec(self, node):
615 print("+ node={} host_box={}".format(node['name'], node['host_box']), end=' ')
616 print("hostname=", node['node_fields']['hostname'], end=' ')
617 print("ip=", node['interface_fields']['ip'])
618 if self.options.verbose:
619 utils.pprint("node details", node, depth=3)
621 # another entry point for just showing the boxes involved
622 def display_mapping(self):
623 TestPlc.display_mapping_plc(self.plc_spec)
627 def display_mapping_plc(plc_spec):
628 print('+ MyPLC',plc_spec['name'])
629 # WARNING this would not be right for lxc-based PLC's - should be harmless though
630 print('+\tvserver address = root@{}:/vservers/{}'.format(plc_spec['host_box'], plc_spec['vservername']))
631 print('+\tIP = {}/{}'.format(plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip']))
632 for site_spec in plc_spec['sites']:
633 for node_spec in site_spec['nodes']:
634 TestPlc.display_mapping_node(node_spec)
637 def display_mapping_node(node_spec):
638 print('+ NODE {}'.format(node_spec['name']))
639 print('+\tqemu box {}'.format(node_spec['host_box']))
640 print('+\thostname={}'.format(node_spec['node_fields']['hostname']))
642 # write a timestamp in /vservers/<>.timestamp
643 # cannot be inside the vserver, that causes vserver .. build to cough
644 def plcvm_timestamp(self):
645 "Create a timestamp to remember creation date for this plc"
646 now = int(time.time())
647 # TODO-lxc check this one
648 # a first approx. is to store the timestamp close to the VM root like vs does
649 stamp_path = self.vm_timestamp_path()
650 stamp_dir = os.path.dirname(stamp_path)
651 utils.system(self.test_ssh.actual_command("mkdir -p {}".format(stamp_dir)))
652 return utils.system(self.test_ssh.actual_command("echo {:d} > {}".format(now, stamp_path))) == 0
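# i.e. the creation date ends up on the host box as epoch seconds in
# /vservers/<vservername>/<vservername>.timestamp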
654 # this is called unconditionally at the beginning of the test sequence
655 # just in case this is a rerun, so if the vm is not running it's fine
656 def plcvm_delete(self):
657 "vserver delete the test myplc"
658 stamp_path = self.vm_timestamp_path()
659 self.run_in_host("rm -f {}".format(stamp_path))
660 self.run_in_host("virsh -c lxc:// destroy {}".format(self.vservername))
661 self.run_in_host("virsh -c lxc:// undefine {}".format(self.vservername))
662 self.run_in_host("rm -fr /vservers/{}".format(self.vservername))
666 # historically the build was being fetched by the tests
667 # now the build pushes itself as a subdir of the tests workdir
668 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
669 def plcvm_create(self):
670 "vserver creation (no install done)"
671 # push the local build/ dir to the testplc box
673 # a full path for the local calls
674 build_dir = os.path.dirname(sys.argv[0])
675 # sometimes this is empty - set to "." in such a case
678 build_dir += "/build"
680 # use a standard name - will be relative to remote buildname
682 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
683 self.test_ssh.rmdir(build_dir)
684 self.test_ssh.copy(build_dir, recursive=True)
685 # the repo url is taken from arch-rpms-url
686 # with the last step (i386) removed
687 repo_url = self.options.arch_rpms_url
688 for level in [ 'arch' ]:
689 repo_url = os.path.dirname(repo_url)
691 # invoke initvm (drop support for vs)
692 script = "lbuild-initvm.sh"
694 # pass the vbuild-nightly options to [lv]test-initvm
695 script_options += " -p {}".format(self.options.personality)
696 script_options += " -d {}".format(self.options.pldistro)
697 script_options += " -f {}".format(self.options.fcdistro)
698 script_options += " -r {}".format(repo_url)
699 vserver_name = self.vservername
701 vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
702 script_options += " -n {}".format(vserver_hostname)
704 print("Cannot reverse lookup {}".format(self.vserverip))
705 print("This is considered fatal, as this might pollute the test results")
707 create_vserver="{build_dir}/{script} {script_options} {vserver_name}".format(**locals())
708 return self.run_in_host(create_vserver) == 0
711 def plc_install(self):
713 yum install myplc, noderepo + plain bootstrapfs as well
717 if self.options.personality == "linux32":
719 elif self.options.personality == "linux64":
722 raise Exception("Unsupported personality {}".format(self.options.personality))
723 nodefamily = "{}-{}-{}".format(self.options.pldistro, self.options.fcdistro, arch)
726 pkgs_list.append("slicerepo-{}".format(nodefamily))
727 pkgs_list.append("myplc")
728 pkgs_list.append("noderepo-{}".format(nodefamily))
729 pkgs_list.append("nodeimage-{}-plain".format(nodefamily))
730 pkgs_string=" ".join(pkgs_list)
731 return self.yum_install(pkgs_list)
733 def install_syslinux6(self):
735 install syslinux6 from the fedora21 release
737 key = 'http://mirror.onelab.eu/keys/RPM-GPG-KEY-fedora-21-primary'
740 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-6.03-1.fc21.x86_64.rpm',
741 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-nonlinux-6.03-1.fc21.noarch.rpm',
742 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-perl-6.03-1.fc21.x86_64.rpm',
744 # this can be done several times
745 self.run_in_guest("rpm --import {key}".format(**locals()))
746 return self.run_in_guest("yum -y localinstall {}".format(" ".join(rpms))) == 0
748 def bonding_builds(self):
750 list /etc/yum.repos.d on the myplc side
752 self.run_in_guest("ls /etc/yum.repos.d/*partial.repo")
755 def bonding_nodes(self):
757 List nodes known to the myplc together with their nodefamily
759 print("---------------------------------------- nodes")
760 for node in self.apiserver.GetNodes(self.auth_root()):
761 print("{} -> {}".format(node['hostname'],
762 self.apiserver.GetNodeFlavour(self.auth_root(),node['hostname'])['nodefamily']))
763 print("---------------------------------------- nodes")
767 def mod_python(self):
768 """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
769 return self.yum_install( ['mod_python'] )
772 def plc_configure(self):
774 tmpname = '{}.plc-config-tty'.format(self.name())
775 with open(tmpname,'w') as fileconf:
776 for (var,value) in self.plc_spec['settings'].items():
777 fileconf.write('e {}\n{}\n'.format(var, value))
778 fileconf.write('w\n')
779 fileconf.write('q\n')
780 utils.system('cat {}'.format(tmpname))
781 self.run_in_guest_piped('cat {}'.format(tmpname), 'plc-config-tty')
782 utils.system('rm {}'.format(tmpname))
785 # f14 is a bit odd in this respect, although this worked fine in guests up to f18
786 # however using a vplc guest under f20 requires this trick
787 # the symptom is this: service plc start
788 # Starting plc (via systemctl): Failed to get D-Bus connection: \
789 # Failed to connect to socket /org/freedesktop/systemd1/private: Connection refused
790 # weird thing is the doc says f14 uses upstart by default and not systemd
791 # so this sounds kind of harmless
792 def start_service(self, service):
793 return self.start_stop_service(service, 'start')
794 def stop_service(self, service):
795 return self.start_stop_service(service, 'stop')
797 def start_stop_service(self, service, start_or_stop):
798 "utility to start/stop a service with the special trick for f14"
799 if self.options.fcdistro != 'f14':
800 return self.run_in_guest("service {} {}".format(service, start_or_stop)) == 0
802 # patch /sbin/service so it does not reset environment
803 self.run_in_guest('sed -i -e \\"s,env -i,env,\\" /sbin/service')
804 # this is because our own scripts in turn call service
805 return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service {} {}"\
806 .format(service, start_or_stop)) == 0
810 return self.start_service('plc')
814 return self.stop_service('plc')
816 def plcvm_start(self):
817 "start the PLC vserver"
821 def plcvm_stop(self):
822 "stop the PLC vserver"
826 # stores the keys from the config for further use
827 def keys_store(self):
828 "stores test users ssh keys in keys/"
829 for key_spec in self.plc_spec['keys']:
830 TestKey(self,key_spec).store_key()
833 def keys_clean(self):
834 "removes keys cached in keys/"
835 utils.system("rm -rf ./keys")
838 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
839 # for later direct access to the nodes
840 def keys_fetch(self):
841 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
843 if not os.path.isdir(dir):
845 vservername = self.vservername
846 vm_root = self.vm_root_in_host()
848 prefix = 'debug_ssh_key'
849 for ext in ['pub', 'rsa'] :
850 src = "{vm_root}/etc/planetlab/{prefix}.{ext}".format(**locals())
851 dst = "keys/{vservername}-debug.{ext}".format(**locals())
852 if self.test_ssh.fetch(src, dst) != 0:
857 "create sites with PLCAPI"
858 return self.do_sites()
860 def delete_sites(self):
861 "delete sites with PLCAPI"
862 return self.do_sites(action="delete")
864 def do_sites(self, action="add"):
865 for site_spec in self.plc_spec['sites']:
866 test_site = TestSite(self,site_spec)
867 if (action != "add"):
868 utils.header("Deleting site {} in {}".format(test_site.name(), self.name()))
869 test_site.delete_site()
870 # deleted with the site
871 #test_site.delete_users()
874 utils.header("Creating site {} & users in {}".format(test_site.name(), self.name()))
875 test_site.create_site()
876 test_site.create_users()
879 def delete_all_sites(self):
880 "Delete all sites in PLC, and related objects"
881 print('auth_root', self.auth_root())
882 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
884 # keep the automatic site - otherwise we shoot ourselves in the foot, as root_auth is not valid anymore
885 if site['login_base'] == self.plc_spec['settings']['PLC_SLICE_PREFIX']:
887 site_id = site['site_id']
888 print('Deleting site_id', site_id)
889 self.apiserver.DeleteSite(self.auth_root(), site_id)
893 "create nodes with PLCAPI"
894 return self.do_nodes()
895 def delete_nodes(self):
896 "delete nodes with PLCAPI"
897 return self.do_nodes(action="delete")
899 def do_nodes(self, action="add"):
900 for site_spec in self.plc_spec['sites']:
901 test_site = TestSite(self, site_spec)
903 utils.header("Deleting nodes in site {}".format(test_site.name()))
904 for node_spec in site_spec['nodes']:
905 test_node = TestNode(self, test_site, node_spec)
906 utils.header("Deleting {}".format(test_node.name()))
907 test_node.delete_node()
909 utils.header("Creating nodes for site {} in {}".format(test_site.name(), self.name()))
910 for node_spec in site_spec['nodes']:
911 utils.pprint('Creating node {}'.format(node_spec), node_spec)
912 test_node = TestNode(self, test_site, node_spec)
913 test_node.create_node()
916 def nodegroups(self):
917 "create nodegroups with PLCAPI"
918 return self.do_nodegroups("add")
919 def delete_nodegroups(self):
920 "delete nodegroups with PLCAPI"
921 return self.do_nodegroups("delete")
925 def translate_timestamp(start, grain, timestamp):
926 if timestamp < TestPlc.YEAR:
927 return start + timestamp*grain
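# i.e. values smaller than a year's worth of seconds are relative offsets counted
# in grains (e.g. with grain=1800, a spec value of 2 means start + 1 hour),
# whereas larger values are kept as absolute epoch timestamps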
932 def timestamp_printable(timestamp):
933 return time.strftime('%m-%d %H:%M:%S UTC', time.gmtime(timestamp))
936 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
937 now = int(time.time())
938 grain = self.apiserver.GetLeaseGranularity(self.auth_root())
939 print('API answered grain=', grain)
940 start = (now//grain)*grain
942 # find out all nodes that are reservable
943 nodes = self.all_reservable_nodenames()
945 utils.header("No reservable node found - proceeding without leases")
948 # attach them to the leases as specified in plc_specs
949 # this is where the 'leases' field gets interpreted as relative or absolute
950 for lease_spec in self.plc_spec['leases']:
951 # skip the ones that come with a null slice id
952 if not lease_spec['slice']:
954 lease_spec['t_from'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_from'])
955 lease_spec['t_until'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_until'])
956 lease_addition = self.apiserver.AddLeases(self.auth_root(), nodes, lease_spec['slice'],
957 lease_spec['t_from'], lease_spec['t_until'])
958 if lease_addition['errors']:
959 utils.header("Cannot create leases, {}".format(lease_addition['errors']))
962 utils.header('Leases on nodes {} for {} from {:d} ({}) until {:d} ({})'\
963 .format(nodes, lease_spec['slice'],
964 lease_spec['t_from'], TestPlc.timestamp_printable(lease_spec['t_from']),
965 lease_spec['t_until'], TestPlc.timestamp_printable(lease_spec['t_until'])))
969 def delete_leases(self):
970 "remove all leases in the myplc side"
971 lease_ids = [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
972 utils.header("Cleaning leases {}".format(lease_ids))
973 self.apiserver.DeleteLeases(self.auth_root(), lease_ids)
976 def list_leases(self):
977 "list all leases known to the myplc"
978 leases = self.apiserver.GetLeases(self.auth_root())
979 now = int(time.time())
981 current = l['t_until'] >= now
982 if self.options.verbose or current:
983 utils.header("{} {} from {} until {}"\
984 .format(l['hostname'], l['name'],
985 TestPlc.timestamp_printable(l['t_from']),
986 TestPlc.timestamp_printable(l['t_until'])))
989 # create nodegroups if needed, and populate
990 def do_nodegroups(self, action="add"):
991 # 1st pass to scan contents
993 for site_spec in self.plc_spec['sites']:
994 test_site = TestSite(self,site_spec)
995 for node_spec in site_spec['nodes']:
996 test_node = TestNode(self, test_site, node_spec)
997 if 'nodegroups' in node_spec:
998 nodegroupnames = node_spec['nodegroups']
999 if isinstance(nodegroupnames, str):
1000 nodegroupnames = [ nodegroupnames ]
1001 for nodegroupname in nodegroupnames:
1002 if nodegroupname not in groups_dict:
1003 groups_dict[nodegroupname] = []
1004 groups_dict[nodegroupname].append(test_node.name())
1005 auth = self.auth_root()
1007 for (nodegroupname,group_nodes) in groups_dict.items():
1009 print('nodegroups:', 'dealing with nodegroup',\
1010 nodegroupname, 'on nodes', group_nodes)
1011 # first, check if the nodetagtype is here
1012 tag_types = self.apiserver.GetTagTypes(auth, {'tagname':nodegroupname})
1014 tag_type_id = tag_types[0]['tag_type_id']
1016 tag_type_id = self.apiserver.AddTagType(auth,
1017 {'tagname' : nodegroupname,
1018 'description' : 'for nodegroup {}'.format(nodegroupname),
1019 'category' : 'test'})
1020 print('located tag (type)', nodegroupname, 'as', tag_type_id)
1022 nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname' : nodegroupname})
1024 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
1025 print('created nodegroup', nodegroupname, \
1026 'from tagname', nodegroupname, 'and value', 'yes')
1027 # set node tag on all nodes, value='yes'
1028 for nodename in group_nodes:
1030 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
1032 traceback.print_exc()
1033 print('node', nodename, 'seems to already have tag', nodegroupname)
1036 expect_yes = self.apiserver.GetNodeTags(auth,
1037 {'hostname' : nodename,
1038 'tagname' : nodegroupname},
1039 ['value'])[0]['value']
1040 if expect_yes != "yes":
1041 print('Mismatch node tag on node',nodename,'got',expect_yes)
1044 if not self.options.dry_run:
1045 print('Cannot find tag', nodegroupname, 'on node', nodename)
1049 print('cleaning nodegroup', nodegroupname)
1050 self.apiserver.DeleteNodeGroup(auth, nodegroupname)
1052 traceback.print_exc()
1056 # a list of TestNode objs
1057 def all_nodes(self):
1059 for site_spec in self.plc_spec['sites']:
1060 test_site = TestSite(self,site_spec)
1061 for node_spec in site_spec['nodes']:
1062 nodes.append(TestNode(self, test_site, node_spec))
1065 # return a list of tuples (nodename,qemuname)
1066 def all_node_infos(self) :
1068 for site_spec in self.plc_spec['sites']:
1069 node_infos += [ (node_spec['node_fields']['hostname'], node_spec['host_box']) \
1070 for node_spec in site_spec['nodes'] ]
1073 def all_nodenames(self):
1074 return [ x[0] for x in self.all_node_infos() ]
1075 def all_reservable_nodenames(self):
1077 for site_spec in self.plc_spec['sites']:
1078 for node_spec in site_spec['nodes']:
1079 node_fields = node_spec['node_fields']
1080 if 'node_type' in node_fields and node_fields['node_type'] == 'reservable':
1081 res.append(node_fields['hostname'])
1084 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
1085 def nodes_check_boot_state(self, target_boot_state, timeout_minutes,
1086 silent_minutes, period_seconds = 15):
1087 if self.options.dry_run:
1091 class CompleterTaskBootState(CompleterTask):
1092 def __init__(self, test_plc, hostname):
1093 self.test_plc = test_plc
1094 self.hostname = hostname
1095 self.last_boot_state = 'undef'
1096 def actual_run(self):
1098 node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(),
1101 self.last_boot_state = node['boot_state']
1102 return self.last_boot_state == target_boot_state
1106 return "CompleterTaskBootState with node {}".format(self.hostname)
1107 def failure_epilogue(self):
1108 print("node {} in state {} - expected {}"\
1109 .format(self.hostname, self.last_boot_state, target_boot_state))
1111 timeout = timedelta(minutes=timeout_minutes)
1112 graceout = timedelta(minutes=silent_minutes)
1113 period = timedelta(seconds=period_seconds)
1114 # the nodes that haven't checked yet - start with a full list and shrink over time
1115 utils.header("checking nodes boot state (expected {})".format(target_boot_state))
1116 tasks = [ CompleterTaskBootState(self,hostname) \
1117 for (hostname,_) in self.all_node_infos() ]
1118 message = 'check_boot_state={}'.format(target_boot_state)
1119 return Completer(tasks, message=message).run(timeout, graceout, period)
1121 def nodes_booted(self):
1122 return self.nodes_check_boot_state('boot', timeout_minutes=30, silent_minutes=28)
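# e.g. nodes_booted() polls the API every 15 seconds for up to 30 minutes,
# and stays silent for the first 28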
1124 def probe_kvm_iptables(self):
1125 (_,kvmbox) = self.all_node_infos()[0]
1126 TestSsh(kvmbox).run("iptables-save")
1130 def check_nodes_ping(self, timeout_seconds=60, period_seconds=10):
1131 class CompleterTaskPingNode(CompleterTask):
1132 def __init__(self, hostname):
1133 self.hostname = hostname
1134 def run(self, silent):
1135 command="ping -c 1 -w 1 {} >& /dev/null".format(self.hostname)
1136 return utils.system(command, silent=silent) == 0
1137 def failure_epilogue(self):
1138 print("Cannot ping node with name {}".format(self.hostname))
1139 timeout = timedelta(seconds = timeout_seconds)
1141 period = timedelta(seconds = period_seconds)
1142 node_infos = self.all_node_infos()
1143 tasks = [ CompleterTaskPingNode(h) for (h,_) in node_infos ]
1144 return Completer(tasks, message='ping_node').run(timeout, graceout, period)
1146 # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1147 def ping_node(self):
1149 return self.check_nodes_ping()
1151 def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period_seconds=15):
1153 timeout = timedelta(minutes=timeout_minutes)
1154 graceout = timedelta(minutes=silent_minutes)
1155 period = timedelta(seconds=period_seconds)
1156 vservername = self.vservername
1159 completer_message = 'ssh_node_debug'
1160 local_key = "keys/{vservername}-debug.rsa".format(**locals())
1163 completer_message = 'ssh_node_boot'
1164 local_key = "keys/key_admin.rsa"
1165 utils.header("checking ssh access to nodes (expected in {} mode)".format(message))
1166 node_infos = self.all_node_infos()
1167 tasks = [ CompleterTaskNodeSsh(nodename, qemuname, local_key,
1168 boot_state=message, dry_run=self.options.dry_run) \
1169 for (nodename, qemuname) in node_infos ]
1170 return Completer(tasks, message=completer_message).run(timeout, graceout, period)
1172 def ssh_node_debug(self):
1173 "Tries to ssh into nodes in debug mode with the debug ssh key"
1174 return self.check_nodes_ssh(debug = True,
1175 timeout_minutes = self.ssh_node_debug_timeout,
1176 silent_minutes = self.ssh_node_debug_silent)
1178 def ssh_node_boot(self):
1179 "Tries to ssh into nodes in production mode with the root ssh key"
1180 return self.check_nodes_ssh(debug = False,
1181 timeout_minutes = self.ssh_node_boot_timeout,
1182 silent_minutes = self.ssh_node_boot_silent)
1184 def node_bmlogs(self):
1185 "Checks that there's a non-empty dir. /var/log/bm/raw"
1186 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw")) == 0
1189 def qemu_local_init(self): pass
1191 def bootcd(self): pass
1193 def qemu_local_config(self): pass
1195 def qemu_export(self): pass
1197 def nodestate_reinstall(self): pass
1199 def nodestate_upgrade(self): pass
1201 def nodestate_safeboot(self): pass
1203 def nodestate_boot(self): pass
1205 def nodestate_show(self): pass
1207 def nodedistro_f14(self): pass
1209 def nodedistro_f18(self): pass
1211 def nodedistro_f20(self): pass
1213 def nodedistro_f21(self): pass
1215 def nodeflavour_show(self): pass
1217 def nodeplain_on(self): pass
1219 def nodeplain_off(self): pass
1221 def nodeplain_show(self): pass
1223 ### check hooks : invoke scripts from hooks/{node,slice}
1224 def check_hooks_node(self):
1225 return self.locate_first_node().check_hooks()
1226 def check_hooks_sliver(self) :
1227 return self.locate_first_sliver().check_hooks()
1229 def check_hooks(self):
1230 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1231 return self.check_hooks_node() and self.check_hooks_sliver()
1234 def do_check_initscripts(self):
1235 class CompleterTaskInitscript(CompleterTask):
1236 def __init__(self, test_sliver, stamp):
1237 self.test_sliver = test_sliver
1239 def actual_run(self):
1240 return self.test_sliver.check_initscript_stamp(self.stamp)
1242 return "initscript checker for {}".format(self.test_sliver.name())
1243 def failure_epilogue(self):
1244 print("initscript stamp {} not found in sliver {}"\
1245 .format(self.stamp, self.test_sliver.name()))
1248 for slice_spec in self.plc_spec['slices']:
1249 if 'initscriptstamp' not in slice_spec:
1251 stamp = slice_spec['initscriptstamp']
1252 slicename = slice_spec['slice_fields']['name']
1253 for nodename in slice_spec['nodenames']:
1254 print('nodename', nodename, 'slicename', slicename, 'stamp', stamp)
1255 site,node = self.locate_node(nodename)
1256 # xxx - passing the wrong site - probably harmless
1257 test_site = TestSite(self, site)
1258 test_slice = TestSlice(self, test_site, slice_spec)
1259 test_node = TestNode(self, test_site, node)
1260 test_sliver = TestSliver(self, test_node, test_slice)
1261 tasks.append(CompleterTaskInitscript(test_sliver, stamp))
1262 return Completer(tasks, message='check_initscripts').\
1263 run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1265 def check_initscripts(self):
1266 "check that the initscripts have triggered"
1267 return self.do_check_initscripts()
1269 def initscripts(self):
1270 "create initscripts with PLCAPI"
1271 for initscript in self.plc_spec['initscripts']:
1272 utils.pprint('Adding Initscript in plc {}'.format(self.plc_spec['name']), initscript)
1273 self.apiserver.AddInitScript(self.auth_root(), initscript['initscript_fields'])
1276 def delete_initscripts(self):
1277 "delete initscripts with PLCAPI"
1278 for initscript in self.plc_spec['initscripts']:
1279 initscript_name = initscript['initscript_fields']['name']
1280 print(('Attempting to delete {} in plc {}'.format(initscript_name, self.plc_spec['name'])))
1282 self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
1283 print(initscript_name, 'deleted')
1285 print('deletion went wrong - probably did not exist')
1290 "create slices with PLCAPI"
1291 return self.do_slices(action="add")
1293 def delete_slices(self):
1294 "delete slices with PLCAPI"
1295 return self.do_slices(action="delete")
1297 def fill_slices(self):
1298 "add nodes in slices with PLCAPI"
1299 return self.do_slices(action="fill")
1301 def empty_slices(self):
1302 "remove nodes from slices with PLCAPI"
1303 return self.do_slices(action="empty")
1305 def do_slices(self, action="add"):
1306 for slice in self.plc_spec['slices']:
1307 site_spec = self.locate_site(slice['sitename'])
1308 test_site = TestSite(self,site_spec)
1309 test_slice=TestSlice(self,test_site,slice)
1310 if action == "delete":
1311 test_slice.delete_slice()
1312 elif action == "fill":
1313 test_slice.add_nodes()
1314 elif action == "empty":
1315 test_slice.delete_nodes()
1317 test_slice.create_slice()
1320 @slice_mapper__tasks(20, 10, 15)
1321 def ssh_slice(self): pass
1322 @slice_mapper__tasks(20, 19, 15)
1323 def ssh_slice_off(self): pass
1324 @slice_mapper__tasks(1, 1, 15)
1325 def slice_fs_present(self): pass
1326 @slice_mapper__tasks(1, 1, 15)
1327 def slice_fs_deleted(self): pass
1329 # use another name so we can exclude/ignore it from the tests on the nightly command line
1330 def ssh_slice_again(self): return self.ssh_slice()
1331 # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1332 # but for some reason the ignore-wrapping thing would not
1335 def ssh_slice_basics(self): pass
1337 def check_vsys_defaults(self): pass
1340 def keys_clear_known_hosts(self): pass
1342 def plcapi_urls(self):
1344 attempts to reach the PLCAPI with various forms for the URL
1346 return PlcapiUrlScanner(self.auth_root(), ip=self.vserverip).scan()
1348 def speed_up_slices(self):
1349 "tweak nodemanager cycle (wait time) to 30+/-10 s"
1350 return self._speed_up_slices (30, 10)
1351 def super_speed_up_slices(self):
1352 "dev mode: tweak nodemanager cycle (wait time) to 5+/-1 s"
1353 return self._speed_up_slices(5, 1)
1355 def _speed_up_slices(self, p, r):
1356 # create the template on the server-side
1357 template = "{}.nodemanager".format(self.name())
1358 with open(template,"w") as template_file:
1359 template_file.write('OPTIONS="-p {} -r {} -d"\n'.format(p, r))
1360 in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1361 remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1362 self.test_ssh.copy_abs(template, remote)
1364 if not self.apiserver.GetConfFiles(self.auth_root(),
1365 {'dest' : '/etc/sysconfig/nodemanager'}):
1366 self.apiserver.AddConfFile(self.auth_root(),
1367 {'dest' : '/etc/sysconfig/nodemanager',
1368 'source' : 'PlanetLabConf/nodemanager',
1369 'postinstall_cmd' : 'service nm restart',})
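# nodes are expected to fetch /etc/sysconfig/nodemanager through PlanetLabConf
# and restart nm, so the tweaked -p/-r cycle should take effect shortly after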
1372 def debug_nodemanager(self):
1373 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1374 template = "{}.nodemanager".format(self.name())
1375 with open(template,"w") as template_file:
1376 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1377 in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1378 remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1379 self.test_ssh.copy_abs(template, remote)
1383 def qemu_start(self) : pass
1386 def qemu_timestamp(self) : pass
1389 def qemu_nodefamily(self): pass
1391 # when a spec refers to a node possibly on another plc
1392 def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
1393 for plc in [ self ] + other_plcs:
1395 return plc.locate_sliver_obj(nodename, slicename)
1398 raise Exception("Cannot locate sliver {}@{} among all PLCs".format(nodename, slicename))
1400 # implement this one as a cross step so that we can take advantage of different nodes
1401 # in multi-plcs mode
1402 def cross_check_tcp(self, other_plcs):
1403 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1404 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1405 utils.header("check_tcp: no/empty config found")
1407 specs = self.plc_spec['tcp_specs']
1410 # first wait for the network to be up and ready from the slices
1411 class CompleterTaskNetworkReadyInSliver(CompleterTask):
1412 def __init__(self, test_sliver):
1413 self.test_sliver = test_sliver
1414 def actual_run(self):
1415 return self.test_sliver.check_tcp_ready(port = 9999)
1417 return "network ready checker for {}".format(self.test_sliver.name())
1418 def failure_epilogue(self):
1419 print("could not bind port from sliver {}".format(self.test_sliver.name()))
1423 managed_sliver_names = set()
1425 # locate the TestSliver instances involved, and cache them in the spec instance
1426 spec['s_sliver'] = self.locate_sliver_obj_cross(spec['server_node'], spec['server_slice'], other_plcs)
1427 spec['c_sliver'] = self.locate_sliver_obj_cross(spec['client_node'], spec['client_slice'], other_plcs)
1428 message = "Will check TCP between s={} and c={}"\
1429 .format(spec['s_sliver'].name(), spec['c_sliver'].name())
1430 if 'client_connect' in spec:
1431 message += " (using {})".format(spec['client_connect'])
1432 utils.header(message)
1433 # we need to check network presence in both slivers, but also
1434 # avoid inserting a sliver several times
1435 for sliver in [ spec['s_sliver'], spec['c_sliver'] ]:
1436 if sliver.name() not in managed_sliver_names:
1437 tasks.append(CompleterTaskNetworkReadyInSliver(sliver))
1439 # add this sliver's name to the set
1439 managed_sliver_names .update( {sliver.name()} )
1441 # wait for the network to be OK on all server sides
1442 if not Completer(tasks, message='check for network readiness in slivers').\
1443 run(timedelta(seconds=30), timedelta(seconds=24), period=timedelta(seconds=5)):
1446 # run server and client
1450 # the issue here is that the server runs in the background
1451 # so we have no clue whether it actually took off properly
1452 # it looks like in some cases it does not
1453 if not spec['s_sliver'].run_tcp_server(port, timeout=20):
1457 # idem for the client side
1458 # use nodename from located sliver, unless 'client_connect' is set
1459 if 'client_connect' in spec:
1460 destination = spec['client_connect']
1462 destination = spec['s_sliver'].test_node.name()
1463 if not spec['c_sliver'].run_tcp_client(destination, port):
1467 # painfully enough, we need to allow for some time as netflow might show up last
1468 def check_system_slice(self):
1469 "all nodes: check that a system slice is alive"
1470 # netflow currently not working in the lxc distro
1471 # drl not built at all in the wtx distro
1472 # if we find either of them we're happy
1473 return self.check_netflow() or self.check_drl()
1476 def check_netflow(self): return self._check_system_slice('netflow')
1477 def check_drl(self): return self._check_system_slice('drl')
1479 # we have the slices up already here, so it should not take too long
1480 def _check_system_slice(self, slicename, timeout_minutes=5, period_seconds=15):
1481 class CompleterTaskSystemSlice(CompleterTask):
1482 def __init__(self, test_node, dry_run):
1483 self.test_node = test_node
1484 self.dry_run = dry_run
1485 def actual_run(self):
1486 return self.test_node._check_system_slice(slicename, dry_run=self.dry_run)
1488 return "System slice {} @ {}".format(slicename, self.test_node.name())
1489 def failure_epilogue(self):
1490 print("COULD not find system slice {} @ {}".format(slicename, self.test_node.name()))
1491 timeout = timedelta(minutes=timeout_minutes)
1492 silent = timedelta(0)
1493 period = timedelta(seconds=period_seconds)
1494 tasks = [ CompleterTaskSystemSlice(test_node, self.options.dry_run) \
1495 for test_node in self.all_nodes() ]
1496 return Completer(tasks, message='_check_system_slice').run(timeout, silent, period)
1498 def plcsh_stress_test(self):
1499 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1500 # install the stress-test in the plc image
1501 location = "/usr/share/plc_api/plcsh_stress_test.py"
1502 remote = "{}/{}".format(self.vm_root_in_host(), location)
1503 self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1505 command += " -- --check"
1506 if self.options.size == 1:
1507 command += " --tiny"
1508 return self.run_in_guest(command) == 0
1510 # populate runs the same utility with slightly different options
1511 # in particular runs with --preserve (dont cleanup) and without --check
1512 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1514 def sfa_install_all(self):
1515 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1516 return self.yum_install("sfa sfa-plc sfa-sfatables sfa-client")
1518 def sfa_install_core(self):
1520 return self.yum_install("sfa")
1522 def sfa_install_plc(self):
1523 "yum install sfa-plc"
1524 return self.yum_install("sfa-plc")
1526 def sfa_install_sfatables(self):
1527 "yum install sfa-sfatables"
1528 return self.yum_install("sfa-sfatables")
1530 # for some very odd reason, this sometimes fails with the following symptom
1531 # # yum install sfa-client
1532 # Setting up Install Process
1534 # Downloading Packages:
1535 # Running rpm_check_debug
1536 # Running Transaction Test
1537 # Transaction Test Succeeded
1538 # Running Transaction
1539 # Transaction couldn't start:
1540 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1541 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1542 # even though in the same context I have
1543 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1544 # Filesystem Size Used Avail Use% Mounted on
1545 # /dev/hdv1 806G 264G 501G 35% /
1546 # none 16M 36K 16M 1% /tmp
1548 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1549 def sfa_install_client(self):
1550 "yum install sfa-client"
1551 first_try = self.yum_install("sfa-client")
1554 utils.header("********** Regular yum failed - special workaround in place, 2nd chance")
1555 code, cached_rpm_path = \
1556 utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1557 utils.header("rpm_path=<<{}>>".format(cached_rpm_path))
1559 self.run_in_guest("rpm -i {}".format(cached_rpm_path))
1560 return self.yum_check_installed("sfa-client")
1562 def sfa_dbclean(self):
1563 "thoroughly wipes off the SFA database"
1564 return self.run_in_guest("sfaadmin reg nuke") == 0 or \
1565 self.run_in_guest("sfa-nuke.py") == 0 or \
1566 self.run_in_guest("sfa-nuke-plc.py") == 0 or \
1567 self.run_in_guest("sfaadmin registry nuke") == 0
1569 def sfa_fsclean(self):
1570 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1571 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1574 def sfa_plcclean(self):
1575 "cleans the PLC entries that were created as a side effect of running the script"
1577 sfa_spec = self.plc_spec['sfa']
1579 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1580 login_base = auth_sfa_spec['login_base']
1582 self.apiserver.DeleteSite(self.auth_root(),login_base)
1584 print("Site {} already absent from PLC db".format(login_base))
1586 for spec_name in ['pi_spec','user_spec']:
1587 user_spec = auth_sfa_spec[spec_name]
1588 username = user_spec['email']
1590 self.apiserver.DeletePerson(self.auth_root(),username)
1592 # this in fact is expected as sites delete their members
1593 #print "User {} already absent from PLC db".format(username)
1596 print("REMEMBER TO RUN sfa_import AGAIN")
1599 def sfa_uninstall(self):
1600 "uses rpm to uninstall sfa - ignore result"
1601 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1602 self.run_in_guest("rm -rf /var/lib/sfa")
1603 self.run_in_guest("rm -rf /etc/sfa")
1604 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1606 self.run_in_guest("rpm -e --noscripts sfa-plc")
1609 ### run unit tests for SFA
1610 # NOTE: on f14/i386, yum install sfa-tests sometimes fails for no apparent reason
1611 # Running Transaction
1612 # Transaction couldn't start:
1613 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1614 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1615 # no matter how many Gbs are available on the testplc
1616 # could not figure out what's wrong, so...
1617 # if the yum install phase fails, consider the test successful
1618 # other combinations will eventually run it hopefully
1619 def sfa_utest(self):
1620 "yum install sfa-tests and run SFA unittests"
1621 self.run_in_guest("yum -y install sfa-tests")
1622 # failed to install - forget it
1623 if self.run_in_guest("rpm -q sfa-tests") != 0:
1624 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1626 return self.run_in_guest("/usr/share/sfa/tests/testAll.py") == 0
1630 dirname = "conf.{}".format(self.plc_spec['name'])
1631 if not os.path.isdir(dirname):
1632 utils.system("mkdir -p {}".format(dirname))
1633 if not os.path.isdir(dirname):
1634 raise Exception("Cannot create config dir for plc {}".format(self.name()))
1637 def conffile(self, filename):
1638 return "{}/{}".format(self.confdir(), filename)
1639 def confsubdir(self, dirname, clean, dry_run=False):
1640 subdirname = "{}/{}".format(self.confdir(), dirname)
1641 if clean:
1642 utils.system("rm -rf {}".format(subdirname))
1643 if not os.path.isdir(subdirname):
1644 utils.system("mkdir -p {}".format(subdirname))
1645 if not dry_run and not os.path.isdir(subdirname):
1646 raise Exception("Cannot create config subdir {} for plc {}".format(dirname, self.name()))
1647 return subdirname
1649 def conffile_clean(self, filename):
1650 filename=self.conffile(filename)
1651 return utils.system("rm -rf {}".format(filename))==0
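# illustration only - with a hypothetical plc named 'onelab', the helpers above resolve to
#   self.confdir()                        -> "conf.onelab"
#   self.conffile("agg.xml")              -> "conf.onelab/agg.xml"
#   self.confsubdir("dot-sfi/main", True) -> "conf.onelab/dot-sfi/main"  (wiped first since clean is True)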
1654 def sfa_configure(self):
1655 "run sfa-config-tty"
1656 tmpname = self.conffile("sfa-config-tty")
1657 with open(tmpname,'w') as fileconf:
1658 for (var,value) in self.plc_spec['sfa']['settings'].items():
1659 fileconf.write('e {}\n{}\n'.format(var, value))
1660 fileconf.write('w\n')
1661 fileconf.write('R\n')
1662 fileconf.write('q\n')
1663 utils.system('cat {}'.format(tmpname))
1664 self.run_in_guest_piped('cat {}'.format(tmpname), 'sfa-config-tty')
1665 return True
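# sketch of the generated input, with a made-up setting value; each (var, value) pair becomes
# an 'e' edit command, then the literal 'w', 'R' and 'q' lines close the sfa-config-tty dialog:
#   e SFA_REGISTRY_ROOT_AUTH
#   plt
#   w
#   R
#   q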
1667 def aggregate_xml_line(self):
1668 port = self.plc_spec['sfa']['neighbours-port']
1669 return '<aggregate addr="{}" hrn="{}" port="{}"/>'\
1670 .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'], port)
1672 def registry_xml_line(self):
1673 return '<registry addr="{}" hrn="{}" port="12345"/>'\
1674 .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
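# with hypothetical values, these helpers emit single XML elements such as
#   <aggregate addr="192.168.1.5" hrn="plt" port="12346"/>
#   <registry addr="192.168.1.5" hrn="plt" port="12345"/>
# addr is this plc's vserver IP and hrn its SFA_REGISTRY_ROOT_AUTH setting; the aggregate port
# comes from the 'neighbours-port' setting while the registry port is hard-wired to 12345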
1677 # a cross step that takes all other plcs in argument
1678 def cross_sfa_configure(self, other_plcs):
1679 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1680 # of course with a single plc, other_plcs is an empty list
1681 if not other_plcs:
1682 return True
1683 agg_fname = self.conffile("agg.xml")
1684 with open(agg_fname,"w") as out:
1685 out.write("<aggregates>{}</aggregates>\n"\
1686 .format(" ".join([ plc.aggregate_xml_line() for plc in other_plcs ])))
1687 utils.header("(Over)wrote {}".format(agg_fname))
1688 reg_fname=self.conffile("reg.xml")
1689 with open(reg_fname,"w") as out:
1690 out.write("<registries>{}</registries>\n"\
1691 .format(" ".join([ plc.registry_xml_line() for plc in other_plcs ])))
1692 utils.header("(Over)wrote {}".format(reg_fname))
1693 return self.test_ssh.copy_abs(agg_fname,
1694 '/{}/etc/sfa/aggregates.xml'.format(self.vm_root_in_host())) == 0 \
1695 and self.test_ssh.copy_abs(reg_fname,
1696 '/{}/etc/sfa/registries.xml'.format(self.vm_root_in_host())) == 0
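# in a hypothetical two-plc run, each plc ends up with files along the lines of
#   /etc/sfa/aggregates.xml : <aggregates><aggregate addr="..." hrn="..." port="..."/></aggregates>
#   /etc/sfa/registries.xml : <registries><registry addr="..." hrn="..." port="12345"/></registries>
# with one element per *other* plc; with a single plc there is nothing to cross-reference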
1698 def sfa_import(self):
1699 "use sfaadmin to import from plc"
1700 auth = self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']
1701 return self.run_in_guest('sfaadmin reg import_registry') == 0
1703 def sfa_start(self):
1705 return self.start_service('sfa')
1708 def sfi_configure(self):
1709 "Create /root/sfi on the plc side for sfi client configuration"
1710 if self.options.dry_run:
1711 utils.header("DRY RUN - skipping step")
1712 return True
1713 sfa_spec = self.plc_spec['sfa']
1714 # cannot use auth_sfa_mapper to pass dir_name
1715 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1716 test_slice = TestAuthSfa(self, slice_spec)
1717 dir_basename = os.path.basename(test_slice.sfi_path())
1718 dir_name = self.confsubdir("dot-sfi/{}".format(dir_basename),
1719 clean=True, dry_run=self.options.dry_run)
1720 test_slice.sfi_configure(dir_name)
1721 # push into the remote /root/sfi area
1722 location = test_slice.sfi_path()
1723 remote = "{}/{}".format(self.vm_root_in_host(), location)
1724 self.test_ssh.mkdir(remote, abs=True)
1725 # need to strip the last level of remote, otherwise we'd get an extra dir level
1726 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1727 return True
1730 def sfi_clean(self):
1731 "clean up /root/sfi on the plc side"
1732 self.run_in_guest("rm -rf /root/sfi")
1733 return True
1735 def sfa_rspec_empty(self):
1736 "expose a static empty rspec (ships with the tests module) in the sfi directory"
1737 filename = "empty-rspec.xml"
1739 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1740 test_slice = TestAuthSfa(self, slice_spec)
1741 in_vm = test_slice.sfi_path()
1742 remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1743 if self.test_ssh.copy_abs(filename, remote) != 0:
1744 return False
1746 return True
1748 def sfa_register_site(self): pass
1750 def sfa_register_pi(self): pass
1752 def sfa_register_user(self): pass
1754 def sfa_update_user(self): pass
1756 def sfa_register_slice(self): pass
1758 def sfa_renew_slice(self): pass
1760 def sfa_get_expires(self): pass
1762 def sfa_discover(self): pass
1764 def sfa_rspec(self): pass
1766 def sfa_allocate(self): pass
1768 def sfa_allocate_empty(self): pass
1770 def sfa_provision(self): pass
1772 def sfa_provision_empty(self): pass
1774 def sfa_describe(self): pass
1776 def sfa_check_slice_plc(self): pass
1778 def sfa_check_slice_plc_empty(self): pass
1780 def sfa_update_slice(self): pass
1782 def sfa_remove_user_from_slice(self): pass
1784 def sfa_insert_user_in_slice(self): pass
1786 def sfi_list(self): pass
1788 def sfi_show_site(self): pass
1790 def sfi_show_slice(self): pass
1792 def sfi_show_slice_researchers(self): pass
1794 def ssh_slice_sfa(self): pass
1796 def sfa_delete_user(self): pass
1798 def sfa_delete_slice(self): pass
1800 def sfa_stop(self):
1801 "stop the SFA service"
1802 return self.stop_service('sfa')
1805 "creates random entries in the PLCAPI"
1806 # install the stress-test in the plc image
1807 location = "/usr/share/plc_api/plcsh_stress_test.py"
1808 remote = "{}/{}".format(self.vm_root_in_host(), location)
1809 self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1810 command = location
1811 command += " -- --preserve --short-names"
1812 local = (self.run_in_guest(command) == 0)
1813 # second run with --foreign
1814 command += ' --foreign'
1815 remote = (self.run_in_guest(command) == 0)
1816 return local and remote
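# the two guest-side runs therefore look like (assuming 'command' starts as the script's
# guest path, as above):
#   /usr/share/plc_api/plcsh_stress_test.py -- --preserve --short-names
#   /usr/share/plc_api/plcsh_stress_test.py -- --preserve --short-names --foreign
# and the step succeeds only if both exit 0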
1819 ####################
1821 def bonding_init_partial(self): pass
1824 def bonding_add_yum(self): pass
1827 def bonding_install_rpms(self): pass
1829 ####################
1831 def gather_logs(self):
1832 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1833 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1834 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1835 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1836 # (2) get each node's qemu log and store it as logs/node.qemu.<node>.log
1837 # (3) get each node's /var/log and store it as logs/node.var-log.<node>/*
1838 # (4) as far as possible, get each sliver's /var/log as logs/sliver.var-log.<sliver>/*
1840 print("-------------------- TestPlc.gather_logs : PLC's /var/log")
1841 self.gather_var_logs()
1843 print("-------------------- TestPlc.gather_logs : PLC's /var/lib/pgsql/data/pg_log/")
1844 self.gather_pgsql_logs()
1846 print("-------------------- TestPlc.gather_logs : PLC's /root/sfi/")
1847 self.gather_root_sfi()
1849 print("-------------------- TestPlc.gather_logs : nodes' QEMU logs")
1850 for site_spec in self.plc_spec['sites']:
1851 test_site = TestSite(self,site_spec)
1852 for node_spec in site_spec['nodes']:
1853 test_node = TestNode(self, test_site, node_spec)
1854 test_node.gather_qemu_logs()
1856 print("-------------------- TestPlc.gather_logs : nodes' /var/log")
1857 self.gather_nodes_var_logs()
1859 print("-------------------- TestPlc.gather_logs : sample sliver's /var/log")
1860 self.gather_slivers_var_logs()
1861 return True
1863 def gather_slivers_var_logs(self):
1864 for test_sliver in self.all_sliver_objs():
1865 remote = test_sliver.tar_var_logs()
1866 utils.system("mkdir -p logs/sliver.var-log.{}".format(test_sliver.name()))
1867 command = remote + " | tar -C logs/sliver.var-log.{} -xf -".format(test_sliver.name())
1868 utils.system(command)
1871 def gather_var_logs(self):
1872 utils.system("mkdir -p logs/myplc.var-log.{}".format(self.name()))
1873 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1874 command = to_plc + "| tar -C logs/myplc.var-log.{} -xf -".format(self.name())
1875 utils.system(command)
1876 command = "chmod a+r,a+x logs/myplc.var-log.{}/httpd".format(self.name())
1877 utils.system(command)
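# all these gather_* helpers use the same pattern: a remote 'tar -cf -' piped into a local
# 'tar -xf -' under logs/; roughly equivalent to this manual pipeline (hypothetical names):
#   ssh root@myplc.example.org tar -C /var/log -cf - . | tar -C logs/myplc.var-log.myplc -xf -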
1879 def gather_pgsql_logs(self):
1880 utils.system("mkdir -p logs/myplc.pgsql-log.{}".format(self.name()))
1881 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1882 command = to_plc + "| tar -C logs/myplc.pgsql-log.{} -xf -".format(self.name())
1883 utils.system(command)
1885 def gather_root_sfi(self):
1886 utils.system("mkdir -p logs/sfi.{}".format(self.name()))
1887 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1888 command = to_plc + "| tar -C logs/sfi.{} -xf -".format(self.name())
1889 utils.system(command)
1891 def gather_nodes_var_logs(self):
1892 for site_spec in self.plc_spec['sites']:
1893 test_site = TestSite(self, site_spec)
1894 for node_spec in site_spec['nodes']:
1895 test_node = TestNode(self, test_site, node_spec)
1896 test_ssh = TestSsh(test_node.name(), key="keys/key_admin.rsa")
1897 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1898 command = command + "| tar -C logs/node.var-log.{} -xf -".format(test_node.name())
1899 utils.system("mkdir -p logs/node.var-log.{}".format(test_node.name()))
1900 utils.system(command)
1903 # returns the filename to use for sql dump/restore, using options.dbname if set
1904 def dbfile(self, database):
1905 # uses options.dbname if it is found
1906 try:
1907 name = self.options.dbname
1908 if not isinstance(name, str):
1909 raise Exception
1910 except:
1911 # fall back to a date stamp when options.dbname is not usable
1912 name = str(datetime.now().date())
1914 return "/root/{}-{}.sql".format(database, name)
1916 def plc_db_dump(self):
1917 'dump the planetlab5 DB in /root in the PLC - filename has time'
1918 dump = self.dbfile("planetlab5")
1919 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f ' + dump)
1920 utils.header('Dumped planetlab5 database in {}'.format(dump))
1921 return True
1923 def plc_db_restore(self):
1924 'restore the planetlab5 DB - looks broken, but run -n might help'
1925 dump = self.dbfile("planetlab5")
1926 ##stop httpd service
1927 self.run_in_guest('service httpd stop')
1928 # xxx - need another wrapper
1929 self.run_in_guest_piped('echo drop database planetlab5', 'psql --user=pgsqluser template1')
1930 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1931 self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump)
1932 ##starting httpd service
1933 self.run_in_guest('service httpd start')
1935 utils.header('Database restored from ' + dump)
1937 @staticmethod
1938 def create_ignore_steps():
1939 for step in TestPlc.default_steps + TestPlc.other_steps:
1940 # default step can have a plc qualifier
1941 if '@' in step:
1942 step, qualifier = step.split('@')
1943 # or be defined as forced or ignored by default
1944 for keyword in ['_ignore','_force']:
1945 if step.endswith(keyword):
1946 step=step.replace(keyword,'')
1947 if step == SEP or step == SEPSFA:
1948 continue
1949 method = getattr(TestPlc,step)
1950 name = step + '_ignore'
1951 wrapped = ignore_result(method)
1952 # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
1953 setattr(TestPlc, name, wrapped)
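# e.g. a step such as 'ssh_slice' gets an 'ssh_slice_ignore' twin that runs the same code but
# always reports success (through ignore_result); the '@<plc>' qualifier and any trailing
# _ignore/_force keyword are stripped first, and SEP/SEPSFA separators are skipped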
1956 # def ssh_slice_again_ignore (self): pass
1958 # def check_initscripts_ignore (self): pass
1960 def standby_1_through_20(self):
1961 """convenience function to wait for a specified number of minutes"""
1964 def standby_1(): pass
1966 def standby_2(): pass
1968 def standby_3(): pass
1970 def standby_4(): pass
1972 def standby_5(): pass
1974 def standby_6(): pass
1976 def standby_7(): pass
1978 def standby_8(): pass
1980 def standby_9(): pass
1982 def standby_10(): pass
1984 def standby_11(): pass
1986 def standby_12(): pass
1988 def standby_13(): pass
1990 def standby_14(): pass
1992 def standby_15(): pass
1994 def standby_16(): pass
1996 def standby_17(): pass
1998 def standby_18(): pass
2000 def standby_19(): pass
2002 def standby_20(): pass
2004 # convenience for debugging the test logic
2005 def yes(self): return True
2006 def no(self): return False
2007 def fail(self): return False