# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA
#
import sys
import os
import time
import socket
import traceback

from datetime import datetime, timedelta

import utils
from Completer import Completer, CompleterTask
from TestSite import TestSite
from TestNode import TestNode, CompleterTaskNodeSsh
from TestUser import TestUser
from TestKey import TestKey
from TestSlice import TestSlice
from TestSliver import TestSliver
from TestBoxQemu import TestBoxQemu
from TestSsh import TestSsh
from TestApiserver import TestApiserver
from TestAuthSfa import TestAuthSfa
from PlcapiUrlScanner import PlcapiUrlScanner
from TestBonding import TestBonding

has_sfa_cache_filename = "sfa-cache"
# step methods must take (self) and return a boolean (options is a member of the class)

def standby(minutes, dry_run):
    utils.header('Entering StandBy for {:d} minutes'.format(minutes))
    if not dry_run:
        time.sleep(60 * minutes)
    return True

def standby_generic(func):
    def actual(self):
        minutes = int(func.__name__.split("_")[1])
        return standby(minutes, self.options.dry_run)
    return actual
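# Illustration only (not part of the original file): under this contract a
# typical step looks like
#
#     def plc_running(self):
#         "one-line doc, picked up by the steps listing"
#         return self.run_in_guest("systemctl is-active plc") == 0
#
# i.e. it takes only self, reads its settings from self.options, and
# returns a boolean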
def node_mapper(method):
    def map_on_nodes(self, *args, **kwds):
        overall = True
        node_method = TestNode.__dict__[method.__name__]
        for test_node in self.all_nodes():
            if not node_method(test_node, *args, **kwds):
                overall = False
        return overall
    # maintain __name__ for ignore_result
    map_on_nodes.__name__ = method.__name__
    # restore the doc text
    map_on_nodes.__doc__ = TestNode.__dict__[method.__name__].__doc__
    return map_on_nodes
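# Illustration only: with this decorator, a step that must run on every node
# is declared in TestPlc as a mere placeholder, e.g.
#
#     @node_mapper
#     def qemu_start(self): pass
#
# and the actual logic lives in TestNode.qemu_start (see the declarations
# further down in this file)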
def slice_mapper(method):
    def map_on_slices(self):
        overall = True
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site(slice_spec['sitename'])
            test_site = TestSite(self, site_spec)
            test_slice = TestSlice(self, test_site, slice_spec)
            if not slice_method(test_slice, self.options):
                overall = False
        return overall
    # maintain __name__ for ignore_result
    map_on_slices.__name__ = method.__name__
    # restore the doc text
    map_on_slices.__doc__ = TestSlice.__dict__[method.__name__].__doc__
    return map_on_slices
def bonding_redirector(method):
    bonding_name = method.__name__.replace('bonding_', '')
    def redirect(self):
        bonding_method = TestBonding.__dict__[bonding_name]
        return bonding_method(self.test_bonding)
    # maintain __name__ for ignore_result
    redirect.__name__ = method.__name__
    # restore the doc text
    redirect.__doc__ = TestBonding.__dict__[bonding_name].__doc__
    return redirect
# run a step but return True so that we can go on
def ignore_result(method):
    def ignoring(self):
        # ssh_slice_ignore -> ssh_slice
        ref_name = method.__name__.replace('_ignore', '').replace('force_', '')
        ref_method = TestPlc.__dict__[ref_name]
        result = ref_method(self)
        print("Actual (but ignored) result for {ref_name} is {result}".format(**locals()))
        return Ignored(result)
    name = method.__name__.replace('_ignore', '').replace('force_', '')
    ignoring.__name__ = name
    ignoring.__doc__ = "ignored version of " + name
    return ignoring
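# Illustration only: an '_ignore' variant is declared like e.g.
#
#     @ignore_result
#     def ssh_slice_ignore(self): pass
#
# it runs the underlying ssh_slice step, prints its outcome, and wraps it
# in an Ignored object so the overall run does not fail on it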
# a variant that expects the TestSlice method to return a list of CompleterTasks that
# are then merged into a single Completer run, to avoid waiting for all the slices
# esp. useful when a test fails, of course
# because we need to pass arguments we use a class instead..
class slice_mapper__tasks(object):
    # could not get this to work with named arguments
    def __init__(self, timeout_minutes, silent_minutes, period_seconds):
        self.timeout = timedelta(minutes=timeout_minutes)
        self.silent = timedelta(minutes=silent_minutes)
        self.period = timedelta(seconds=period_seconds)
    def __call__(self, method):
        decorator_self = self
        # compute augmented method name
        method_name = method.__name__ + "__tasks"
        # locate in TestSlice
        slice_method = TestSlice.__dict__[method_name]
        def wrappee(self):
            tasks = []
            for slice_spec in self.plc_spec['slices']:
                site_spec = self.locate_site(slice_spec['sitename'])
                test_site = TestSite(self, site_spec)
                test_slice = TestSlice(self, test_site, slice_spec)
                tasks += slice_method(test_slice, self.options)
            return Completer(tasks, message=method.__name__).\
                run(decorator_self.timeout, decorator_self.silent, decorator_self.period)
        # restore the doc text from the TestSlice method even if a bit odd
        wrappee.__name__ = method.__name__
        wrappee.__doc__ = slice_method.__doc__
        return wrappee
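# Illustration only: used further down as e.g.
#
#     @slice_mapper__tasks(20, 10, 15)
#     def ssh_slice(self): pass
#
# meaning: collect TestSlice.ssh_slice__tasks for every slice, then run them
# all in one Completer with a 20-minute timeout, 10 silent minutes, and a
# 15-second polling period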
def auth_sfa_mapper(method):
    def actual(self):
        overall = True
        auth_method = TestAuthSfa.__dict__[method.__name__]
        for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_auth = TestAuthSfa(self, auth_spec)
            if not auth_method(test_auth, self.options):
                overall = False
        return overall
    # restore the doc text
    actual.__doc__ = TestAuthSfa.__dict__[method.__name__].__doc__
    return actual
class Ignored:
    def __init__(self, result):
        self.result = result

# sentinel markers used to group steps in the printable listing;
# the exact values are an assumption, the original definitions are
# not part of this excerpt - they only serve as sentinels anyway
SEP = 'SEP'
SEPSFA = 'SEPSFA'

class TestPlc:

    default_steps = [
        'show', SEP,
        'plcvm_delete', 'plcvm_timestamp', 'plcvm_create', SEP,
        'django_install', 'plc_install', 'plc_configure', 'plc_start', SEP,
        'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
        'plcapi_urls', 'speed_up_slices', SEP,
        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
        # slices created under plcsh interactively seem to be fine but these ones don't have the tags
        # keep this out of the way for now
        'check_vsys_defaults_ignore', SEP,
        # run this first off so it's easier to re-run on another qemu box
        'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init', 'bootcd', 'qemu_local_config', SEP,
        'qemu_clean_mine', 'qemu_export', 'qemu_cleanlog', SEP,
        'qemu_start', 'qemu_timestamp', 'qemu_nodefamily', SEP,
        'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
        'sfi_configure@1', 'sfa_register_site@1', 'sfa_register_pi@1', SEPSFA,
        'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
        'sfa_remove_user_from_slice@1', 'sfi_show_slice_researchers@1',
        'sfa_insert_user_in_slice@1', 'sfi_show_slice_researchers@1', SEPSFA,
        'sfa_discover@1', 'sfa_rspec@1', SEPSFA,
        'sfa_allocate@1', 'sfa_provision@1', 'sfa_describe@1', SEPSFA,
        'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
        'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
        # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
        # but as the stress test might take a while, we sometimes missed the debug mode..
        'probe_kvm_iptables',
        'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
        'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', SEP,
        'ssh_slice_sfa@1', SEPSFA,
        'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1', 'sfa_check_slice_plc_empty@1', SEPSFA,
        'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
        'cross_check_tcp@1', 'check_system_slice', SEP,
        # for inspecting the slice while it runs the first time
        # check slices are turned off properly
        'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
        # check they are properly re-created with the same name
        'fill_slices', 'ssh_slice_again', SEP,
        'gather_logs_force', SEP,
    ]
    other_steps = [
        'export', 'show_boxes', 'super_speed_up_slices', SEP,
        'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
        'delete_initscripts', 'delete_nodegroups', 'delete_all_sites', SEP,
        'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
        'delete_leases', 'list_leases', SEP,
        'nodestate_show', 'nodestate_safeboot', 'nodestate_boot', 'nodestate_upgrade', SEP,
        'nodedistro_show', 'nodedistro_f14', 'nodedistro_f18', SEP,
        'nodedistro_f20', 'nodedistro_f21', 'nodedistro_f22', SEP,
        'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
        'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
        'sfa_plcclean', 'sfa_dbclean', 'sfa_stop', 'sfa_uninstall', 'sfi_clean', SEPSFA,
        'sfa_get_expires', SEPSFA,
        'plc_db_dump', 'plc_db_restore', SEP,
        'check_netflow', 'check_drl', SEP,
        'slice_fs_present', 'check_initscripts', SEP,
        'standby_1_through_20', 'yes', 'no', SEP,
        'install_syslinux6', 'bonding_builds', 'bonding_nodes', SEP,
    ]
    default_bonding_steps = [
        'bonding_init_partial',
        'bonding_install_rpms', SEP,
    ]
    @staticmethod
    def printable_steps(list):
        single_line = " ".join(list) + " "
        return single_line.replace(" " + SEP + " ", " \\\n").replace(" " + SEPSFA + " ", " \\\n")
    @staticmethod
    def valid_step(step):
        return step != SEP and step != SEPSFA
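    # Illustration only: with the sentinel values assumed above,
    #     printable_steps(['sites', 'nodes', SEP, 'slices'])
    # renders the separator as a backslash-newline, so the steps listing
    # comes out grouped one block per line; valid_step() then filters the
    # sentinels out before execution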
    # turn off the sfa-related steps when build has skipped SFA
    # this was originally for centos5 but is still valid
    # for up to f12 as recent SFAs with sqlalchemy won't build before f14
    @staticmethod
    def _has_sfa_cached(rpms_url):
        if os.path.isfile(has_sfa_cache_filename):
            with open(has_sfa_cache_filename) as cache:
                cached = cache.read() == "yes"
            utils.header("build provides SFA (cached):{}".format(cached))
            return cached
        # warning, we're now building 'sface' so let's be a bit more picky
        # full builds are expected to return with 0 here
        utils.header("Checking if build provides SFA package...")
        retcod = utils.system("curl --silent {}/ | grep -q sfa-".format(rpms_url)) == 0
        encoded = 'yes' if retcod else 'no'
        with open(has_sfa_cache_filename, 'w') as cache:
            cache.write(encoded)
        return retcod
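    # Illustration only: the probe above boils down to something like
    #     curl --silent http://build.example.com/f37/RPMS/ | grep -q sfa-
    # (hypothetical URL) - exit status 0 meaning the build does ship SFA
    # rpms - and the verdict is memoized in the sfa-cache file as 'yes'/'no'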
    @staticmethod
    def check_whether_build_has_sfa(rpms_url):
        has_sfa = TestPlc._has_sfa_cached(rpms_url)
        if has_sfa:
            utils.header("build does provide SFA")
        else:
            # move all steps containing 'sfa' from default_steps to other_steps
            utils.header("SFA package not found - removing steps with sfa or sfi")
            sfa_steps = [ step for step in TestPlc.default_steps
                          if step.find('sfa') >= 0 or step.find("sfi") >= 0 ]
            TestPlc.other_steps += sfa_steps
            for step in sfa_steps:
                TestPlc.default_steps.remove(step)
    def __init__(self, plc_spec, options):
        self.plc_spec = plc_spec
        self.options = options
        self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
        self.vserverip = plc_spec['vserverip']
        self.vservername = plc_spec['vservername']
        self.vplchostname = self.vservername.split('-')[-1]
        self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
        self.apiserver = TestApiserver(self.url, options.dry_run)
        (self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
        (self.ssh_node_debug_timeout, self.ssh_node_debug_silent) = plc_spec['ssh_node_debug_timers']
    def has_addresses_api(self):
        return self.apiserver.has_method('AddIpAddress')

    def name(self):
        name = self.plc_spec['name']
        return "{}.{}".format(name, self.vservername)

    def hostname(self):
        return self.plc_spec['host_box']

    def is_local(self):
        return self.test_ssh.is_local()
    # define the API methods on this object through xmlrpc
    # would help, but not strictly necessary
    def actual_command_in_guest(self, command, backslash=False):
        raw1 = self.host_to_guest(command)
        raw2 = self.test_ssh.actual_command(raw1, dry_run=self.options.dry_run, backslash=backslash)
        return raw2

    def start_guest(self):
        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),
                                                         dry_run=self.options.dry_run))

    def stop_guest(self):
        return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),
                                                         dry_run=self.options.dry_run))

    def run_in_guest(self, command, backslash=False):
        raw = self.actual_command_in_guest(command, backslash)
        return utils.system(raw)

    def run_in_host(self, command):
        return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)

    # backslashing turned out so awful at some point that I've turned off auto-backslashing
    # see e.g. plc_start esp. the version for f14
    # command gets run in the plc's vm
    def host_to_guest(self, command):
        ssh_leg = TestSsh(self.vplchostname)
        return ssh_leg.actual_command(command, keep_stdin=True)
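    # Illustration only (not in the original file): a call like
    #     self.run_in_guest("plc-config-tty")
    # thus composes two ssh legs, conceptually
    #     ssh root@<host_box> ssh root@<vplc-hostname> plc-config-tty
    # with quoting/escaping handled by TestSsh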
    # this /vservers thing is legacy...
    def vm_root_in_host(self):
        return "/vservers/{}/".format(self.vservername)

    def vm_timestamp_path(self):
        return "/vservers/{}/{}.timestamp".format(self.vservername, self.vservername)

    # start/stop the vserver
    def start_guest_in_host(self):
        return "virsh -c lxc:/// start {}".format(self.vservername)

    def stop_guest_in_host(self):
        return "virsh -c lxc:/// destroy {}".format(self.vservername)
    def run_in_guest_piped(self, local, remote):
        return utils.system(local + " | " +
                            self.test_ssh.actual_command(self.host_to_guest(remote),
                                                         keep_stdin=True))

    def dnf_check_installed(self, rpms):
        if isinstance(rpms, list):
            rpms = " ".join(rpms)
        return self.run_in_guest("rpm -q {}".format(rpms)) == 0

    # does a dnf install in the vm, ignores the dnf retcod, checks with rpm
    def dnf_install(self, rpms):
        if isinstance(rpms, list):
            rpms = " ".join(rpms)
        yum_mode = self.run_in_guest("dnf -y install {}".format(rpms))
        if yum_mode != 0:
            self.run_in_guest("dnf -y install --allowerasing {}".format(rpms))
        # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
        # nothing similar with dnf, forget about this for now
        # self.run_in_guest("yum-complete-transaction -y")
        return self.dnf_check_installed(rpms)

    def pip_install(self, package):
        return self.run_in_guest("pip install {}".format(package)) == 0
    def auth_root(self):
        return {'Username'   : self.plc_spec['settings']['PLC_ROOT_USER'],
                'AuthMethod' : 'password',
                'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
                'Role'       : self.plc_spec['role'],
                }
    def locate_site(self, sitename):
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception("Cannot locate site {}".format(sitename))

    def locate_node(self, nodename):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                    return site, node
        raise Exception("Cannot locate node {}".format(nodename))

    def locate_hostname(self, hostname):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    return site, node
        raise Exception("Cannot locate hostname {}".format(hostname))

    def locate_key(self, key_name):
        for key in self.plc_spec['keys']:
            if key['key_name'] == key_name:
                return key
        raise Exception("Cannot locate key {}".format(key_name))

    def locate_private_key_from_key_names(self, key_names):
        # locate the first available key
        found = False
        for key_name in key_names:
            key_spec = self.locate_key(key_name)
            test_key = TestKey(self, key_spec)
            publickey = test_key.publicpath()
            privatekey = test_key.privatepath()
            if os.path.isfile(publickey) and os.path.isfile(privatekey):
                found = True
                break
        return privatekey if found else None

    def locate_slice(self, slicename):
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                return slice
        raise Exception("Cannot locate slice {}".format(slicename))
    def all_sliver_objs(self):
        result = []
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj(nodename, slicename))
        return result

    def locate_sliver_obj(self, nodename, slicename):
        site, node = self.locate_node(nodename)
        slice = self.locate_slice(slicename)
        test_site = TestSite(self, site)
        test_node = TestNode(self, test_site, node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice(self, test_site, slice)
        return TestSliver(self, test_node, test_slice)

    def locate_first_node(self):
        nodename = self.plc_spec['slices'][0]['nodenames'][0]
        site, node = self.locate_node(nodename)
        test_site = TestSite(self, site)
        test_node = TestNode(self, test_site, node)
        return test_node

    def locate_first_sliver(self):
        slice_spec = self.plc_spec['slices'][0]
        slicename = slice_spec['slice_fields']['name']
        nodename = slice_spec['nodenames'][0]
        return self.locate_sliver_obj(nodename, slicename)
    # all different hostboxes used in this plc
    def get_BoxNodes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        tuples = []
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(), test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        result = {}
        for (box, node) in tuples:
            if box not in result:
                result[box] = []
            result[box].append(node)
        return result

    # a step for checking this stuff
    def show_boxes(self):
        'print summary of nodes location'
        for box, nodes in self.get_BoxNodes().items():
            print(box, ":", " + ".join( [ node.name() for node in nodes ] ))
        return True
    # make this a valid step
    def qemu_kill_all(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box, nodes) in self.get_BoxNodes().items():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir = nodes[0].nodedir()
            TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
        return True

    # make this a valid step
    def qemu_list_all(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for box, nodes in self.get_BoxNodes().items():
            # this is the brute force version, list all qemus on that host box
            TestBoxQemu(box, self.options.buildname).qemu_list_all()
        return True

    # list only the qemus related to this test
    def qemu_list_mine(self):
        'list qemu instances for our nodes'
        for (box, nodes) in self.get_BoxNodes().items():
            # the fine-grain version
            for node in nodes:
                node.list_qemu()
        return True

    # clean only the qemus related to this test
    def qemu_clean_mine(self):
        'cleanup (rm -rf) qemu instances for our nodes'
        for box, nodes in self.get_BoxNodes().items():
            # the fine-grain version
            for node in nodes:
                node.qemu_clean()
        return True

    # kill only the right qemus
    def qemu_kill_mine(self):
        'kill the qemu instances for our nodes'
        for box, nodes in self.get_BoxNodes().items():
            # the fine-grain version
            for node in nodes:
                node.kill_qemu()
        return True
    #################### display config
    def show(self):
        "show test configuration after localization"
        self.show_pass(1)
        self.show_pass(2)
        return True

    # ugly hack to make sure 'run export' only reports about the 1st plc
    # to avoid confusion - also we use 'inri_slice1' in various aliases..
    exported_id = 1
    def export(self):
        "print cut'n paste-able stuff to export env variables to your shell"
        # guess local domain from hostname
        if TestPlc.exported_id > 1:
            print("export GUESTHOSTNAME{:d}={}".format(TestPlc.exported_id, self.plc_spec['vservername']))
            TestPlc.exported_id += 1
            return True
        TestPlc.exported_id += 1
        domain = socket.gethostname().split('.', 1)[1]
        fqdn = "{}.{}".format(self.plc_spec['host_box'], domain)
        print("export BUILD={}".format(self.options.buildname))
        print("export PLCHOSTLXC={}".format(fqdn))
        print("export GUESTNAME={}".format(self.vservername))
        print("export GUESTHOSTNAME={}.{}".format(self.vplchostname, domain))
        # find hostname of first node
        hostname, qemubox = self.all_node_infos()[0]
        print("export KVMHOST={}.{}".format(qemubox, domain))
        print("export NODE={}".format(hostname))
        return True
    always_display_keys = ['PLC_WWW_HOST', 'nodes', 'sites']
    def show_pass(self, passno):
        for (key, val) in self.plc_spec.items():
            if not self.options.verbose and key not in TestPlc.always_display_keys:
                continue
            if passno == 2:
                if key == 'sites':
                    for site in val:
                        self.display_site_spec(site)
                        for node in site['nodes']:
                            self.display_node_spec(node)
                elif key == 'initscripts':
                    for initscript in val:
                        self.display_initscript_spec(initscript)
                elif key == 'slices':
                    for slice in val:
                        self.display_slice_spec(slice)
                elif key == 'keys':
                    for key in val:
                        self.display_key_spec(key)
            elif passno == 1:
                if key not in ['sites', 'initscripts', 'slices', 'keys']:
                    print('+ ', key, ':', val)
    def display_site_spec(self, site):
        print('+ ======== site', site['site_fields']['name'])
        for (k, v) in site.items():
            if not self.options.verbose and k not in TestPlc.always_display_keys:
                continue
            if k == 'nodes':
                if v:
                    print('+ ', 'nodes : ', end=' ')
                    for node in v:
                        print(node['node_fields']['hostname'], '', end=' ')
                    print('')
            elif k == 'users':
                if v:
                    print('+ users : ', end=' ')
                    for user in v:
                        print(user['name'], '', end=' ')
                    print('')
            elif k == 'site_fields':
                print('+ login_base', ':', v['login_base'])
            elif k == 'address_fields':
                pass
    def display_initscript_spec(self, initscript):
        print('+ ======== initscript', initscript['initscript_fields']['name'])

    def display_key_spec(self, key):
        print('+ ======== key', key['key_name'])
    def display_slice_spec(self, slice):
        print('+ ======== slice', slice['slice_fields']['name'])
        for (k, v) in slice.items():
            if k == 'nodenames':
                if v:
                    print('+ nodes : ', end=' ')
                    for nodename in v:
                        print(nodename, '', end=' ')
                    print('')
            elif k == 'usernames':
                if v:
                    print('+ users : ', end=' ')
                    for username in v:
                        print(username, '', end=' ')
                    print('')
            elif k == 'slice_fields':
                print('+ fields', ':', end=' ')
                print('max_nodes=', v['max_nodes'], end=' ')
                print('')
    def display_node_spec(self, node):
        print("+ node={} host_box={}".format(node['name'], node['host_box']), end=' ')
        print("hostname=", node['node_fields']['hostname'], end=' ')
        print("ip=", node['interface_fields']['ip'])
        if self.options.verbose:
            utils.pprint("node details", node, depth=3)
    # another entry point for just showing the boxes involved
    def display_mapping(self):
        TestPlc.display_mapping_plc(self.plc_spec)
        return True

    @staticmethod
    def display_mapping_plc(plc_spec):
        print('+ MyPLC', plc_spec['name'])
        # WARNING this would not be right for lxc-based PLC's - should be harmless though
        print('+\tvserver address = root@{}:/vservers/{}'.format(plc_spec['host_box'], plc_spec['vservername']))
        print('+\tIP = {}/{}'.format(plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip']))
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)

    @staticmethod
    def display_mapping_node(node_spec):
        print('+ NODE {}'.format(node_spec['name']))
        print('+\tqemu box {}'.format(node_spec['host_box']))
        print('+\thostname={}'.format(node_spec['node_fields']['hostname']))
    # write a timestamp in /vservers/<>.timestamp
    # cannot be inside the vserver, that causes vserver .. build to cough
    def plcvm_timestamp(self):
        "Create a timestamp to remember creation date for this plc"
        now = int(time.time())
        # TODO-lxc check this one
        # a first approx. is to store the timestamp close to the VM root like vs does
        stamp_path = self.vm_timestamp_path()
        stamp_dir = os.path.dirname(stamp_path)
        utils.system(self.test_ssh.actual_command("mkdir -p {}".format(stamp_dir)))
        return utils.system(self.test_ssh.actual_command("echo {:d} > {}".format(now, stamp_path))) == 0

    # this is called unconditionally at the beginning of the test sequence
    # just in case this is a rerun, so if the vm is not running it's fine
    def plcvm_delete(self):
        "vserver delete the test myplc"
        stamp_path = self.vm_timestamp_path()
        self.run_in_host("rm -f {}".format(stamp_path))
        self.run_in_host("virsh -c lxc:/// destroy {}".format(self.vservername))
        self.run_in_host("virsh -c lxc:/// undefine {}".format(self.vservername))
        self.run_in_host("rm -fr /vservers/{}".format(self.vservername))
        return True
    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def plcvm_create(self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        if self.is_local():
            # a full path for the local calls
            build_dir = os.path.dirname(sys.argv[0])
            # sometimes this is empty - set to "." in such a case
            if not build_dir:
                build_dir = "."
            build_dir += "/build"
        else:
            # use a standard name - will be relative to remote buildname
            build_dir = "build"
        # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
        self.test_ssh.rmdir(build_dir)
        self.test_ssh.copy(build_dir, recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)

        # invoke initvm (drop support for vs)
        script = "lbuild-initvm.sh"
        script_options = ""
        # pass the vbuild-nightly options to [lv]test-initvm
        script_options += " -p {}".format(self.options.personality)
        script_options += " -d {}".format(self.options.pldistro)
        script_options += " -f {}".format(self.options.fcdistro)
        script_options += " -r {}".format(repo_url)
        vserver_name = self.vservername
        try:
            vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
            script_options += " -n {}".format(vserver_hostname)
        except:
            print("Cannot reverse lookup {}".format(self.vserverip))
            print("This is considered fatal, as this might pollute the test results")
            return False
        create_vserver = "{build_dir}/{script} {script_options} {vserver_name}".format(**locals())
        return self.run_in_host(create_vserver) == 0
    ### install django through pip
    def django_install(self):
        # plcapi requires Django, that is no longer provided by fedora as an rpm
        # so we use pip instead
        return self.pip_install('Django')
    def plc_install(self):
        """
        yum install myplc, noderepo
        """
        if self.options.personality == "linux32":
            arch = "i386"
        elif self.options.personality == "linux64":
            arch = "x86_64"
        else:
            raise Exception("Unsupported personality {}".format(self.options.personality))
        nodefamily = "{}-{}-{}".format(self.options.pldistro, self.options.fcdistro, arch)

        # check it's possible to install just 'myplc-core' first
        if not self.dnf_install("myplc-core"):
            return False

        pkgs_list = []
        pkgs_list.append("myplc")
        pkgs_list.append("slicerepo-{}".format(nodefamily))
        pkgs_list.append("noderepo-{}".format(nodefamily))
        return self.dnf_install(pkgs_list)
    def install_syslinux6(self):
        """
        install syslinux6 from the fedora21 release
        """
        key = 'http://mirror.onelab.eu/keys/RPM-GPG-KEY-fedora-21-primary'

        rpms = [
            'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-6.03-1.fc21.x86_64.rpm',
            'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-nonlinux-6.03-1.fc21.noarch.rpm',
            'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-perl-6.03-1.fc21.x86_64.rpm',
        ]
        # this can be done several times
        self.run_in_guest("rpm --import {key}".format(**locals()))
        return self.run_in_guest("yum -y localinstall {}".format(" ".join(rpms))) == 0
    def bonding_builds(self):
        """
        list /etc/yum.repos.d on the myplc side
        """
        self.run_in_guest("ls /etc/yum.repos.d/*partial.repo")
        return True

    def bonding_nodes(self):
        """
        List nodes known to the myplc together with their nodefamily
        """
        print("---------------------------------------- nodes")
        for node in self.apiserver.GetNodes(self.auth_root()):
            print("{} -> {}".format(node['hostname'],
                                    self.apiserver.GetNodeFlavour(self.auth_root(), node['hostname'])['nodefamily']))
        print("---------------------------------------- nodes")
        return True
    def mod_python(self):
        """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
        return self.dnf_install( ['mod_python'] )
    def plc_configure(self):
        "run plc-config-tty"
        tmpname = '{}.plc-config-tty'.format(self.name())
        with open(tmpname, 'w') as fileconf:
            for (var, value) in self.plc_spec['settings'].items():
                fileconf.write('e {}\n{}\n'.format(var, value))
            fileconf.write('w\n')
            fileconf.write('q\n')
        utils.system('cat {}'.format(tmpname))
        self.run_in_guest_piped('cat {}'.format(tmpname), 'plc-config-tty')
        utils.system('rm {}'.format(tmpname))
        return True
    # care only about f>=27
    def start_stop_systemd(self, service, start_or_stop):
        "utility to start/stop a systemd-defined service (sfa)"
        return self.run_in_guest("systemctl {} {}".format(start_or_stop, service)) == 0

    def plc_start(self):
        "start plc through systemctl"
        return self.start_stop_systemd('plc', 'start')

    def plc_stop(self):
        "stop plc through systemctl"
        return self.start_stop_systemd('plc', 'stop')
    def plcvm_start(self):
        "start the PLC vserver"
        self.start_guest()
        return True

    def plcvm_stop(self):
        "stop the PLC vserver"
        self.stop_guest()
        return True

    # stores the keys from the config for further use
    def keys_store(self):
        "stores test users ssh keys in keys/"
        for key_spec in self.plc_spec['keys']:
            TestKey(self, key_spec).store_key()
        return True

    def keys_clean(self):
        "removes keys cached in keys/"
        utils.system("rm -rf ./keys")
        return True
    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def keys_fetch(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        dir = "./keys"
        if not os.path.isdir(dir):
            os.mkdir(dir)
        vservername = self.vservername
        vm_root = self.vm_root_in_host()
        overall = True
        prefix = 'debug_ssh_key'
        for ext in ['pub', 'rsa']:
            src = "{vm_root}/etc/planetlab/{prefix}.{ext}".format(**locals())
            dst = "keys/{vservername}-debug.{ext}".format(**locals())
            if self.test_ssh.fetch(src, dst) != 0:
                overall = False
        return overall
859 "create sites with PLCAPI"
860 return self.do_sites()
862 def delete_sites(self):
863 "delete sites with PLCAPI"
864 return self.do_sites(action="delete")
866 def do_sites(self, action="add"):
867 for site_spec in self.plc_spec['sites']:
868 test_site = TestSite(self,site_spec)
869 if (action != "add"):
870 utils.header("Deleting site {} in {}".format(test_site.name(), self.name()))
871 test_site.delete_site()
872 # deleted with the site
873 #test_site.delete_users()
876 utils.header("Creating site {} & users in {}".format(test_site.name(), self.name()))
877 test_site.create_site()
878 test_site.create_users()
    def delete_all_sites(self):
        "Delete all sites in PLC, and related objects"
        print('auth_root', self.auth_root())
        sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id', 'login_base'])
        for site in sites:
            # keep the automatic site - otherwise we'd shoot ourselves in the foot, root_auth would no longer be valid
            if site['login_base'] == self.plc_spec['settings']['PLC_SLICE_PREFIX']:
                continue
            site_id = site['site_id']
            print('Deleting site_id', site_id)
            self.apiserver.DeleteSite(self.auth_root(), site_id)
        return True
895 "create nodes with PLCAPI"
896 return self.do_nodes()
897 def delete_nodes(self):
898 "delete nodes with PLCAPI"
899 return self.do_nodes(action="delete")
901 def do_nodes(self, action="add"):
902 for site_spec in self.plc_spec['sites']:
903 test_site = TestSite(self, site_spec)
905 utils.header("Deleting nodes in site {}".format(test_site.name()))
906 for node_spec in site_spec['nodes']:
907 test_node = TestNode(self, test_site, node_spec)
908 utils.header("Deleting {}".format(test_node.name()))
909 test_node.delete_node()
911 utils.header("Creating nodes for site {} in {}".format(test_site.name(), self.name()))
912 for node_spec in site_spec['nodes']:
913 utils.pprint('Creating node {}'.format(node_spec), node_spec)
914 test_node = TestNode(self, test_site, node_spec)
915 test_node.create_node()
    def nodegroups(self):
        "create nodegroups with PLCAPI"
        return self.do_nodegroups("add")

    def delete_nodegroups(self):
        "delete nodegroups with PLCAPI"
        return self.do_nodegroups("delete")
    YEAR = 365*24*3600
    @staticmethod
    def translate_timestamp(start, grain, timestamp):
        if timestamp < TestPlc.YEAR:
            # a small timestamp is interpreted as relative, expressed in grains
            return start + timestamp*grain
        else:
            # otherwise it is taken as an absolute epoch timestamp
            return timestamp

    @staticmethod
    def timestamp_printable(timestamp):
        return time.strftime('%m-%d %H:%M:%S UTC', time.gmtime(timestamp))
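    # Illustration only: with e.g. grain=1800 (30-minute slots) and start
    # rounded to the grain, a spec value of 4 reads as "4 grains after start",
    # i.e. start + 4*1800, while any value larger than a year's worth of
    # seconds is taken verbatim as an absolute timestamp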
938 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
939 now = int(time.time())
940 grain = self.apiserver.GetLeaseGranularity(self.auth_root())
941 print('API answered grain=', grain)
942 start = (now//grain)*grain
944 # find out all nodes that are reservable
945 nodes = self.all_reservable_nodenames()
947 utils.header("No reservable node found - proceeding without leases")
950 # attach them to the leases as specified in plc_specs
951 # this is where the 'leases' field gets interpreted as relative of absolute
952 for lease_spec in self.plc_spec['leases']:
953 # skip the ones that come with a null slice id
954 if not lease_spec['slice']:
956 lease_spec['t_from'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_from'])
957 lease_spec['t_until'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_until'])
958 lease_addition = self.apiserver.AddLeases(self.auth_root(), nodes, lease_spec['slice'],
959 lease_spec['t_from'], lease_spec['t_until'])
960 if lease_addition['errors']:
961 utils.header("Cannot create leases, {}".format(lease_addition['errors']))
964 utils.header('Leases on nodes {} for {} from {:d} ({}) until {:d} ({})'\
965 .format(nodes, lease_spec['slice'],
966 lease_spec['t_from'], TestPlc.timestamp_printable(lease_spec['t_from']),
967 lease_spec['t_until'], TestPlc.timestamp_printable(lease_spec['t_until'])))
    def delete_leases(self):
        "remove all leases in the myplc side"
        lease_ids = [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root()) ]
        utils.header("Cleaning leases {}".format(lease_ids))
        self.apiserver.DeleteLeases(self.auth_root(), lease_ids)
        return True

    def list_leases(self):
        "list all leases known to the myplc"
        leases = self.apiserver.GetLeases(self.auth_root())
        now = int(time.time())
        for l in leases:
            current = l['t_until'] >= now
            if self.options.verbose or current:
                utils.header("{} {} from {} until {}"\
                             .format(l['hostname'], l['name'],
                                     TestPlc.timestamp_printable(l['t_from']),
                                     TestPlc.timestamp_printable(l['t_until'])))
        return True
    # create nodegroups if needed, and populate
    def do_nodegroups(self, action="add"):
        # 1st pass to scan contents
        groups_dict = {}
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                if 'nodegroups' in node_spec:
                    nodegroupnames = node_spec['nodegroups']
                    if isinstance(nodegroupnames, str):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if nodegroupname not in groups_dict:
                            groups_dict[nodegroupname] = []
                        groups_dict[nodegroupname].append(test_node.name())
        auth = self.auth_root()
        overall = True
        for (nodegroupname, group_nodes) in groups_dict.items():
            if action == "add":
                print('nodegroups:', 'dealing with nodegroup',
                      nodegroupname, 'on nodes', group_nodes)
                # first, check if the nodetagtype is here
                tag_types = self.apiserver.GetTagTypes(auth, {'tagname' : nodegroupname})
                if tag_types:
                    tag_type_id = tag_types[0]['tag_type_id']
                else:
                    tag_type_id = self.apiserver.AddTagType(auth,
                                                            {'tagname' : nodegroupname,
                                                             'description' : 'for nodegroup {}'.format(nodegroupname),
                                                             'category' : 'test'})
                print('located tag (type)', nodegroupname, 'as', tag_type_id)
                # create the nodegroup if needed
                nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname' : nodegroupname})
                if not nodegroups:
                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                    print('created nodegroup', nodegroupname,
                          'from tagname', nodegroupname, 'and value', 'yes')
                # set node tag on all nodes, value='yes'
                for nodename in group_nodes:
                    try:
                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    except:
                        traceback.print_exc()
                        print('node', nodename, 'seems to already have tag', nodegroupname)
                    # check anyway
                    try:
                        expect_yes = self.apiserver.GetNodeTags(auth,
                                                                {'hostname' : nodename,
                                                                 'tagname' : nodegroupname},
                                                                ['value'])[0]['value']
                        if expect_yes != "yes":
                            print('Mismatch node tag on node', nodename, 'got', expect_yes)
                            overall = False
                    except:
                        if not self.options.dry_run:
                            print('Cannot find tag', nodegroupname, 'on node', nodename)
                            overall = False
            else:
                try:
                    print('cleaning nodegroup', nodegroupname)
                    self.apiserver.DeleteNodeGroup(auth, nodegroupname)
                except:
                    traceback.print_exc()
                    overall = False
        return overall
    # a list of TestNode objs
    def all_nodes(self):
        nodes = []
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                nodes.append(TestNode(self, test_site, node_spec))
        return nodes

    # return a list of tuples (nodename,qemuname)
    def all_node_infos(self):
        node_infos = []
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'], node_spec['host_box'])
                            for node_spec in site_spec['nodes'] ]
        return node_infos

    def all_nodenames(self):
        return [ x[0] for x in self.all_node_infos() ]

    def all_reservable_nodenames(self):
        res = []
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields = node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type'] == 'reservable':
                    res.append(node_fields['hostname'])
        return res
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state(self, target_boot_state, timeout_minutes,
                               silent_minutes, period_seconds=15):
        if self.options.dry_run:
            print('dry_run')
            return True

        class CompleterTaskBootState(CompleterTask):
            def __init__(self, test_plc, hostname):
                self.test_plc = test_plc
                self.hostname = hostname
                self.last_boot_state = 'undef'
            def actual_run(self):
                try:
                    node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(),
                                                            [ self.hostname ],
                                                            ['boot_state'])[0]
                    self.last_boot_state = node['boot_state']
                    return self.last_boot_state == target_boot_state
                except:
                    return False
            def message(self):
                return "CompleterTaskBootState with node {}".format(self.hostname)
            def failure_epilogue(self):
                print("node {} in state {} - expected {}"\
                      .format(self.hostname, self.last_boot_state, target_boot_state))

        timeout = timedelta(minutes=timeout_minutes)
        graceout = timedelta(minutes=silent_minutes)
        period = timedelta(seconds=period_seconds)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        utils.header("checking nodes boot state (expected {})".format(target_boot_state))
        tasks = [ CompleterTaskBootState(self, hostname)
                  for (hostname, _) in self.all_node_infos() ]
        message = 'check_boot_state={}'.format(target_boot_state)
        return Completer(tasks, message=message).run(timeout, graceout, period)
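    # Note (not in the original file): judging from the names and the comment
    # above, a Completer run polls each task every `period`, stays quiet
    # during `graceout`, then starts reporting laggards, and returns False
    # if some task still fails when `timeout` expires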
    def nodes_booted(self):
        return self.nodes_check_boot_state('boot', timeout_minutes=30, silent_minutes=28)

    def probe_kvm_iptables(self):
        (_, kvmbox) = self.all_node_infos()[0]
        TestSsh(kvmbox).run("iptables-save")
        return True
    def check_nodes_ping(self, timeout_seconds=60, period_seconds=10):
        class CompleterTaskPingNode(CompleterTask):
            def __init__(self, hostname):
                self.hostname = hostname
            def run(self, silent):
                command = "ping -c 1 -w 1 {} >& /dev/null".format(self.hostname)
                return utils.system(command, silent=silent) == 0
            def failure_epilogue(self):
                print("Cannot ping node with name {}".format(self.hostname))
        timeout = timedelta(seconds=timeout_seconds)
        graceout = timeout
        period = timedelta(seconds=period_seconds)
        node_infos = self.all_node_infos()
        tasks = [ CompleterTaskPingNode(h) for (h, _) in node_infos ]
        return Completer(tasks, message='ping_node').run(timeout, graceout, period)

    # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
    def ping_node(self):
        return self.check_nodes_ping()
    def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period_seconds=15):
        timeout = timedelta(minutes=timeout_minutes)
        graceout = timedelta(minutes=silent_minutes)
        period = timedelta(seconds=period_seconds)
        vservername = self.vservername
        if debug:
            message = "debug"
            completer_message = 'ssh_node_debug'
            local_key = "keys/{vservername}-debug.rsa".format(**locals())
        else:
            message = "boot"
            completer_message = 'ssh_node_boot'
            local_key = "keys/key_admin.rsa"
        utils.header("checking ssh access to nodes (expected in {} mode)".format(message))
        node_infos = self.all_node_infos()
        tasks = [ CompleterTaskNodeSsh(nodename, qemuname, local_key,
                                       boot_state=message, dry_run=self.options.dry_run)
                  for (nodename, qemuname) in node_infos ]
        return Completer(tasks, message=completer_message).run(timeout, graceout, period)

    def ssh_node_debug(self):
        "Tries to ssh into nodes in debug mode with the debug ssh key"
        return self.check_nodes_ssh(debug=True,
                                    timeout_minutes=self.ssh_node_debug_timeout,
                                    silent_minutes=self.ssh_node_debug_silent)

    def ssh_node_boot(self):
        "Tries to ssh into nodes in production mode with the root ssh key"
        return self.check_nodes_ssh(debug=False,
                                    timeout_minutes=self.ssh_node_boot_timeout,
                                    silent_minutes=self.ssh_node_boot_silent)

    def node_bmlogs(self):
        "Checks that there's a non-empty dir. /var/log/bm/raw"
        return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw")) == 0
    @node_mapper
    def qemu_local_init(self): pass
    @node_mapper
    def bootcd(self): pass
    @node_mapper
    def qemu_local_config(self): pass
    @node_mapper
    def qemu_export(self): pass
    @node_mapper
    def qemu_cleanlog(self): pass
    @node_mapper
    def nodestate_reinstall(self): pass
    @node_mapper
    def nodestate_upgrade(self): pass
    @node_mapper
    def nodestate_safeboot(self): pass
    @node_mapper
    def nodestate_boot(self): pass
    @node_mapper
    def nodestate_show(self): pass
    @node_mapper
    def nodedistro_f14(self): pass
    @node_mapper
    def nodedistro_f18(self): pass
    @node_mapper
    def nodedistro_f20(self): pass
    @node_mapper
    def nodedistro_f21(self): pass
    @node_mapper
    def nodedistro_f22(self): pass
    @node_mapper
    def nodedistro_show(self): pass
    ### check hooks : invoke scripts from hooks/{node,slice}
    def check_hooks_node(self):
        return self.locate_first_node().check_hooks()
    def check_hooks_sliver(self):
        return self.locate_first_sliver().check_hooks()

    def check_hooks(self):
        "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
        return self.check_hooks_node() and self.check_hooks_sliver()
    def do_check_initscripts(self):
        class CompleterTaskInitscript(CompleterTask):
            def __init__(self, test_sliver, stamp):
                self.test_sliver = test_sliver
                self.stamp = stamp
            def actual_run(self):
                return self.test_sliver.check_initscript_stamp(self.stamp)
            def message(self):
                return "initscript checker for {}".format(self.test_sliver.name())
            def failure_epilogue(self):
                print("initscript stamp {} not found in sliver {}"\
                      .format(self.stamp, self.test_sliver.name()))

        tasks = []
        for slice_spec in self.plc_spec['slices']:
            if 'initscriptstamp' not in slice_spec:
                continue
            stamp = slice_spec['initscriptstamp']
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                print('nodename', nodename, 'slicename', slicename, 'stamp', stamp)
                site, node = self.locate_node(nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite(self, site)
                test_slice = TestSlice(self, test_site, slice_spec)
                test_node = TestNode(self, test_site, node)
                test_sliver = TestSliver(self, test_node, test_slice)
                tasks.append(CompleterTaskInitscript(test_sliver, stamp))
        return Completer(tasks, message='check_initscripts').\
            run(timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
    def check_initscripts(self):
        "check that the initscripts have triggered"
        return self.do_check_initscripts()

    def initscripts(self):
        "create initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc {}'.format(self.plc_spec['name']), initscript)
            self.apiserver.AddInitScript(self.auth_root(), initscript['initscript_fields'])
        return True

    def delete_initscripts(self):
        "delete initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete {} in plc {}'.format(initscript_name, self.plc_spec['name']))
            try:
                self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
                print(initscript_name, 'deleted')
            except:
                print('deletion went wrong - probably did not exist')
        return True
1290 "create slices with PLCAPI"
1291 return self.do_slices(action="add")
1293 def delete_slices(self):
1294 "delete slices with PLCAPI"
1295 return self.do_slices(action="delete")
1297 def fill_slices(self):
1298 "add nodes in slices with PLCAPI"
1299 return self.do_slices(action="fill")
1301 def empty_slices(self):
1302 "remove nodes from slices with PLCAPI"
1303 return self.do_slices(action="empty")
1305 def do_slices(self, action="add"):
1306 for slice in self.plc_spec['slices']:
1307 site_spec = self.locate_site(slice['sitename'])
1308 test_site = TestSite(self,site_spec)
1309 test_slice=TestSlice(self,test_site,slice)
1310 if action == "delete":
1311 test_slice.delete_slice()
1312 elif action == "fill":
1313 test_slice.add_nodes()
1314 elif action == "empty":
1315 test_slice.delete_nodes()
1317 test_slice.create_slice()
    @slice_mapper__tasks(20, 10, 15)
    def ssh_slice(self): pass
    @slice_mapper__tasks(20, 19, 15)
    def ssh_slice_off(self): pass
    @slice_mapper__tasks(1, 1, 15)
    def slice_fs_present(self): pass
    @slice_mapper__tasks(1, 1, 15)
    def slice_fs_deleted(self): pass

    # use another name so we can exclude/ignore it from the tests on the nightly command line
    def ssh_slice_again(self): return self.ssh_slice()
    # note that simply doing ssh_slice_again=ssh_slice would kind of work too
    # but for some reason the ignore-wrapping thing would not

    @slice_mapper
    def ssh_slice_basics(self): pass
    @slice_mapper
    def check_vsys_defaults(self): pass

    @node_mapper
    def keys_clear_known_hosts(self): pass
    def plcapi_urls(self):
        """
        attempts to reach the PLCAPI with various forms for the URL
        """
        return PlcapiUrlScanner(self.auth_root(), ip=self.vserverip).scan()

    def speed_up_slices(self):
        "tweak nodemanager cycle (wait time) to 30+/-10 s"
        return self._speed_up_slices(30, 10)

    def super_speed_up_slices(self):
        "dev mode: tweak nodemanager cycle (wait time) to 5+/-1 s"
        return self._speed_up_slices(5, 1)
    def _speed_up_slices(self, p, r):
        # create the template on the server-side
        template = "{}.nodemanager".format(self.name())
        with open(template, "w") as template_file:
            template_file.write('OPTIONS="-p {} -r {} -d"\n'.format(p, r))
        in_vm = "/var/www/html/PlanetLabConf/nodemanager"
        remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
        self.test_ssh.copy_abs(template, remote)
        # expose this conf file to the nodes through PLCAPI, if not yet done
        if not self.apiserver.GetConfFiles(self.auth_root(),
                                           {'dest' : '/etc/sysconfig/nodemanager'}):
            self.apiserver.AddConfFile(self.auth_root(),
                                       {'dest' : '/etc/sysconfig/nodemanager',
                                        'source' : 'PlanetLabConf/nodemanager',
                                        'postinstall_cmd' : 'service nm restart',})
        return True
    def debug_nodemanager(self):
        "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
        template = "{}.nodemanager".format(self.name())
        with open(template, "w") as template_file:
            template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
        in_vm = "/var/www/html/PlanetLabConf/nodemanager"
        remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
        self.test_ssh.copy_abs(template, remote)
        return True

    @node_mapper
    def qemu_start(self): pass
    @node_mapper
    def qemu_timestamp(self): pass
    @node_mapper
    def qemu_nodefamily(self): pass
    # when a spec refers to a node possibly on another plc
    def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
        for plc in [ self ] + other_plcs:
            try:
                return plc.locate_sliver_obj(nodename, slicename)
            except:
                pass
        raise Exception("Cannot locate sliver {}@{} among all PLCs".format(nodename, slicename))
    # implement this one as a cross step so that we can take advantage of different nodes
    # in multi-plcs mode
    def cross_check_tcp(self, other_plcs):
        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
            utils.header("check_tcp: no/empty config found")
            return True
        specs = self.plc_spec['tcp_specs']
        overall = True

        # first wait for the network to be up and ready from the slices
        class CompleterTaskNetworkReadyInSliver(CompleterTask):
            def __init__(self, test_sliver):
                self.test_sliver = test_sliver
            def actual_run(self):
                return self.test_sliver.check_tcp_ready(port=9999)
            def message(self):
                return "network ready checker for {}".format(self.test_sliver.name())
            def failure_epilogue(self):
                print("could not bind port from sliver {}".format(self.test_sliver.name()))

        tasks = []
        managed_sliver_names = set()
        for spec in specs:
            # locate the TestSliver instances involved, and cache them in the spec instance
            spec['s_sliver'] = self.locate_sliver_obj_cross(spec['server_node'], spec['server_slice'], other_plcs)
            spec['c_sliver'] = self.locate_sliver_obj_cross(spec['client_node'], spec['client_slice'], other_plcs)
            message = "Will check TCP between s={} and c={}"\
                      .format(spec['s_sliver'].name(), spec['c_sliver'].name())
            if 'client_connect' in spec:
                message += " (using {})".format(spec['client_connect'])
            utils.header(message)
            # we need to check network presence in both slivers, but also
            # avoid to insert a sliver several times
            for sliver in [ spec['s_sliver'], spec['c_sliver'] ]:
                if sliver.name() not in managed_sliver_names:
                    tasks.append(CompleterTaskNetworkReadyInSliver(sliver))
                    # add this sliver's name in the set
                    managed_sliver_names.update( {sliver.name()} )

        # wait for the network to be OK in all server sides
        if not Completer(tasks, message='check for network readiness in slivers').\
               run(timedelta(seconds=30), timedelta(seconds=24), period=timedelta(seconds=5)):
            return False

        # run server and client
        for spec in specs:
            port = spec['port']
            # the issue here is that we have the server run in background
            # and so we have no clue if it took off properly or not
            # looks like in some cases it does not
            address = spec['s_sliver'].test_node.name()
            if not spec['s_sliver'].run_tcp_server(address, port, timeout=20):
                overall = False
                break

            # idem for the client side
            # use nodename from located sliver, unless 'client_connect' is set
            if 'client_connect' in spec:
                destination = spec['client_connect']
            else:
                destination = spec['s_sliver'].test_node.name()
            if not spec['c_sliver'].run_tcp_client(destination, port):
                overall = False
        return overall
    # painfully enough, we need to allow for some time as netflow might show up last
    def check_system_slice(self):
        "all nodes: check that a system slice is alive"
        # netflow currently not working in the lxc distro
        # drl not built at all in the wtx distro
        # if we find either of them we're happy
        return self.check_netflow() or self.check_drl()

    def check_netflow(self): return self._check_system_slice('netflow')
    def check_drl(self): return self._check_system_slice('drl')
    # we have the slices up already here, so it should not take too long
    def _check_system_slice(self, slicename, timeout_minutes=5, period_seconds=15):
        class CompleterTaskSystemSlice(CompleterTask):
            def __init__(self, test_node, dry_run):
                self.test_node = test_node
                self.dry_run = dry_run
            def actual_run(self):
                return self.test_node._check_system_slice(slicename, dry_run=self.dry_run)
            def message(self):
                return "System slice {} @ {}".format(slicename, self.test_node.name())
            def failure_epilogue(self):
                print("COULD not find system slice {} @ {}".format(slicename, self.test_node.name()))
        timeout = timedelta(minutes=timeout_minutes)
        silent = timedelta(0)
        period = timedelta(seconds=period_seconds)
        tasks = [ CompleterTaskSystemSlice(test_node, self.options.dry_run)
                  for test_node in self.all_nodes() ]
        return Completer(tasks, message='_check_system_slice').run(timeout, silent, period)
    def plcsh_stress_test(self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote = "{}/{}".format(self.vm_root_in_host(), location)
        self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
        command = location
        command += " -- --check"
        if self.options.size == 1:
            command += " --tiny"
        return self.run_in_guest(command) == 0
    # populate runs the same utility with slightly different options
    # in particular it runs with --preserve (don't cleanup) and without --check
    # also it gets run twice, once with the --foreign option for creating fake foreign entries

    def sfa_install_all(self):
        "yum install sfa sfa-plc sfa-sfatables sfa-client"
        return (self.dnf_install("sfa sfa-plc sfa-sfatables sfa-client") and
                self.run_in_guest("systemctl enable sfa-registry") == 0 and
                self.run_in_guest("systemctl enable sfa-aggregate") == 0)
    def sfa_install_core(self):
        "yum install sfa"
        return self.dnf_install("sfa")

    def sfa_install_plc(self):
        "yum install sfa-plc"
        return self.dnf_install("sfa-plc")

    def sfa_install_sfatables(self):
        "yum install sfa-sfatables"
        return self.dnf_install("sfa-sfatables")
    # for some very odd reason, this sometimes fails with the following symptom
    # # yum install sfa-client
    # Setting up Install Process
    # Downloading Packages:
    # Running rpm_check_debug
    # Running Transaction Test
    # Transaction Test Succeeded
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
    # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
    # even though in the same context I have
    # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
    # Filesystem            Size  Used Avail Use% Mounted on
    # /dev/hdv1             806G  264G  501G  35% /
    # none                   16M   36K   16M   1% /tmp
    #
    # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
    def sfa_install_client(self):
        "yum install sfa-client"
        first_try = self.dnf_install("sfa-client")
        if first_try:
            return True
        utils.header("********** Regular yum failed - special workaround in place, 2nd chance")
        code, cached_rpm_path = \
            utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\\*.rpm'))
        utils.header("rpm_path=<<{}>>".format(cached_rpm_path))
        self.run_in_guest("rpm -i {}".format(cached_rpm_path))
        return self.dnf_check_installed("sfa-client")
    def sfa_dbclean(self):
        "thoroughly wipes off the SFA database"
        return self.run_in_guest("sfaadmin reg nuke") == 0 or \
               self.run_in_guest("sfa-nuke.py") == 0 or \
               self.run_in_guest("sfa-nuke-plc.py") == 0 or \
               self.run_in_guest("sfaadmin registry nuke") == 0

    def sfa_fsclean(self):
        "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
        self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
        return True
    def sfa_plcclean(self):
        "cleans the PLC entries that were created as a side effect of running the script"
        sfa_spec = self.plc_spec['sfa']
        for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
            login_base = auth_sfa_spec['login_base']
            try:
                self.apiserver.DeleteSite(self.auth_root(), login_base)
            except:
                print("Site {} already absent from PLC db".format(login_base))

            for spec_name in ['pi_spec', 'user_spec']:
                user_spec = auth_sfa_spec[spec_name]
                username = user_spec['email']
                try:
                    self.apiserver.DeletePerson(self.auth_root(), username)
                except:
                    # this in fact is expected as sites delete their members
                    # print("User {} already absent from PLC db".format(username))
                    pass

        print("REMEMBER TO RUN sfa_import AGAIN")
        return True
    def sfa_uninstall(self):
        "uses rpm to uninstall sfa - ignore result"
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
        self.run_in_guest("rm -rf /etc/sfa")
        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
        self.run_in_guest("rpm -e --noscripts sfa-plc")
        return True
    ### run unit tests for SFA
    # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
    # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
    # no matter how many Gbs are available on the testplc
    # could not figure out what's wrong, so...
    # if the yum install phase fails, consider the test is successful
    # other combinations will eventually run it hopefully
    def sfa_utest(self):
        "dnf install sfa-tests and run SFA unittests"
        self.run_in_guest("dnf -y install sfa-tests")
        # failed to install - forget it
        if self.run_in_guest("rpm -q sfa-tests") != 0:
            utils.header("WARNING: SFA unit tests failed to install, ignoring")
            return True
        return self.run_in_guest("/usr/share/sfa/tests/testAll.py") == 0
1633 dirname = "conf.{}".format(self.plc_spec['name'])
1634 if not os.path.isdir(dirname):
1635 utils.system("mkdir -p {}".format(dirname))
1636 if not os.path.isdir(dirname):
1637 raise Exception("Cannot create config dir for plc {}".format(self.name()))
    def conffile(self, filename):
        return "{}/{}".format(self.confdir(), filename)

    def confsubdir(self, dirname, clean, dry_run=False):
        subdirname = "{}/{}".format(self.confdir(), dirname)
        if clean:
            utils.system("rm -rf {}".format(subdirname))
        if not os.path.isdir(subdirname):
            utils.system("mkdir -p {}".format(subdirname))
        if not dry_run and not os.path.isdir(subdirname):
            raise Exception("Cannot create config subdir {} for plc {}".format(dirname, self.name()))
        return subdirname

    def conffile_clean(self, filename):
        filename = self.conffile(filename)
        return utils.system("rm -rf {}".format(filename)) == 0
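    # illustration (names are hypothetical): for a plc named 'onelab',
    # conffile('agg.xml') resolves to conf.onelab/agg.xml, and
    # confsubdir('dot-sfi/alice', clean=True) to conf.onelab/dot-sfi/alice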
    def sfa_configure(self):
        "run sfa-config-tty"
        tmpname = self.conffile("sfa-config-tty")
        with open(tmpname, 'w') as fileconf:
            for var, value in self.plc_spec['sfa']['settings'].items():
                fileconf.write('e {}\n{}\n'.format(var, value))
            fileconf.write('w\n')
            fileconf.write('R\n')
            fileconf.write('q\n')
        utils.system('cat {}'.format(tmpname))
        self.run_in_guest_piped('cat {}'.format(tmpname), 'sfa-config-tty')
        return True
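    # the file piped above drives sfa-config-tty's interactive prompt; with a
    # hypothetical setting SFA_REGISTRY_HOST=plc.example.org it would read:
    #   e SFA_REGISTRY_HOST
    #   plc.example.org
    #   w
    #   R
    #   q
    # i.e. presumably: edit each variable in turn, then write, reload, quit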
    def aggregate_xml_line(self):
        port = self.plc_spec['sfa']['neighbours-port']
        return '<aggregate addr="{}" hrn="{}" port="{}"/>'\
            .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'], port)

    def registry_xml_line(self):
        return '<registry addr="{}" hrn="{}" port="12345"/>'\
            .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
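    # e.g. with a (hypothetical) vserverip of 10.0.0.5 and root auth 'pla',
    # registry_xml_line() yields <registry addr="10.0.0.5" hrn="pla" port="12345"/>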
    # a cross step that takes all other plcs in argument
    def cross_sfa_configure(self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        if not other_plcs:
            return True
        agg_fname = self.conffile("agg.xml")
        with open(agg_fname, "w") as out:
            out.write("<aggregates>{}</aggregates>\n"\
                      .format(" ".join([plc.aggregate_xml_line() for plc in other_plcs])))
        utils.header("(Over)wrote {}".format(agg_fname))
        reg_fname = self.conffile("reg.xml")
        with open(reg_fname, "w") as out:
            out.write("<registries>{}</registries>\n"\
                      .format(" ".join([plc.registry_xml_line() for plc in other_plcs])))
        utils.header("(Over)wrote {}".format(reg_fname))
        return self.test_ssh.copy_abs(agg_fname,
                                      '/{}/etc/sfa/aggregates.xml'.format(self.vm_root_in_host())) == 0 \
           and self.test_ssh.copy_abs(reg_fname,
                                      '/{}/etc/sfa/registries.xml'.format(self.vm_root_in_host())) == 0
    def sfa_import(self):
        "use sfaadmin to import from plc"
        # kept for reference but currently unused
        auth = self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']
        return self.run_in_guest('sfaadmin reg import_registry') == 0
    def sfa_start(self):
        "start SFA through systemctl"
        return (self.start_stop_systemd('sfa-registry', 'start') and
                self.start_stop_systemd('sfa-aggregate', 'start'))
    def sfi_configure(self):
        "Create /root/sfi on the plc side for sfi client configuration"
        if self.options.dry_run:
            utils.header("DRY RUN - skipping step")
            return True
        sfa_spec = self.plc_spec['sfa']
        # cannot use auth_sfa_mapper to pass dir_name
        for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_slice = TestAuthSfa(self, slice_spec)
            dir_basename = os.path.basename(test_slice.sfi_path())
            dir_name = self.confsubdir("dot-sfi/{}".format(dir_basename),
                                       clean=True, dry_run=self.options.dry_run)
            test_slice.sfi_configure(dir_name)
            # push into the remote /root/sfi area
            location = test_slice.sfi_path()
            remote = "{}/{}".format(self.vm_root_in_host(), location)
            self.test_ssh.mkdir(remote, abs=True)
            # need to strip the last level of remote, otherwise we'd get an extra dir level
            self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
        return True
    def sfi_clean(self):
        "clean up /root/sfi on the plc side"
        self.run_in_guest("rm -rf /root/sfi")
        return True
    def sfa_rspec_empty(self):
        "expose a static empty rspec (ships with the tests module) in the sfi directory"
        filename = "empty-rspec.xml"
        overall = True
        for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_slice = TestAuthSfa(self, slice_spec)
            in_vm = test_slice.sfi_path()
            remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
            if self.test_ssh.copy_abs(filename, remote) != 0:
                overall = False
        return overall
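    # the steps below are one-liners on purpose: auth_sfa_mapper (defined higher
    # up in this module, following the same pattern as the other mappers)
    # redirects each of them to the same-named method in TestAuthSfa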
    @auth_sfa_mapper
    def sfa_register_site(self): pass
    @auth_sfa_mapper
    def sfa_register_pi(self): pass
    @auth_sfa_mapper
    def sfa_register_user(self): pass
    @auth_sfa_mapper
    def sfa_update_user(self): pass
    @auth_sfa_mapper
    def sfa_register_slice(self): pass
    @auth_sfa_mapper
    def sfa_renew_slice(self): pass
    @auth_sfa_mapper
    def sfa_get_expires(self): pass
    @auth_sfa_mapper
    def sfa_discover(self): pass
    @auth_sfa_mapper
    def sfa_rspec(self): pass
    @auth_sfa_mapper
    def sfa_allocate(self): pass
    @auth_sfa_mapper
    def sfa_allocate_empty(self): pass
    @auth_sfa_mapper
    def sfa_provision(self): pass
    @auth_sfa_mapper
    def sfa_provision_empty(self): pass
    @auth_sfa_mapper
    def sfa_describe(self): pass
    @auth_sfa_mapper
    def sfa_check_slice_plc(self): pass
    @auth_sfa_mapper
    def sfa_check_slice_plc_empty(self): pass
    @auth_sfa_mapper
    def sfa_update_slice(self): pass
    @auth_sfa_mapper
    def sfa_remove_user_from_slice(self): pass
    @auth_sfa_mapper
    def sfa_insert_user_in_slice(self): pass
    @auth_sfa_mapper
    def sfi_list(self): pass
    @auth_sfa_mapper
    def sfi_show_site(self): pass
    @auth_sfa_mapper
    def sfi_show_slice(self): pass
    @auth_sfa_mapper
    def sfi_show_slice_researchers(self): pass
    @auth_sfa_mapper
    def ssh_slice_sfa(self): pass
    @auth_sfa_mapper
    def sfa_delete_user(self): pass
    @auth_sfa_mapper
    def sfa_delete_slice(self): pass
1805 "stop sfa through systemclt"
1806 return (self.start_stop_systemd('sfa-aggregate', 'stop') and
1807 self.start_stop_systemd('sfa-registry', 'stop'))
1810 "creates random entries in the PLCAPI"
1811 # install the stress-test in the plc image
1812 location = "/usr/share/plc_api/plcsh_stress_test.py"
1813 remote = "{}/{}".format(self.vm_root_in_host(), location)
1814 self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1816 command += " -- --preserve --short-names"
1817 local = (self.run_in_guest(command) == 0);
1818 # second run with --foreign
1819 command += ' --foreign'
1820 remote = (self.run_in_guest(command) == 0);
1821 return local and remote
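    # note: the first pass above exercises the plain API, the second re-runs the
    # same script with --foreign, presumably to also cover peered/foreign
    # entries; the step only succeeds if both passes do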
    ####################
    # MANUAL partial bonding steps, delegated through bonding_redirector
    @bonding_redirector
    def bonding_init_partial(self): pass
    @bonding_redirector
    def bonding_add_yum(self): pass
    @bonding_redirector
    def bonding_install_rpms(self): pass

    ####################
    def gather_logs(self):
        "gets all possible logs from plc's/qemu node's/slice's for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
        # (2) get all the nodes' qemu logs and store them as logs/node.qemu.<node>.log
        # (3) get the nodes' /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible get the slices' /var/log as logs/sliver.var-log.<sliver>/*

        print("-------------------- TestPlc.gather_logs : PLC's /var/log")
        self.gather_var_logs()

        print("-------------------- TestPlc.gather_logs : PLC's /var/lib/pgsql/data/pg_log/")
        self.gather_pgsql_logs()

        print("-------------------- TestPlc.gather_logs : PLC's /root/sfi/")
        self.gather_root_sfi()

        print("-------------------- TestPlc.gather_logs : nodes' QEMU logs")
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                test_node.gather_qemu_logs()

        print("-------------------- TestPlc.gather_logs : nodes' /var/log")
        self.gather_nodes_var_logs()

        print("-------------------- TestPlc.gather_logs : sample sliver's /var/log")
        self.gather_slivers_var_logs()
        return True
    def gather_slivers_var_logs(self):
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.{}".format(test_sliver.name()))
            command = remote + " | tar -C logs/sliver.var-log.{} -xf -".format(test_sliver.name())
            utils.system(command)
        return True
    def gather_var_logs(self):
        utils.system("mkdir -p logs/myplc.var-log.{}".format(self.name()))
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + " | tar -C logs/myplc.var-log.{} -xf -".format(self.name())
        utils.system(command)
        command = "chmod a+r,a+x logs/myplc.var-log.{}/httpd".format(self.name())
        utils.system(command)
    def gather_pgsql_logs(self):
        utils.system("mkdir -p logs/myplc.pgsql-log.{}".format(self.name()))
        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
        command = to_plc + " | tar -C logs/myplc.pgsql-log.{} -xf -".format(self.name())
        utils.system(command)
    def gather_root_sfi(self):
        utils.system("mkdir -p logs/sfi.{}".format(self.name()))
        to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
        command = to_plc + " | tar -C logs/sfi.{} -xf -".format(self.name())
        utils.system(command)
    def gather_nodes_var_logs(self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                test_ssh = TestSsh(test_node.name(), key="keys/key_admin.rsa")
                command = test_ssh.actual_command("tar -C /var/log -cf - .")
                command = command + " | tar -C logs/node.var-log.{} -xf -".format(test_node.name())
                utils.system("mkdir -p logs/node.var-log.{}".format(test_node.name()))
                utils.system(command)
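    # after a full gather_logs run, the local tree looks like (illustrative):
    #   logs/myplc.var-log.<plcname>/*
    #   logs/myplc.pgsql-log.<plcname>/*
    #   logs/sfi.<plcname>/*
    #   logs/node.qemu.<node>.log
    #   logs/node.var-log.<node>/*
    #   logs/sliver.var-log.<sliver>/*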
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile(self, database):
        # uses options.dbname if it is found
        try:
            name = self.options.dbname
            if not isinstance(name, str):
                raise Exception
        except:
            # fall back to today's date
            t = datetime.now()
            d = t.date()
            name = str(d)
        return "/root/{}-{}.sql".format(database, name)
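    # e.g. dbfile("planetlab5") -> /root/planetlab5-nightly.sql when run with
    # --dbname nightly, or /root/planetlab5-2024-01-15.sql otherwise
    # (illustrative values)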
    def plc_db_dump(self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        dump = self.dbfile("planetlab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f ' + dump)
        utils.header('Dumped planetlab5 database in {}'.format(dump))
        return True
    def plc_db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        dump = self.dbfile("planetlab5")
        self.run_in_guest('systemctl stop httpd')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab5', 'psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump)
        # restart httpd service
        self.run_in_guest('systemctl start httpd')

        utils.header('Database restored from ' + dump)
        return True
    @staticmethod
    def create_ignore_steps():
        for step in TestPlc.default_steps + TestPlc.other_steps:
            # default steps can have a plc qualifier
            if '@' in step:
                step, qualifier = step.split('@')
            # or be defined as forced or ignored by default
            for keyword in ['_ignore', '_force']:
                if step.endswith(keyword):
                    step = step.replace(keyword, '')
            if step == SEP or step == SEPSFA:
                continue
            method = getattr(TestPlc, step)
            name = step + '_ignore'
            wrapped = ignore_result(method)
            # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
            setattr(TestPlc, name, wrapped)
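    # e.g. for a step 'ssh_slice' this attaches a TestPlc.ssh_slice_ignore
    # method that runs ssh_slice but reports success whatever the actual outcome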
    # def ssh_slice_again_ignore(self): pass
    # def check_initscripts_ignore(self): pass
    def standby_1_through_20(self):
        """convenience function to wait for a specified number of minutes"""

    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass
    # convenience for debugging the test logic
    def yes(self): return True
    def no(self): return False
    def fail(self): return False