1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
12 from Completer import Completer, CompleterTask
13 from TestSite import TestSite
14 from TestNode import TestNode, CompleterTaskNodeSsh
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
25 from TestBonding import TestBonding
27 has_sfa_cache_filename="sfa-cache"
29 # step methods must take (self) and return a boolean (options is a member of the class)
31 def standby(minutes, dry_run):
32 utils.header('Entering StandBy for {:d} mn'.format(minutes))
if not dry_run:
    time.sleep(60 * minutes)
return True
39 def standby_generic(func):
def actual(self):
    minutes = int(func.__name__.split("_")[1])
    return standby(minutes, self.options.dry_run)
return actual
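# a quick illustration (hypothetical step name): a method named 'standby_3'
# yields minutes == int('standby_3'.split('_')[1]) == 3, i.e. a 3-minute pause
# unless --dry-run is set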
45 def node_mapper(method):
46 def map_on_nodes(self, *args, **kwds):
48 node_method = TestNode.__dict__[method.__name__]
49 for test_node in self.all_nodes():
if not node_method(test_node, *args, **kwds):
    return False
return True
53 # maintain __name__ for ignore_result
54 map_on_nodes.__name__ = method.__name__
55 # restore the doc text
map_on_nodes.__doc__ = TestNode.__dict__[method.__name__].__doc__
return map_on_nodes
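# typical use, as seen further down in this class:
#   @node_mapper
#   def qemu_start(self): pass
# the generated wrapper calls TestNode.qemu_start on every node in the spec
# and fails as soon as one node fails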
59 def slice_mapper(method):
60 def map_on_slices(self):
62 slice_method = TestSlice.__dict__[method.__name__]
63 for slice_spec in self.plc_spec['slices']:
64 site_spec = self.locate_site (slice_spec['sitename'])
65 test_site = TestSite(self,site_spec)
66 test_slice = TestSlice(self,test_site,slice_spec)
if not slice_method(test_slice, self.options):
    return False
return True
70 # maintain __name__ for ignore_result
71 map_on_slices.__name__ = method.__name__
72 # restore the doc text
map_on_slices.__doc__ = TestSlice.__dict__[method.__name__].__doc__
return map_on_slices
76 def bonding_redirector(method):
77 bonding_name = method.__name__.replace('bonding_', '')
def redirect(self):
    bonding_method = TestBonding.__dict__[bonding_name]
    return bonding_method(self.test_bonding)
81 # maintain __name__ for ignore_result
82 redirect.__name__ = method.__name__
83 # restore the doc text
redirect.__doc__ = TestBonding.__dict__[bonding_name].__doc__
return redirect
87 # run a step but return True so that we can go on
88 def ignore_result(method):
def ignoring(self):
    # ssh_slice_ignore -> ssh_slice
    ref_name = method.__name__.replace('_ignore', '').replace('force_', '')
92 ref_method = TestPlc.__dict__[ref_name]
93 result = ref_method(self)
94 print("Actual (but ignored) result for {ref_name} is {result}".format(**locals()))
95 return Ignored(result)
96 name = method.__name__.replace('_ignore', '').replace('force_', '')
97 ignoring.__name__ = name
ignoring.__doc__ = "ignored version of " + name
return ignoring
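# e.g. 'check_vsys_defaults_ignore' in the step lists below runs
# check_vsys_defaults, prints its outcome, and returns an Ignored object
# so the overall sequence proceeds regardless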
101 # a variant that expects the TestSlice method to return a list of CompleterTasks that
# are then merged into a single Completer run, to avoid waiting for all the slices
# sequentially - especially useful when a test fails, of course
# because we need to pass arguments, we use a class instead of a function..
105 class slice_mapper__tasks(object):
106 # could not get this to work with named arguments
107 def __init__(self, timeout_minutes, silent_minutes, period_seconds):
108 self.timeout = timedelta(minutes = timeout_minutes)
109 self.silent = timedelta(minutes = silent_minutes)
110 self.period = timedelta(seconds = period_seconds)
111 def __call__(self, method):
decorator_self = self
def wrappee(self):
    # compute augmented method name
114 method_name = method.__name__ + "__tasks"
115 # locate in TestSlice
116 slice_method = TestSlice.__dict__[ method_name ]
tasks = []
for slice_spec in self.plc_spec['slices']:
120 site_spec = self.locate_site (slice_spec['sitename'])
121 test_site = TestSite(self, site_spec)
122 test_slice = TestSlice(self, test_site, slice_spec)
123 tasks += slice_method (test_slice, self.options)
124 return Completer (tasks, message=method.__name__).\
125 run(decorator_self.timeout, decorator_self.silent, decorator_self.period)
126 # restore the doc text from the TestSlice method even if a bit odd
127 wrappee.__name__ = method.__name__
wrappee.__doc__ = slice_method.__doc__
return wrappee
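# typical use, as seen further down:
#   @slice_mapper__tasks(20, 10, 15)
#   def ssh_slice(self): pass
# i.e. a 20-minute timeout, 10 silent minutes and a 15-second period,
# with TestSlice.ssh_slice__tasks providing the CompleterTasks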
131 def auth_sfa_mapper(method):
def actual(self):
    auth_method = TestAuthSfa.__dict__[method.__name__]
135 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
136 test_auth = TestAuthSfa(self, auth_spec)
if not auth_method(test_auth, self.options):
    return False
return True
140 # restore the doc text
actual.__doc__ = TestAuthSfa.__dict__[method.__name__].__doc__
return actual
class Ignored:
    def __init__(self, result):
        self.result = result
155 'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
156 'django_install', 'plc_install', 'plc_configure', 'plc_start', SEP,
157 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
158 'plcapi_urls','speed_up_slices', SEP,
159 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
# slices created interactively under plcsh seem to be fine, but these ones don't have the tags
# keep this out of the way for now
162 'check_vsys_defaults_ignore', SEP,
163 # run this first off so it's easier to re-run on another qemu box
164 'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
165 'qemu_clean_mine', 'qemu_export', 'qemu_cleanlog', SEP,
166 'qemu_start', 'qemu_timestamp', 'qemu_nodefamily', SEP,
167 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
168 'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
169 'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
170 'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1',
171 'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
172 'sfa_discover@1', 'sfa_rspec@1', SEPSFA,
173 'sfa_allocate@1', 'sfa_provision@1', 'sfa_describe@1', SEPSFA,
174 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
175 'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
176 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
177 # but as the stress test might take a while, we sometimes missed the debug mode..
178 'probe_kvm_iptables',
179 'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
180 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', SEP,
181 'ssh_slice_sfa@1', SEPSFA,
182 'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1','sfa_check_slice_plc_empty@1', SEPSFA,
183 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
184 'check_system_slice', SEP,
185 # for inspecting the slice while it runs the first time
187 # check slices are turned off properly
189 'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
190 # check they are properly re-created with the same name
191 'fill_slices', 'ssh_slice_again', SEP,
192 'gather_logs_force', SEP,
195 'export', 'show_boxes', 'super_speed_up_slices', SEP,
196 'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
197 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
198 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
199 'delete_leases', 'list_leases', SEP,
201 'nodestate_show','nodestate_safeboot','nodestate_boot', 'nodestate_upgrade', SEP,
202 'nodedistro_show','nodedistro_f14','nodedistro_f18', SEP,
203 'nodedistro_f20', 'nodedistro_f21','nodedistro_f22', SEP,
204 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
205 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
206 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
207 'sfa_get_expires', SEPSFA,
208 'plc_db_dump', 'plc_db_restore', SEP,
209 'check_netflow', 'check_drl', SEP,
210 # used to be part of default steps but won't work since f27
212 'slice_fs_present', 'check_initscripts', SEP,
213 'standby_1_through_20','yes','no',SEP,
214 'install_syslinux6', 'bonding_builds', 'bonding_nodes', SEP,
216 default_bonding_steps = [
217 'bonding_init_partial',
219 'bonding_install_rpms', SEP,
@staticmethod
def printable_steps(steps):
    single_line = " ".join(steps) + " "
    return single_line.replace(" " + SEP + " ", " \\\n").replace(" " + SEPSFA + " ", " \\\n")
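# for instance, printable_steps(['plc_install', SEP, 'plc_configure'])
# renders roughly as "plc_install \" + newline + "plc_configure ",
# i.e. each separator becomes a backslash-newline continuation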
@staticmethod
def valid_step(step):
228 return step != SEP and step != SEPSFA
230 # turn off the sfa-related steps when build has skipped SFA
231 # this was originally for centos5 but is still valid
232 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
234 def _has_sfa_cached(rpms_url):
235 if os.path.isfile(has_sfa_cache_filename):
236 with open(has_sfa_cache_filename) as cache:
237 cached = cache.read() == "yes"
utils.header("build provides SFA (cached): {}".format(cached))
return cached
240 # warning, we're now building 'sface' so let's be a bit more picky
241 # full builds are expected to return with 0 here
242 utils.header("Checking if build provides SFA package...")
243 retcod = utils.system("curl --silent {}/ | grep -q sfa-".format(rpms_url)) == 0
244 encoded = 'yes' if retcod else 'no'
with open(has_sfa_cache_filename, 'w') as cache:
    cache.write(encoded)
return retcod
250 def check_whether_build_has_sfa(rpms_url):
251 has_sfa = TestPlc._has_sfa_cached(rpms_url)
if has_sfa:
    utils.header("build does provide SFA")
else:
    # move all steps containing 'sfa' from default_steps to other_steps
    utils.header("SFA package not found - removing steps with sfa or sfi")
257 sfa_steps = [ step for step in TestPlc.default_steps
if 'sfa' in step or 'sfi' in step ]
259 TestPlc.other_steps += sfa_steps
260 for step in sfa_steps:
261 TestPlc.default_steps.remove(step)
263 def __init__(self, plc_spec, options):
264 self.plc_spec = plc_spec
265 self.options = options
266 self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
267 self.vserverip = plc_spec['vserverip']
268 self.vservername = plc_spec['vservername']
269 self.vplchostname = self.vservername.split('-')[-1]
270 self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
271 self.apiserver = TestApiserver(self.url, options.dry_run)
272 (self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
273 (self.ssh_node_debug_timeout, self.ssh_node_debug_silent) = plc_spec['ssh_node_debug_timers']
275 def has_addresses_api(self):
276 return self.apiserver.has_method('AddIpAddress')
def name(self):
    name = self.plc_spec['name']
280 return "{}.{}".format(name,self.vservername)
283 return self.plc_spec['host_box']
286 return self.test_ssh.is_local()
# defining the API methods on this object through xmlrpc
# would help, but is not strictly necessary
293 def actual_command_in_guest(self,command, backslash=False):
294 raw1 = self.host_to_guest(command)
raw2 = self.test_ssh.actual_command(raw1, dry_run=self.options.dry_run, backslash=backslash)
return raw2
298 def start_guest(self):
299 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),
300 dry_run=self.options.dry_run))
302 def stop_guest(self):
303 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),
304 dry_run=self.options.dry_run))
306 def run_in_guest(self, command, backslash=False):
307 raw = self.actual_command_in_guest(command, backslash)
308 return utils.system(raw)
310 def run_in_host(self,command):
311 return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
# backslashing turned out so awful at some point that I've turned off auto-backslashing
# see e.g. plc_start, esp. the version for f14
# the command gets run in the plc's vm
316 def host_to_guest(self, command):
317 ssh_leg = TestSsh(self.vplchostname)
318 return ssh_leg.actual_command(command, keep_stdin=True)
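# so a call like run_in_guest("uname -r") ends up as two nested ssh legs,
# roughly (hostnames are illustrative):
#   ssh root@host_box ssh vplc01 uname -r
# one leg to the host box, one to the lxc guest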
320 # this /vservers thing is legacy...
321 def vm_root_in_host(self):
322 return "/vservers/{}/".format(self.vservername)
324 def vm_timestamp_path(self):
325 return "/vservers/{}/{}.timestamp".format(self.vservername, self.vservername)
327 #start/stop the vserver
328 def start_guest_in_host(self):
329 return "virsh -c lxc:/// start {}".format(self.vservername)
331 def stop_guest_in_host(self):
332 return "virsh -c lxc:/// destroy {}".format(self.vservername)
335 def run_in_guest_piped(self,local,remote):
return utils.system(local + " | " + self.test_ssh.actual_command(self.host_to_guest(remote),
                                                                 dry_run=self.options.dry_run))
339 def dnf_check_installed(self, rpms):
if isinstance(rpms, list):
    rpms = " ".join(rpms)
342 return self.run_in_guest("rpm -q {}".format(rpms)) == 0
# does a dnf install in the vm, ignores the dnf retcod, and checks with rpm
345 def dnf_install(self, rpms):
if isinstance(rpms, list):
    rpms = " ".join(rpms)
348 yum_mode = self.run_in_guest("dnf -y install {}".format(rpms))
350 self.run_in_guest("dnf -y install --allowerasing {}".format(rpms))
351 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
352 # nothing similar with dnf, forget about this for now
353 # self.run_in_guest("yum-complete-transaction -y")
354 return self.dnf_check_installed(rpms)
356 def pip_install(self, package):
357 return self.run_in_guest("pip install {}".format(package)) == 0
def auth_root(self):
    return {'Username' : self.plc_spec['settings']['PLC_ROOT_USER'],
361 'AuthMethod' : 'password',
362 'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
'Role' : self.plc_spec['role'],
}
366 def locate_site(self,sitename):
367 for site in self.plc_spec['sites']:
if site['site_fields']['name'] == sitename:
    return site
if site['site_fields']['login_base'] == sitename:
    return site
372 raise Exception("Cannot locate site {}".format(sitename))
374 def locate_node(self, nodename):
375 for site in self.plc_spec['sites']:
376 for node in site['nodes']:
if node['name'] == nodename:
    return site, node
379 raise Exception("Cannot locate node {}".format(nodename))
381 def locate_hostname(self, hostname):
382 for site in self.plc_spec['sites']:
383 for node in site['nodes']:
if node['node_fields']['hostname'] == hostname:
    return site, node
386 raise Exception("Cannot locate hostname {}".format(hostname))
388 def locate_key(self, key_name):
389 for key in self.plc_spec['keys']:
if key['key_name'] == key_name:
    return key
392 raise Exception("Cannot locate key {}".format(key_name))
394 def locate_private_key_from_key_names(self, key_names):
395 # locate the first avail. key
397 for key_name in key_names:
398 key_spec = self.locate_key(key_name)
399 test_key = TestKey(self,key_spec)
400 publickey = test_key.publicpath()
401 privatekey = test_key.privatepath()
if os.path.isfile(publickey) and os.path.isfile(privatekey):
    return privatekey
return None
409 def locate_slice(self, slicename):
410 for slice in self.plc_spec['slices']:
if slice['slice_fields']['name'] == slicename:
    return slice
413 raise Exception("Cannot locate slice {}".format(slicename))
415 def all_sliver_objs(self):
result = []
for slice_spec in self.plc_spec['slices']:
418 slicename = slice_spec['slice_fields']['name']
419 for nodename in slice_spec['nodenames']:
    result.append(self.locate_sliver_obj(nodename, slicename))
return result
423 def locate_sliver_obj(self, nodename, slicename):
424 site,node = self.locate_node(nodename)
425 slice = self.locate_slice(slicename)
427 test_site = TestSite(self, site)
428 test_node = TestNode(self, test_site, node)
429 # xxx the slice site is assumed to be the node site - mhh - probably harmless
430 test_slice = TestSlice(self, test_site, slice)
431 return TestSliver(self, test_node, test_slice)
433 def locate_first_node(self):
434 nodename = self.plc_spec['slices'][0]['nodenames'][0]
435 site,node = self.locate_node(nodename)
436 test_site = TestSite(self, site)
test_node = TestNode(self, test_site, node)
return test_node
440 def locate_first_sliver(self):
441 slice_spec = self.plc_spec['slices'][0]
442 slicename = slice_spec['slice_fields']['name']
443 nodename = slice_spec['nodenames'][0]
444 return self.locate_sliver_obj(nodename,slicename)
446 # all different hostboxes used in this plc
447 def get_BoxNodes(self):
# maps on sites and nodes, returns [ (host_box, test_node) ]
tuples = []
for site_spec in self.plc_spec['sites']:
451 test_site = TestSite(self,site_spec)
452 for node_spec in site_spec['nodes']:
453 test_node = TestNode(self, test_site, node_spec)
454 if not test_node.is_real():
455 tuples.append( (test_node.host_box(),test_node) )
# transform into a dict { 'host_box' -> [ test_node .. ] }
result = {}
for (box, node) in tuples:
    if box not in result:
        result[box] = []
    result[box].append(node)
return result
465 # a step for checking this stuff
466 def show_boxes(self):
467 'print summary of nodes location'
468 for box,nodes in self.get_BoxNodes().items():
print(box, ":", " + ".join(node.name() for node in nodes))
return True
472 # make this a valid step
473 def qemu_kill_all(self):
474 'kill all qemu instances on the qemu boxes involved by this setup'
475 # this is the brute force version, kill all qemus on that host box
476 for (box,nodes) in self.get_BoxNodes().items():
477 # pass the first nodename, as we don't push template-qemu on testboxes
478 nodedir = nodes[0].nodedir()
TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
return True
482 # make this a valid step
483 def qemu_list_all(self):
484 'list all qemu instances on the qemu boxes involved by this setup'
485 for box,nodes in self.get_BoxNodes().items():
# this is the brute force version, list all qemus on that host box
TestBoxQemu(box, self.options.buildname).qemu_list_all()
return True
# list only the qemus related to this test
491 def qemu_list_mine(self):
492 'list qemu instances for our nodes'
493 for (box,nodes) in self.get_BoxNodes().items():
494 # the fine-grain version
# clean up only the qemus related to this test
500 def qemu_clean_mine(self):
501 'cleanup (rm -rf) qemu instances for our nodes'
502 for box,nodes in self.get_BoxNodes().items():
503 # the fine-grain version
508 # kill only the right qemus
509 def qemu_kill_mine(self):
510 'kill the qemu instances for our nodes'
511 for box,nodes in self.get_BoxNodes().items():
512 # the fine-grain version
517 #################### display config
519 "show test configuration after localization"
# ugly hack to make sure 'run export' only reports about the 1st plc
# to avoid confusion - also we use 'inri_slice1' in various aliases..
528 "print cut'n paste-able stuff to export env variables to your shell"
529 # guess local domain from hostname
530 if TestPlc.exported_id > 1:
531 print("export GUESTHOSTNAME{:d}={}".format(TestPlc.exported_id, self.plc_spec['vservername']))
533 TestPlc.exported_id += 1
534 domain = socket.gethostname().split('.',1)[1]
535 fqdn = "{}.{}".format(self.plc_spec['host_box'], domain)
536 print("export BUILD={}".format(self.options.buildname))
537 print("export PLCHOSTLXC={}".format(fqdn))
538 print("export GUESTNAME={}".format(self.vservername))
539 print("export GUESTHOSTNAME={}.{}".format(self.vplchostname, domain))
540 # find hostname of first node
541 hostname, qemubox = self.all_node_infos()[0]
542 print("export KVMHOST={}.{}".format(qemubox, domain))
543 print("export NODE={}".format(hostname))
547 always_display_keys=['PLC_WWW_HOST', 'nodes', 'sites']
548 def show_pass(self, passno):
549 for (key,val) in self.plc_spec.items():
550 if not self.options.verbose and key not in TestPlc.always_display_keys:
555 self.display_site_spec(site)
556 for node in site['nodes']:
557 self.display_node_spec(node)
558 elif key == 'initscripts':
559 for initscript in val:
560 self.display_initscript_spec(initscript)
561 elif key == 'slices':
563 self.display_slice_spec(slice)
566 self.display_key_spec(key)
568 if key not in ['sites', 'initscripts', 'slices', 'keys']:
569 print('+ ', key, ':', val)
571 def display_site_spec(self, site):
572 print('+ ======== site', site['site_fields']['name'])
573 for k,v in site.items():
574 if not self.options.verbose and k not in TestPlc.always_display_keys:
578 print('+ ','nodes : ', end=' ')
580 print(node['node_fields']['hostname'],'', end=' ')
584 print('+ users : ', end=' ')
586 print(user['name'],'', end=' ')
588 elif k == 'site_fields':
589 print('+ login_base', ':', v['login_base'])
590 elif k == 'address_fields':
596 def display_initscript_spec(self, initscript):
597 print('+ ======== initscript', initscript['initscript_fields']['name'])
599 def display_key_spec(self, key):
600 print('+ ======== key', key['key_name'])
602 def display_slice_spec(self, slice):
603 print('+ ======== slice', slice['slice_fields']['name'])
604 for k,v in slice.items():
607 print('+ nodes : ', end=' ')
609 print(nodename,'', end=' ')
611 elif k == 'usernames':
613 print('+ users : ', end=' ')
615 print(username,'', end=' ')
617 elif k == 'slice_fields':
618 print('+ fields',':', end=' ')
619 print('max_nodes=',v['max_nodes'], end=' ')
624 def display_node_spec(self, node):
625 print("+ node={} host_box={}".format(node['name'], node['host_box']), end=' ')
626 print("hostname=", node['node_fields']['hostname'], end=' ')
627 print("ip=", node['interface_fields']['ip'])
628 if self.options.verbose:
629 utils.pprint("node details", node, depth=3)
631 # another entry point for just showing the boxes involved
632 def display_mapping(self):
633 TestPlc.display_mapping_plc(self.plc_spec)
637 def display_mapping_plc(plc_spec):
638 print('+ MyPLC',plc_spec['name'])
639 # WARNING this would not be right for lxc-based PLC's - should be harmless though
640 print('+\tvserver address = root@{}:/vservers/{}'.format(plc_spec['host_box'], plc_spec['vservername']))
641 print('+\tIP = {}/{}'.format(plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip']))
642 for site_spec in plc_spec['sites']:
643 for node_spec in site_spec['nodes']:
644 TestPlc.display_mapping_node(node_spec)
647 def display_mapping_node(node_spec):
648 print('+ NODE {}'.format(node_spec['name']))
649 print('+\tqemu box {}'.format(node_spec['host_box']))
650 print('+\thostname={}'.format(node_spec['node_fields']['hostname']))
652 # write a timestamp in /vservers/<>.timestamp
653 # cannot be inside the vserver, that causes vserver .. build to cough
654 def plcvm_timestamp(self):
655 "Create a timestamp to remember creation date for this plc"
656 now = int(time.time())
657 # TODO-lxc check this one
658 # a first approx. is to store the timestamp close to the VM root like vs does
659 stamp_path = self.vm_timestamp_path()
660 stamp_dir = os.path.dirname(stamp_path)
661 utils.system(self.test_ssh.actual_command("mkdir -p {}".format(stamp_dir)))
662 return utils.system(self.test_ssh.actual_command("echo {:d} > {}".format(now, stamp_path))) == 0
# this is called unconditionally at the beginning of the test sequence
665 # just in case this is a rerun, so if the vm is not running it's fine
666 def plcvm_delete(self):
667 "vserver delete the test myplc"
668 stamp_path = self.vm_timestamp_path()
669 self.run_in_host("rm -f {}".format(stamp_path))
670 self.run_in_host("virsh -c lxc:/// destroy {}".format(self.vservername))
671 self.run_in_host("virsh -c lxc:/// undefine {}".format(self.vservername))
672 self.run_in_host("rm -fr /vservers/{}".format(self.vservername))
676 # historically the build was being fetched by the tests
677 # now the build pushes itself as a subdir of the tests workdir
678 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
679 def plcvm_create(self):
680 "vserver creation (no install done)"
681 # push the local build/ dir to the testplc box
683 # a full path for the local calls
684 build_dir = os.path.dirname(sys.argv[0])
685 # sometimes this is empty - set to "." in such a case
if not build_dir:
    build_dir = '.'
build_dir += "/build"
690 # use a standard name - will be relative to remote buildname
692 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
693 self.test_ssh.rmdir(build_dir)
694 self.test_ssh.copy(build_dir, recursive=True)
695 # the repo url is taken from arch-rpms-url
696 # with the last step (i386) removed
697 repo_url = self.options.arch_rpms_url
698 for level in [ 'arch' ]:
699 repo_url = os.path.dirname(repo_url)
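# one path level gets stripped per entry in the list, e.g. (hypothetical URL)
# http://build.example.com/rpms/f29/x86_64 -> http://build.example.com/rpms/f29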
701 # invoke initvm (drop support for vs)
702 script = "lbuild-initvm.sh"
704 # pass the vbuild-nightly options to [lv]test-initvm
705 script_options += " -p {}".format(self.options.personality)
706 script_options += " -d {}".format(self.options.pldistro)
707 script_options += " -f {}".format(self.options.fcdistro)
708 script_options += " -r {}".format(repo_url)
709 vserver_name = self.vservername
try:
    vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
    script_options += " -n {}".format(vserver_hostname)
except:
    print("Cannot reverse lookup {}".format(self.vserverip))
    print("This is considered fatal, as this might pollute the test results")
    return False
717 create_vserver="{build_dir}/{script} {script_options} {vserver_name}".format(**locals())
718 return self.run_in_host(create_vserver) == 0
720 ### install django through pip
721 def django_install(self):
# plcapi requires Django, which is no longer provided by Fedora as an rpm
723 # so we use pip instead
727 return self.pip_install('Django')
730 def plc_install(self):
"""
yum install myplc, noderepo
"""
736 if self.options.personality == "linux32":
738 elif self.options.personality == "linux64":
741 raise Exception("Unsupported personality {}".format(self.options.personality))
742 nodefamily = "{}-{}-{}".format(self.options.pldistro, self.options.fcdistro, arch)
744 # check it's possible to install just 'myplc-core' first
745 if not self.dnf_install("myplc-core"):
749 pkgs_list.append("myplc")
750 pkgs_list.append("slicerepo-{}".format(nodefamily))
751 pkgs_list.append("noderepo-{}".format(nodefamily))
return self.dnf_install(pkgs_list)
755 def install_syslinux6(self):
"""
install syslinux6 from the fedora21 release
"""
759 key = 'http://mirror.onelab.eu/keys/RPM-GPG-KEY-fedora-21-primary'
rpms = [
    'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-6.03-1.fc21.x86_64.rpm',
763 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-nonlinux-6.03-1.fc21.noarch.rpm',
    'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-perl-6.03-1.fc21.x86_64.rpm',
]
766 # this can be done several times
767 self.run_in_guest("rpm --import {key}".format(**locals()))
768 return self.run_in_guest("yum -y localinstall {}".format(" ".join(rpms))) == 0
770 def bonding_builds(self):
"""
list /etc/yum.repos.d on the myplc side
"""
774 self.run_in_guest("ls /etc/yum.repos.d/*partial.repo")
777 def bonding_nodes(self):
"""
List nodes known to the myplc together with their nodefamily
"""
781 print("---------------------------------------- nodes")
782 for node in self.apiserver.GetNodes(self.auth_root()):
783 print("{} -> {}".format(node['hostname'],
784 self.apiserver.GetNodeFlavour(self.auth_root(),node['hostname'])['nodefamily']))
785 print("---------------------------------------- nodes")
789 def mod_python(self):
790 """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
791 return self.dnf_install( ['mod_python'] )
794 def plc_configure(self):
"run plc-config-tty"
tmpname = '{}.plc-config-tty'.format(self.name())
797 with open(tmpname,'w') as fileconf:
798 for var, value in self.plc_spec['settings'].items():
799 fileconf.write('e {}\n{}\n'.format(var, value))
800 fileconf.write('w\n')
801 fileconf.write('q\n')
802 utils.system('cat {}'.format(tmpname))
803 self.run_in_guest_piped('cat {}'.format(tmpname), 'plc-config-tty')
utils.system('rm {}'.format(tmpname))
return True
807 # care only about f>=27
808 def start_stop_systemd(self, service, start_or_stop):
809 "utility to start/stop a systemd-defined service (sfa)"
810 return self.run_in_guest("systemctl {} {}".format(start_or_stop, service)) == 0
813 "start plc through systemclt"
814 return self.start_stop_systemd('plc', 'start')
817 "stop plc through systemctl"
818 return self.start_stop_systemd('plc', 'stop')
820 def plcvm_start(self):
821 "start the PLC vserver"
825 def plcvm_stop(self):
826 "stop the PLC vserver"
830 # stores the keys from the config for further use
831 def keys_store(self):
832 "stores test users ssh keys in keys/"
833 for key_spec in self.plc_spec['keys']:
834 TestKey(self,key_spec).store_key()
837 def keys_clean(self):
838 "removes keys cached in keys/"
839 utils.system("rm -rf ./keys")
842 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
843 # for later direct access to the nodes
844 def keys_fetch(self):
845 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
dir = "keys"
if not os.path.isdir(dir):
    os.mkdir(dir)
849 vservername = self.vservername
850 vm_root = self.vm_root_in_host()
852 prefix = 'debug_ssh_key'
853 for ext in ['pub', 'rsa'] :
854 src = "{vm_root}/etc/planetlab/{prefix}.{ext}".format(**locals())
855 dst = "keys/{vservername}-debug.{ext}".format(**locals())
if self.test_ssh.fetch(src, dst) != 0:
    return False
return True
861 "create sites with PLCAPI"
862 return self.do_sites()
864 def delete_sites(self):
865 "delete sites with PLCAPI"
866 return self.do_sites(action="delete")
868 def do_sites(self, action="add"):
869 for site_spec in self.plc_spec['sites']:
870 test_site = TestSite(self,site_spec)
871 if (action != "add"):
872 utils.header("Deleting site {} in {}".format(test_site.name(), self.name()))
873 test_site.delete_site()
874 # deleted with the site
875 #test_site.delete_users()
878 utils.header("Creating site {} & users in {}".format(test_site.name(), self.name()))
879 test_site.create_site()
test_site.create_users()
return True
883 def delete_all_sites(self):
884 "Delete all sites in PLC, and related objects"
885 print('auth_root', self.auth_root())
886 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
for site in sites:
    # keep the automatic site - otherwise we shoot ourselves in the foot, as root_auth is not valid anymore
    if site['login_base'] == self.plc_spec['settings']['PLC_SLICE_PREFIX']:
        continue
891 site_id = site['site_id']
892 print('Deleting site_id', site_id)
self.apiserver.DeleteSite(self.auth_root(), site_id)
return True
897 "create nodes with PLCAPI"
898 return self.do_nodes()
899 def delete_nodes(self):
900 "delete nodes with PLCAPI"
901 return self.do_nodes(action="delete")
903 def do_nodes(self, action="add"):
904 for site_spec in self.plc_spec['sites']:
905 test_site = TestSite(self, site_spec)
907 utils.header("Deleting nodes in site {}".format(test_site.name()))
908 for node_spec in site_spec['nodes']:
909 test_node = TestNode(self, test_site, node_spec)
910 utils.header("Deleting {}".format(test_node.name()))
911 test_node.delete_node()
913 utils.header("Creating nodes for site {} in {}".format(test_site.name(), self.name()))
914 for node_spec in site_spec['nodes']:
915 utils.pprint('Creating node {}'.format(node_spec), node_spec)
916 test_node = TestNode(self, test_site, node_spec)
test_node.create_node()
return True
920 def nodegroups(self):
921 "create nodegroups with PLCAPI"
922 return self.do_nodegroups("add")
923 def delete_nodegroups(self):
924 "delete nodegroups with PLCAPI"
925 return self.do_nodegroups("delete")
YEAR = 365*24*3600
@staticmethod
def translate_timestamp(start, grain, timestamp):
930 if timestamp < TestPlc.YEAR:
    return start + timestamp * grain
else:
    return timestamp
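# so e.g. translate_timestamp(start, grain, 2) -> start + 2*grain for a
# relative spec, while a full epoch timestamp (>= YEAR) is taken as absolute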
@staticmethod
def timestamp_printable(timestamp):
937 return time.strftime('%m-%d %H:%M:%S UTC', time.gmtime(timestamp))
940 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
941 now = int(time.time())
942 grain = self.apiserver.GetLeaseGranularity(self.auth_root())
943 print('API answered grain=', grain)
944 start = (now//grain)*grain
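# i.e. start is rounded down to the latest grain boundary; with hypothetical
# values grain=1800 and now=1000000, start = (1000000//1800)*1800 = 999000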
946 # find out all nodes that are reservable
947 nodes = self.all_reservable_nodenames()
949 utils.header("No reservable node found - proceeding without leases")
952 # attach them to the leases as specified in plc_specs
# this is where the 'leases' field gets interpreted as relative or absolute
954 for lease_spec in self.plc_spec['leases']:
955 # skip the ones that come with a null slice id
if not lease_spec['slice']:
    continue
958 lease_spec['t_from'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_from'])
959 lease_spec['t_until'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_until'])
960 lease_addition = self.apiserver.AddLeases(self.auth_root(), nodes, lease_spec['slice'],
961 lease_spec['t_from'], lease_spec['t_until'])
962 if lease_addition['errors']:
963 utils.header("Cannot create leases, {}".format(lease_addition['errors']))
966 utils.header('Leases on nodes {} for {} from {:d} ({}) until {:d} ({})'\
967 .format(nodes, lease_spec['slice'],
968 lease_spec['t_from'], TestPlc.timestamp_printable(lease_spec['t_from']),
lease_spec['t_until'], TestPlc.timestamp_printable(lease_spec['t_until'])))
return ok
973 def delete_leases(self):
974 "remove all leases in the myplc side"
975 lease_ids = [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
976 utils.header("Cleaning leases {}".format(lease_ids))
977 self.apiserver.DeleteLeases(self.auth_root(), lease_ids)
980 def list_leases(self):
981 "list all leases known to the myplc"
982 leases = self.apiserver.GetLeases(self.auth_root())
983 now = int(time.time())
for l in leases:
    current = l['t_until'] >= now
986 if self.options.verbose or current:
987 utils.header("{} {} from {} until {}"\
988 .format(l['hostname'], l['name'],
989 TestPlc.timestamp_printable(l['t_from']),
TestPlc.timestamp_printable(l['t_until'])))
return True
993 # create nodegroups if needed, and populate
994 def do_nodegroups(self, action="add"):
# 1st pass to scan contents
groups_dict = {}
997 for site_spec in self.plc_spec['sites']:
998 test_site = TestSite(self,site_spec)
999 for node_spec in site_spec['nodes']:
1000 test_node = TestNode(self, test_site, node_spec)
1001 if 'nodegroups' in node_spec:
1002 nodegroupnames = node_spec['nodegroups']
1003 if isinstance(nodegroupnames, str):
1004 nodegroupnames = [ nodegroupnames ]
1005 for nodegroupname in nodegroupnames:
1006 if nodegroupname not in groups_dict:
1007 groups_dict[nodegroupname] = []
1008 groups_dict[nodegroupname].append(test_node.name())
1009 auth = self.auth_root()
1011 for (nodegroupname,group_nodes) in groups_dict.items():
if action == "add":
    print('nodegroups:', 'dealing with nodegroup',
          nodegroupname, 'on nodes', group_nodes)
1015 # first, check if the nodetagtype is here
1016 tag_types = self.apiserver.GetTagTypes(auth, {'tagname':nodegroupname})
if tag_types:
    tag_type_id = tag_types[0]['tag_type_id']
else:
1020 tag_type_id = self.apiserver.AddTagType(auth,
1021 {'tagname' : nodegroupname,
1022 'description' : 'for nodegroup {}'.format(nodegroupname),
1023 'category' : 'test'})
1024 print('located tag (type)', nodegroupname, 'as', tag_type_id)
1026 nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname' : nodegroupname})
if not nodegroups:
    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
1029 print('created nodegroup', nodegroupname, \
1030 'from tagname', nodegroupname, 'and value', 'yes')
1031 # set node tag on all nodes, value='yes'
1032 for nodename in group_nodes:
try:
    self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
except:
1036 traceback.print_exc()
1037 print('node', nodename, 'seems to already have tag', nodegroupname)
1040 expect_yes = self.apiserver.GetNodeTags(auth,
1041 {'hostname' : nodename,
1042 'tagname' : nodegroupname},
1043 ['value'])[0]['value']
1044 if expect_yes != "yes":
1045 print('Mismatch node tag on node',nodename,'got',expect_yes)
1048 if not self.options.dry_run:
1049 print('Cannot find tag', nodegroupname, 'on node', nodename)
1053 print('cleaning nodegroup', nodegroupname)
1054 self.apiserver.DeleteNodeGroup(auth, nodegroupname)
1056 traceback.print_exc()
1060 # a list of TestNode objs
1061 def all_nodes(self):
nodes = []
for site_spec in self.plc_spec['sites']:
1064 test_site = TestSite(self,site_spec)
1065 for node_spec in site_spec['nodes']:
    nodes.append(TestNode(self, test_site, node_spec))
return nodes
1069 # return a list of tuples (nodename,qemuname)
1070 def all_node_infos(self) :
node_infos = []
for site_spec in self.plc_spec['sites']:
1073 node_infos += [ (node_spec['node_fields']['hostname'], node_spec['host_box']) \
                   for node_spec in site_spec['nodes'] ]
return node_infos
1077 def all_nodenames(self):
1078 return [ x[0] for x in self.all_node_infos() ]
1079 def all_reservable_nodenames(self):
res = []
for site_spec in self.plc_spec['sites']:
1082 for node_spec in site_spec['nodes']:
1083 node_fields = node_spec['node_fields']
1084 if 'node_type' in node_fields and node_fields['node_type'] == 'reservable':
    res.append(node_fields['hostname'])
return res
1088 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
1089 def nodes_check_boot_state(self, target_boot_state, timeout_minutes,
1090 silent_minutes, period_seconds = 15):
if self.options.dry_run:
    print('dry_run')
    return True
1095 class CompleterTaskBootState(CompleterTask):
1096 def __init__(self, test_plc, hostname):
1097 self.test_plc = test_plc
1098 self.hostname = hostname
1099 self.last_boot_state = 'undef'
def actual_run(self):
    try:
        node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(),
                                                self.hostname, ['boot_state'])[0]
        self.last_boot_state = node['boot_state']
        return self.last_boot_state == target_boot_state
    except:
        return False
def message(self):
    return "CompleterTaskBootState with node {}".format(self.hostname)
1111 def failure_epilogue(self):
1112 print("node {} in state {} - expected {}"\
1113 .format(self.hostname, self.last_boot_state, target_boot_state))
1115 timeout = timedelta(minutes=timeout_minutes)
1116 graceout = timedelta(minutes=silent_minutes)
1117 period = timedelta(seconds=period_seconds)
1118 # the nodes that haven't checked yet - start with a full list and shrink over time
1119 utils.header("checking nodes boot state (expected {})".format(target_boot_state))
1120 tasks = [ CompleterTaskBootState(self,hostname) \
1121 for (hostname,_) in self.all_node_infos() ]
1122 message = 'check_boot_state={}'.format(target_boot_state)
1123 return Completer(tasks, message=message).run(timeout, graceout, period)
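# this actual_run/message/failure_epilogue triple is the CompleterTask
# contract used throughout this file: Completer(tasks, message).run(timeout,
# graceout, period) polls every task each <period>, stays silent for
# <graceout>, and gives up after <timeout>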
1125 def nodes_booted(self):
1126 return self.nodes_check_boot_state('boot', timeout_minutes=30, silent_minutes=28)
1128 def probe_kvm_iptables(self):
1129 (_,kvmbox) = self.all_node_infos()[0]
1130 TestSsh(kvmbox).run("iptables-save")
1134 def check_nodes_ping(self, timeout_seconds=60, period_seconds=10):
1135 class CompleterTaskPingNode(CompleterTask):
1136 def __init__(self, hostname):
1137 self.hostname = hostname
1138 def run(self, silent):
1139 command="ping -c 1 -w 1 {} >& /dev/null".format(self.hostname)
1140 return utils.system(command, silent=silent) == 0
1141 def failure_epilogue(self):
1142 print("Cannot ping node with name {}".format(self.hostname))
timeout = timedelta(seconds = timeout_seconds)
graceout = timeout
1145 period = timedelta(seconds = period_seconds)
1146 node_infos = self.all_node_infos()
1147 tasks = [ CompleterTaskPingNode(h) for (h,_) in node_infos ]
1148 return Completer(tasks, message='ping_node').run(timeout, graceout, period)
1150 # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1151 def ping_node(self):
1153 return self.check_nodes_ping()
1155 def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period_seconds=15):
1157 timeout = timedelta(minutes=timeout_minutes)
1158 graceout = timedelta(minutes=silent_minutes)
1159 period = timedelta(seconds=period_seconds)
1160 vservername = self.vservername
if debug:
    message = "debug"
    completer_message = 'ssh_node_debug'
    local_key = "keys/{vservername}-debug.rsa".format(**locals())
else:
    message = "boot"
    completer_message = 'ssh_node_boot'
    local_key = "keys/key_admin.rsa"
1169 utils.header("checking ssh access to nodes (expected in {} mode)".format(message))
1170 node_infos = self.all_node_infos()
1171 tasks = [ CompleterTaskNodeSsh(nodename, qemuname, local_key,
1172 boot_state=message, dry_run=self.options.dry_run) \
1173 for (nodename, qemuname) in node_infos ]
1174 return Completer(tasks, message=completer_message).run(timeout, graceout, period)
1176 def ssh_node_debug(self):
1177 "Tries to ssh into nodes in debug mode with the debug ssh key"
1178 return self.check_nodes_ssh(debug = True,
1179 timeout_minutes = self.ssh_node_debug_timeout,
1180 silent_minutes = self.ssh_node_debug_silent)
1182 def ssh_node_boot(self):
1183 "Tries to ssh into nodes in production mode with the root ssh key"
1184 return self.check_nodes_ssh(debug = False,
1185 timeout_minutes = self.ssh_node_boot_timeout,
1186 silent_minutes = self.ssh_node_boot_silent)
1188 def node_bmlogs(self):
1189 "Checks that there's a non-empty dir. /var/log/bm/raw"
1190 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw")) == 0
@node_mapper
def qemu_local_init(self): pass
@node_mapper
def bootcd(self): pass
@node_mapper
def qemu_local_config(self): pass
@node_mapper
def qemu_export(self): pass
@node_mapper
def qemu_cleanlog(self): pass
@node_mapper
def nodestate_reinstall(self): pass
@node_mapper
def nodestate_upgrade(self): pass
@node_mapper
def nodestate_safeboot(self): pass
@node_mapper
def nodestate_boot(self): pass
@node_mapper
def nodestate_show(self): pass
@node_mapper
def nodedistro_f14(self): pass
@node_mapper
def nodedistro_f18(self): pass
@node_mapper
def nodedistro_f20(self): pass
@node_mapper
def nodedistro_f21(self): pass
@node_mapper
def nodedistro_f22(self): pass
@node_mapper
def nodedistro_show(self): pass
1225 ### check hooks : invoke scripts from hooks/{node,slice}
1226 def check_hooks_node(self):
1227 return self.locate_first_node().check_hooks()
1228 def check_hooks_sliver(self) :
1229 return self.locate_first_sliver().check_hooks()
1231 def check_hooks(self):
1232 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1233 return self.check_hooks_node() and self.check_hooks_sliver()
1236 def do_check_initscripts(self):
1237 class CompleterTaskInitscript(CompleterTask):
1238 def __init__(self, test_sliver, stamp):
self.test_sliver = test_sliver
self.stamp = stamp
1241 def actual_run(self):
1242 return self.test_sliver.check_initscript_stamp(self.stamp)
1244 return "initscript checker for {}".format(self.test_sliver.name())
1245 def failure_epilogue(self):
1246 print("initscript stamp {} not found in sliver {}"\
1247 .format(self.stamp, self.test_sliver.name()))
tasks = []
for slice_spec in self.plc_spec['slices']:
1251 if 'initscriptstamp' not in slice_spec:
    continue
stamp = slice_spec['initscriptstamp']
1254 slicename = slice_spec['slice_fields']['name']
1255 for nodename in slice_spec['nodenames']:
1256 print('nodename', nodename, 'slicename', slicename, 'stamp', stamp)
1257 site,node = self.locate_node(nodename)
1258 # xxx - passing the wrong site - probably harmless
1259 test_site = TestSite(self, site)
1260 test_slice = TestSlice(self, test_site, slice_spec)
1261 test_node = TestNode(self, test_site, node)
1262 test_sliver = TestSliver(self, test_node, test_slice)
1263 tasks.append(CompleterTaskInitscript(test_sliver, stamp))
1264 return Completer(tasks, message='check_initscripts').\
1265 run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1267 def check_initscripts(self):
1268 "check that the initscripts have triggered"
1269 return self.do_check_initscripts()
1271 def initscripts(self):
1272 "create initscripts with PLCAPI"
1273 for initscript in self.plc_spec['initscripts']:
1274 utils.pprint('Adding Initscript in plc {}'.format(self.plc_spec['name']), initscript)
self.apiserver.AddInitScript(self.auth_root(), initscript['initscript_fields'])
return True
1278 def delete_initscripts(self):
1279 "delete initscripts with PLCAPI"
1280 for initscript in self.plc_spec['initscripts']:
1281 initscript_name = initscript['initscript_fields']['name']
print('Attempting to delete {} in plc {}'.format(initscript_name, self.plc_spec['name']))
try:
    self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
    print(initscript_name, 'deleted')
except:
    print('deletion went wrong - probably did not exist')
return True
1292 "create slices with PLCAPI"
1293 return self.do_slices(action="add")
1295 def delete_slices(self):
1296 "delete slices with PLCAPI"
1297 return self.do_slices(action="delete")
1299 def fill_slices(self):
1300 "add nodes in slices with PLCAPI"
1301 return self.do_slices(action="fill")
1303 def empty_slices(self):
1304 "remove nodes from slices with PLCAPI"
1305 return self.do_slices(action="empty")
1307 def do_slices(self, action="add"):
1308 for slice in self.plc_spec['slices']:
1309 site_spec = self.locate_site(slice['sitename'])
1310 test_site = TestSite(self,site_spec)
1311 test_slice=TestSlice(self,test_site,slice)
1312 if action == "delete":
1313 test_slice.delete_slice()
1314 elif action == "fill":
1315 test_slice.add_nodes()
1316 elif action == "empty":
1317 test_slice.delete_nodes()
else:
    test_slice.create_slice()
return True
1322 @slice_mapper__tasks(20, 10, 15)
1323 def ssh_slice(self): pass
1324 @slice_mapper__tasks(20, 19, 15)
1325 def ssh_slice_off(self): pass
1326 @slice_mapper__tasks(1, 1, 15)
1327 def slice_fs_present(self): pass
1328 @slice_mapper__tasks(1, 1, 15)
1329 def slice_fs_deleted(self): pass
1331 # use another name so we can exclude/ignore it from the tests on the nightly command line
1332 def ssh_slice_again(self): return self.ssh_slice()
1333 # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1334 # but for some reason the ignore-wrapping thing would not
@slice_mapper
def ssh_slice_basics(self): pass
@slice_mapper
def check_vsys_defaults(self): pass
@node_mapper
def keys_clear_known_hosts(self): pass
1344 def plcapi_urls(self):
"""
attempts to reach the PLCAPI with various forms for the URL
"""
1348 return PlcapiUrlScanner(self.auth_root(), ip=self.vserverip).scan()
1350 def speed_up_slices(self):
1351 "tweak nodemanager cycle (wait time) to 30+/-10 s"
1352 return self._speed_up_slices (30, 10)
1353 def super_speed_up_slices(self):
1354 "dev mode: tweak nodemanager cycle (wait time) to 5+/-1 s"
1355 return self._speed_up_slices(5, 1)
1357 def _speed_up_slices(self, p, r):
1358 # create the template on the server-side
1359 template = "{}.nodemanager".format(self.name())
1360 with open(template,"w") as template_file:
1361 template_file.write('OPTIONS="-p {} -r {} -d"\n'.format(p, r))
1362 in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1363 remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1364 self.test_ssh.copy_abs(template, remote)
1366 if not self.apiserver.GetConfFiles(self.auth_root(),
1367 {'dest' : '/etc/sysconfig/nodemanager'}):
1368 self.apiserver.AddConfFile(self.auth_root(),
1369 {'dest' : '/etc/sysconfig/nodemanager',
1370 'source' : 'PlanetLabConf/nodemanager',
'postinstall_cmd' : 'service nm restart',})
return True
1374 def debug_nodemanager(self):
1375 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1376 template = "{}.nodemanager".format(self.name())
1377 with open(template,"w") as template_file:
1378 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1379 in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1380 remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
self.test_ssh.copy_abs(template, remote)
return True
@node_mapper
def qemu_start(self): pass
@node_mapper
def qemu_timestamp(self): pass
@node_mapper
def qemu_nodefamily(self): pass
1393 # when a spec refers to a node possibly on another plc
1394 def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
1395 for plc in [ self ] + other_plcs:
try:
    return plc.locate_sliver_obj(nodename, slicename)
except:
    pass
1400 raise Exception("Cannot locate sliver {}@{} among all PLCs".format(nodename, slicename))
1402 # implement this one as a cross step so that we can take advantage of different nodes
1403 # in multi-plcs mode
1404 def cross_check_tcp(self, other_plcs):
1405 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1406 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1407 utils.header("check_tcp: no/empty config found")
1409 specs = self.plc_spec['tcp_specs']
1412 # first wait for the network to be up and ready from the slices
1413 class CompleterTaskNetworkReadyInSliver(CompleterTask):
1414 def __init__(self, test_sliver):
1415 self.test_sliver = test_sliver
1416 def actual_run(self):
1417 return self.test_sliver.check_tcp_ready(port = 9999)
1419 return "network ready checker for {}".format(self.test_sliver.name())
1420 def failure_epilogue(self):
1421 print("could not bind port from sliver {}".format(self.test_sliver.name()))
tasks = []
managed_sliver_names = set()
for spec in specs:
1427 # locate the TestSliver instances involved, and cache them in the spec instance
1428 spec['s_sliver'] = self.locate_sliver_obj_cross(spec['server_node'], spec['server_slice'], other_plcs)
1429 spec['c_sliver'] = self.locate_sliver_obj_cross(spec['client_node'], spec['client_slice'], other_plcs)
1430 message = "Will check TCP between s={} and c={}"\
1431 .format(spec['s_sliver'].name(), spec['c_sliver'].name())
1432 if 'client_connect' in spec:
1433 message += " (using {})".format(spec['client_connect'])
1434 utils.header(message)
1435 # we need to check network presence in both slivers, but also
1436 # avoid to insert a sliver several times
1437 for sliver in [ spec['s_sliver'], spec['c_sliver'] ]:
1438 if sliver.name() not in managed_sliver_names:
1439 tasks.append(CompleterTaskNetworkReadyInSliver(sliver))
1440 # add this sliver's name in the set
managed_sliver_names.update( {sliver.name()} )
# wait for the network to be OK in all server sides
1444 if not Completer(tasks, message='check for network readiness in slivers').\
   run(timedelta(seconds=30), timedelta(seconds=24), period=timedelta(seconds=5)):
    return False
# run server and client
for spec in specs:
    port = spec['port']
1452 # the issue here is that we have the server run in background
1453 # and so we have no clue if it took off properly or not
1454 # looks like in some cases it does not
1455 address = spec['s_sliver'].test_node.name()
if not spec['s_sliver'].run_tcp_server(address, port, timeout=20):
    overall = False
    continue
1460 # idem for the client side
1461 # use nodename from located sliver, unless 'client_connect' is set
1462 if 'client_connect' in spec:
1463 destination = spec['client_connect']
1465 destination = spec['s_sliver'].test_node.name()
if not spec['c_sliver'].run_tcp_client(destination, port):
    overall = False
return overall
1470 # painfully enough, we need to allow for some time as netflow might show up last
1471 def check_system_slice(self):
1472 "all nodes: check that a system slice is alive"
1473 # netflow currently not working in the lxc distro
1474 # drl not built at all in the wtx distro
1475 # if we find either of them we're happy
1476 return self.check_netflow() or self.check_drl()
1479 def check_netflow(self): return self._check_system_slice('netflow')
1480 def check_drl(self): return self._check_system_slice('drl')
1482 # we have the slices up already here, so it should not take too long
1483 def _check_system_slice(self, slicename, timeout_minutes=5, period_seconds=15):
1484 class CompleterTaskSystemSlice(CompleterTask):
1485 def __init__(self, test_node, dry_run):
1486 self.test_node = test_node
1487 self.dry_run = dry_run
1488 def actual_run(self):
1489 return self.test_node._check_system_slice(slicename, dry_run=self.dry_run)
1491 return "System slice {} @ {}".format(slicename, self.test_node.name())
1492 def failure_epilogue(self):
1493 print("COULD not find system slice {} @ {}".format(slicename, self.test_node.name()))
1494 timeout = timedelta(minutes=timeout_minutes)
1495 silent = timedelta(0)
1496 period = timedelta(seconds=period_seconds)
1497 tasks = [ CompleterTaskSystemSlice(test_node, self.options.dry_run) \
1498 for test_node in self.all_nodes() ]
1499 return Completer(tasks, message='_check_system_slice').run(timeout, silent, period)
1501 def plcsh_stress_test(self):
1502 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1503 # install the stress-test in the plc image
1504 location = "/usr/share/plc_api/plcsh_stress_test.py"
1505 remote = "{}/{}".format(self.vm_root_in_host(), location)
1506 self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1508 command += " -- --check"
1509 if self.options.size == 1:
1510 command += " --tiny"
1511 return self.run_in_guest(command) == 0
# populate runs the same utility with slightly different options
# in particular it runs with --preserve (don't clean up) and without --check
1515 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1517 def sfa_install_all(self):
1518 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1519 return (self.dnf_install("sfa sfa-plc sfa-sfatables sfa-client") and
1520 self.run_in_guest("systemctl enable sfa-registry")==0 and
1521 self.run_in_guest("systemctl enable sfa-aggregate")==0)
1523 def sfa_install_core(self):
1525 return self.dnf_install("sfa")
1527 def sfa_install_plc(self):
1528 "yum install sfa-plc"
1529 return self.dnf_install("sfa-plc")
1531 def sfa_install_sfatables(self):
1532 "yum install sfa-sfatables"
1533 return self.dnf_install("sfa-sfatables")
1535 # for some very odd reason, this sometimes fails with the following symptom
1536 # # yum install sfa-client
1537 # Setting up Install Process
1539 # Downloading Packages:
1540 # Running rpm_check_debug
1541 # Running Transaction Test
1542 # Transaction Test Succeeded
1543 # Running Transaction
1544 # Transaction couldn't start:
1545 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1546 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1547 # even though in the same context I have
1548 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1549 # Filesystem Size Used Avail Use% Mounted on
1550 # /dev/hdv1 806G 264G 501G 35% /
1551 # none 16M 36K 16M 1% /tmp
1553 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1554 def sfa_install_client(self):
1555 "yum install sfa-client"
1556 first_try = self.dnf_install("sfa-client")
1559 utils.header("********** Regular yum failed - special workaround in place, 2nd chance")
1560 code, cached_rpm_path = \
1561 utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1562 utils.header("rpm_path=<<{}>>".format(rpm_path))
1564 self.run_in_guest("rpm -i {}".format(cached_rpm_path))
1565 return self.dnf_check_installed("sfa-client")
1567 def sfa_dbclean(self):
1568 "thoroughly wipes off the SFA database"
1569 return self.run_in_guest("sfaadmin reg nuke") == 0 or \
1570 self.run_in_guest("sfa-nuke.py") == 0 or \
1571 self.run_in_guest("sfa-nuke-plc.py") == 0 or \
1572 self.run_in_guest("sfaadmin registry nuke") == 0
1574 def sfa_fsclean(self):
1575 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1576 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1579 def sfa_plcclean(self):
1580 "cleans the PLC entries that were created as a side effect of running the script"
1582 sfa_spec = self.plc_spec['sfa']
1584 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1585 login_base = auth_sfa_spec['login_base']
try:
    self.apiserver.DeleteSite(self.auth_root(), login_base)
except:
1589 print("Site {} already absent from PLC db".format(login_base))
1591 for spec_name in ['pi_spec','user_spec']:
1592 user_spec = auth_sfa_spec[spec_name]
1593 username = user_spec['email']
try:
    self.apiserver.DeletePerson(self.auth_root(), username)
except:
1597 # this in fact is expected as sites delete their members
1598 #print "User {} already absent from PLC db".format(username)
1601 print("REMEMBER TO RUN sfa_import AGAIN")
1604 def sfa_uninstall(self):
1605 "uses rpm to uninstall sfa - ignore result"
1606 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1607 self.run_in_guest("rm -rf /var/lib/sfa")
1608 self.run_in_guest("rm -rf /etc/sfa")
1609 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1611 self.run_in_guest("rpm -e --noscripts sfa-plc")
1614 ### run unit tests for SFA
# NOTE: on f14/i386, yum install sfa-tests fails for no apparent reason
1616 # Running Transaction
1617 # Transaction couldn't start:
1618 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1619 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1620 # no matter how many Gbs are available on the testplc
1621 # could not figure out what's wrong, so...
1622 # if the yum install phase fails, consider the test is successful
1623 # other combinations will eventually run it hopefully
1624 def sfa_utest(self):
1625 "dnf install sfa-tests and run SFA unittests"
1626 self.run_in_guest("dnf -y install sfa-tests")
1627 # failed to install - forget it
1628 if self.run_in_guest("rpm -q sfa-tests") != 0:
1629 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1631 return self.run_in_guest("/usr/share/sfa/tests/testAll.py") == 0
1635 dirname = "conf.{}".format(self.plc_spec['name'])
1636 if not os.path.isdir(dirname):
1637 utils.system("mkdir -p {}".format(dirname))
1638 if not os.path.isdir(dirname):
1639 raise Exception("Cannot create config dir for plc {}".format(self.name()))
1642 def conffile(self, filename):
1643 return "{}/{}".format(self.confdir(), filename)
1644 def confsubdir(self, dirname, clean, dry_run=False):
1645 subdirname = "{}/{}".format(self.confdir(), dirname)
1647 utils.system("rm -rf {}".format(subdirname))
1648 if not os.path.isdir(subdirname):
1649 utils.system("mkdir -p {}".format(subdirname))
1650 if not dry_run and not os.path.isdir(subdirname):
1651 raise "Cannot create config subdir {} for plc {}".format(dirname, self.name())
    def conffile_clean(self, filename):
        filename = self.conffile(filename)
        return utils.system("rm -rf {}".format(filename)) == 0

    def sfa_configure(self):
        "run sfa-config-tty"
        tmpname = self.conffile("sfa-config-tty")
        with open(tmpname, 'w') as fileconf:
            for var, value in self.plc_spec['sfa']['settings'].items():
                fileconf.write('e {}\n{}\n'.format(var, value))
            fileconf.write('w\n')
            fileconf.write('R\n')
            fileconf.write('q\n')
        utils.system('cat {}'.format(tmpname))
        self.run_in_guest_piped('cat {}'.format(tmpname), 'sfa-config-tty')
        return True

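    # for illustration only: with a hypothetical settings dict of
    #   {'SFA_REGISTRY_ROOT_AUTH': 'plt'}
    # the script piped into sfa-config-tty above would read
    #   e SFA_REGISTRY_ROOT_AUTH
    #   plt
    #   w
    #   R
    #   q
    # i.e. one 'e <var>' / '<value>' pair per setting, then the tool's
    # write/reload/quit commands
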
    def aggregate_xml_line(self):
        port = self.plc_spec['sfa']['neighbours-port']
        return '<aggregate addr="{}" hrn="{}" port="{}"/>'\
            .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'], port)

    def registry_xml_line(self):
        return '<registry addr="{}" hrn="{}" port="12345"/>'\
            .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])

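    # with made-up values vserverip='192.168.0.10', root auth 'plt' and
    # neighbours-port 12346, these two helpers would produce
    #   <aggregate addr="192.168.0.10" hrn="plt" port="12346"/>
    #   <registry addr="192.168.0.10" hrn="plt" port="12345"/>
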
    # a cross step that takes all other plcs in argument
    def cross_sfa_configure(self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        if not other_plcs:
            return True
        agg_fname = self.conffile("agg.xml")
        with open(agg_fname, "w") as out:
            out.write("<aggregates>{}</aggregates>\n"\
                      .format(" ".join([plc.aggregate_xml_line() for plc in other_plcs])))
        utils.header("(Over)wrote {}".format(agg_fname))
        reg_fname = self.conffile("reg.xml")
        with open(reg_fname, "w") as out:
            out.write("<registries>{}</registries>\n"\
                      .format(" ".join([plc.registry_xml_line() for plc in other_plcs])))
        utils.header("(Over)wrote {}".format(reg_fname))
        return self.test_ssh.copy_abs(agg_fname,
                                      '/{}/etc/sfa/aggregates.xml'.format(self.vm_root_in_host())) == 0 \
           and self.test_ssh.copy_abs(reg_fname,
                                      '/{}/etc/sfa/registries.xml'.format(self.vm_root_in_host())) == 0

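    # e.g. with a single sibling plc, agg.xml would hold one line like
    # (values made up):
    #   <aggregates><aggregate addr="10.0.0.2" hrn="plt2" port="12346"/></aggregates>
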
    def sfa_import(self):
        "use sfaadmin to import from plc"
        auth = self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']    # note: currently unused
        return self.run_in_guest('sfaadmin reg import_registry') == 0

    def sfa_start(self):
        "start SFA through systemctl"
        return (self.start_stop_systemd('sfa-registry', 'start') and
                self.start_stop_systemd('sfa-aggregate', 'start'))

    def sfi_configure(self):
        "Create /root/sfi on the plc side for sfi client configuration"
        if self.options.dry_run:
            utils.header("DRY RUN - skipping step")
            return True
        sfa_spec = self.plc_spec['sfa']
        # cannot use auth_sfa_mapper to pass dir_name
        for slice_spec in sfa_spec['auth_sfa_specs']:
            test_slice = TestAuthSfa(self, slice_spec)
            dir_basename = os.path.basename(test_slice.sfi_path())
            dir_name = self.confsubdir("dot-sfi/{}".format(dir_basename),
                                       clean=True, dry_run=self.options.dry_run)
            test_slice.sfi_configure(dir_name)
            # push into the remote /root/sfi area
            location = test_slice.sfi_path()
            remote = "{}/{}".format(self.vm_root_in_host(), location)
            self.test_ssh.mkdir(remote, abs=True)
            # need to strip the last level of 'remote', otherwise we would get an extra dir level
            self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
        return True

    def sfi_clean(self):
        "clean up /root/sfi on the plc side"
        self.run_in_guest("rm -rf /root/sfi")
        return True

    def sfa_rspec_empty(self):
        "expose a static empty rspec (ships with the tests module) in the sfi directory"
        filename = "empty-rspec.xml"
        for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_slice = TestAuthSfa(self, slice_spec)
            in_vm = test_slice.sfi_path()
            remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
            if self.test_ssh.copy_abs(filename, remote) != 0:
                return False
        return True

    @auth_sfa_mapper
    def sfa_register_site(self): pass
    @auth_sfa_mapper
    def sfa_register_pi(self): pass
    @auth_sfa_mapper
    def sfa_register_user(self): pass
    @auth_sfa_mapper
    def sfa_update_user(self): pass
    @auth_sfa_mapper
    def sfa_register_slice(self): pass
    @auth_sfa_mapper
    def sfa_renew_slice(self): pass
    @auth_sfa_mapper
    def sfa_get_expires(self): pass
    @auth_sfa_mapper
    def sfa_discover(self): pass
    @auth_sfa_mapper
    def sfa_rspec(self): pass
    @auth_sfa_mapper
    def sfa_allocate(self): pass
    @auth_sfa_mapper
    def sfa_allocate_empty(self): pass
    @auth_sfa_mapper
    def sfa_provision(self): pass
    @auth_sfa_mapper
    def sfa_provision_empty(self): pass
    @auth_sfa_mapper
    def sfa_describe(self): pass
    @auth_sfa_mapper
    def sfa_check_slice_plc(self): pass
    @auth_sfa_mapper
    def sfa_check_slice_plc_empty(self): pass
    @auth_sfa_mapper
    def sfa_update_slice(self): pass
    @auth_sfa_mapper
    def sfa_remove_user_from_slice(self): pass
    @auth_sfa_mapper
    def sfa_insert_user_in_slice(self): pass
    @auth_sfa_mapper
    def sfi_list(self): pass
    @auth_sfa_mapper
    def sfi_show_site(self): pass
    @auth_sfa_mapper
    def sfi_show_slice(self): pass
    @auth_sfa_mapper
    def sfi_show_slice_researchers(self): pass
    @auth_sfa_mapper
    def ssh_slice_sfa(self): pass
    @auth_sfa_mapper
    def sfa_delete_user(self): pass
    @auth_sfa_mapper
    def sfa_delete_slice(self): pass

1807 "stop sfa through systemclt"
1808 return (self.start_stop_systemd('sfa-aggregate', 'stop') and
1809 self.start_stop_systemd('sfa-registry', 'stop'))
1812 "creates random entries in the PLCAPI"
1813 # install the stress-test in the plc image
1814 location = "/usr/share/plc_api/plcsh_stress_test.py"
1815 remote = "{}/{}".format(self.vm_root_in_host(), location)
1816 self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1818 command += " -- --preserve --short-names"
1819 local = (self.run_in_guest(command) == 0);
1820 # second run with --foreign
1821 command += ' --foreign'
1822 remote = (self.run_in_guest(command) == 0);
1823 return local and remote
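    # the guest thus ends up running, in order:
    #   /usr/share/plc_api/plcsh_stress_test.py -- --preserve --short-names
    #   /usr/share/plc_api/plcsh_stress_test.py -- --preserve --short-names --foreign
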
    ####################
    @bonding_redirector
    def bonding_init_partial(self): pass

    @bonding_redirector
    def bonding_add_yum(self): pass

    @bonding_redirector
    def bonding_install_rpms(self): pass

    ####################
    def gather_logs(self):
        "gets all possible logs from plc, qemu nodes and slices, for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
        # (2) get all the nodes' qemu logs and store them as logs/node.qemu.<node>.log
        # (3) get the nodes' /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible, get the slices' /var/log as logs/sliver.var-log.<sliver>/*
        print("-------------------- TestPlc.gather_logs : PLC's /var/log")
        self.gather_var_logs()
        print("-------------------- TestPlc.gather_logs : PLC's /var/lib/pgsql/data/pg_log/")
        self.gather_pgsql_logs()
        print("-------------------- TestPlc.gather_logs : PLC's /root/sfi/")
        self.gather_root_sfi()
        print("-------------------- TestPlc.gather_logs : nodes' QEMU logs")
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                test_node.gather_qemu_logs()
        print("-------------------- TestPlc.gather_logs : nodes' /var/log")
        self.gather_nodes_var_logs()
        print("-------------------- TestPlc.gather_logs : sample sliver's /var/log")
        self.gather_slivers_var_logs()
        return True

    def gather_slivers_var_logs(self):
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.{}".format(test_sliver.name()))
            command = remote + " | tar -C logs/sliver.var-log.{} -xf -".format(test_sliver.name())
            utils.system(command)
        return True

    def gather_var_logs(self):
        utils.system("mkdir -p logs/myplc.var-log.{}".format(self.name()))
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.var-log.{} -xf -".format(self.name())
        utils.system(command)
        command = "chmod a+r,a+x logs/myplc.var-log.{}/httpd".format(self.name())
        utils.system(command)

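    # the net effect is a shell pipeline of this shape
    # (ssh incantation abridged, plc name made up):
    #   ssh root@myplc tar -C /var/log/ -cf - . | tar -C logs/myplc.var-log.myplc -xf -
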
    def gather_pgsql_logs(self):
        utils.system("mkdir -p logs/myplc.pgsql-log.{}".format(self.name()))
        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.pgsql-log.{} -xf -".format(self.name())
        utils.system(command)

    def gather_root_sfi(self):
        utils.system("mkdir -p logs/sfi.{}".format(self.name()))
        to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
        command = to_plc + "| tar -C logs/sfi.{} -xf -".format(self.name())
        utils.system(command)

    def gather_nodes_var_logs(self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                test_ssh = TestSsh(test_node.name(), key="keys/key_admin.rsa")
                command = test_ssh.actual_command("tar -C /var/log -cf - .")
                command = command + "| tar -C logs/node.var-log.{} -xf -".format(test_node.name())
                utils.system("mkdir -p logs/node.var-log.{}".format(test_node.name()))
                utils.system(command)

    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile(self, database):
        # uses options.dbname if it is found
        try:
            name = self.options.dbname
            if not isinstance(name, str):
                raise Exception
        except Exception:
            # fall back on the current date
            t = datetime.now()
            d = t.date()
            name = str(d)
        return "/root/{}-{}.sql".format(database, name)

    def plc_db_dump(self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        dump = self.dbfile("planetlab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f ' + dump)
        utils.header('Dumped planetlab5 database in {}'.format(dump))
        return True

    def plc_db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        dump = self.dbfile("planetlab5")
        self.run_in_guest('systemctl stop httpd')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab5', 'psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump)
        # restart the httpd service
        self.run_in_guest('systemctl start httpd')
        utils.header('Database restored from ' + dump)

    @staticmethod
    def create_ignore_steps():
        for step in TestPlc.default_steps + TestPlc.other_steps:
            # default step can have a plc qualifier
            if '@' in step:
                step, qualifier = step.split('@')
            # or be defined as forced or ignored by default
            for keyword in ['_ignore', '_force']:
                if step.endswith(keyword):
                    step = step.replace(keyword, '')
            if step == SEP or step == SEPSFA:
                continue
            method = getattr(TestPlc, step)
            name = step + '_ignore'
            wrapped = ignore_result(method)
            # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
            setattr(TestPlc, name, wrapped)

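    # so, taking 'sfa_import' as an example of a declared step, the class
    # also grows an 'sfa_import_ignore' companion that runs the same code
    # through ignore_result and always reports success
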
    # @ignore_result
    # def ssh_slice_again_ignore(self): pass
    # @ignore_result
    # def check_initscripts_ignore(self): pass

    def standby_1_through_20(self):
        """convenience function to wait for a specified number of minutes"""
        pass

    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass

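    # through standby_generic, e.g. standby_5 becomes a step that simply waits
    # for 5 minutes - the duration is parsed out of the method name
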
    # convenience for debugging the test logic
    def yes(self): return True
    def no(self): return False
    def fail(self): return False