# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA

import sys
import os, os.path
import time
import socket
import traceback

from datetime import datetime, timedelta

import utils
from Completer import Completer, CompleterTask
from TestSite import TestSite
from TestNode import TestNode, CompleterTaskNodeSsh
from TestUser import TestUser
from TestKey import TestKey
from TestSlice import TestSlice
from TestSliver import TestSliver
from TestBoxQemu import TestBoxQemu
from TestSsh import TestSsh
from TestApiserver import TestApiserver
from TestAuthSfa import TestAuthSfa
from PlcapiUrlScanner import PlcapiUrlScanner
from TestBonding import TestBonding
has_sfa_cache_filename = "sfa-cache"

# separators used in the step lists below
SEP = '<sep>'
SEPSFA = '<sep_sfa>'

# step methods must take (self) and return a boolean (options is a member of the class)
def standby(minutes, dry_run):
    utils.header('Entering StandBy for {:d} mn'.format(minutes))
    if dry_run:
        print('dry_run')
    else:
        time.sleep(60 * minutes)
    return True

def standby_generic(func):
    def actual(self):
        minutes = int(func.__name__.split("_")[1])
        return standby(minutes, self.options.dry_run)
    return actual

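# a sketch of how this decorator is meant to be used: the step lists below
# mention 'standby_1_through_20', which expands to one-liners like
#     @standby_generic
#     def standby_5(self): pass
# where the numeric part of the name is the number of minutes to sleep
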
def node_mapper(method):
    def map_on_nodes(self, *args, **kwds):
        overall = True
        node_method = TestNode.__dict__[method.__name__]
        for test_node in self.all_nodes():
            if not node_method(test_node, *args, **kwds):
                overall = False
        return overall
    # maintain __name__ for ignore_result
    map_on_nodes.__name__ = method.__name__
    # restore the doc text
    map_on_nodes.__doc__ = TestNode.__dict__[method.__name__].__doc__
    return map_on_nodes

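# usage sketch for node_mapper - the actual steps further down do exactly this:
#     @node_mapper
#     def qemu_start(self): pass
# the decorated one-liner becomes a step that calls TestNode.qemu_start on
# every node in the spec, and succeeds only if all the per-node calls do
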
def slice_mapper(method):
    def map_on_slices(self):
        overall = True
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site(slice_spec['sitename'])
            test_site = TestSite(self, site_spec)
            test_slice = TestSlice(self, test_site, slice_spec)
            if not slice_method(test_slice, self.options):
                overall = False
        return overall
    # maintain __name__ for ignore_result
    map_on_slices.__name__ = method.__name__
    # restore the doc text
    map_on_slices.__doc__ = TestSlice.__dict__[method.__name__].__doc__
    return map_on_slices

def bonding_redirector(method):
    bonding_name = method.__name__.replace('bonding_', '')
    def redirect(self):
        bonding_method = TestBonding.__dict__[bonding_name]
        return bonding_method(self.test_bonding)
    # maintain __name__ for ignore_result
    redirect.__name__ = method.__name__
    # restore the doc text
    redirect.__doc__ = TestBonding.__dict__[bonding_name].__doc__
    return redirect

# run a step but return True so that we can go on
def ignore_result(method):
    def ignoring(self):
        # ssh_slice_ignore -> ssh_slice
        ref_name = method.__name__.replace('_ignore', '').replace('force_', '')
        ref_method = TestPlc.__dict__[ref_name]
        result = ref_method(self)
        print("Actual (but ignored) result for {ref_name} is {result}".format(**locals()))
        return Ignored(result)
    name = method.__name__.replace('_ignore', '').replace('force_', '')
    ignoring.__name__ = name
    ignoring.__doc__ = "ignored version of " + name
    return ignoring

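# usage sketch for ignore_result:
#     @ignore_result
#     def ssh_slice_ignore(self): pass
# runs TestPlc.ssh_slice for real, prints the outcome, and wraps it in
# Ignored() so the overall run is not marked as failed by this step
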
# a variant that expects the TestSlice method to return a list of CompleterTasks that
# are then merged into a single Completer run, to avoid waiting for the slices
# one after the other - especially useful when a test fails, of course
# because we need to pass arguments we use a class instead..
class slice_mapper__tasks(object):
    # could not get this to work with named arguments
    def __init__(self, timeout_minutes, silent_minutes, period_seconds):
        self.timeout = timedelta(minutes = timeout_minutes)
        self.silent = timedelta(minutes = silent_minutes)
        self.period = timedelta(seconds = period_seconds)
    def __call__(self, method):
        decorator_self = self
        # compute augmented method name
        method_name = method.__name__ + "__tasks"
        # locate in TestSlice
        slice_method = TestSlice.__dict__[method_name]
        def wrappee(self):
            tasks = []
            for slice_spec in self.plc_spec['slices']:
                site_spec = self.locate_site(slice_spec['sitename'])
                test_site = TestSite(self, site_spec)
                test_slice = TestSlice(self, test_site, slice_spec)
                tasks += slice_method(test_slice, self.options)
            return Completer(tasks, message=method.__name__).\
                run(decorator_self.timeout, decorator_self.silent, decorator_self.period)
        # restore the doc text from the TestSlice method even if a bit odd
        wrappee.__name__ = method.__name__
        wrappee.__doc__ = slice_method.__doc__
        return wrappee

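# usage sketch for slice_mapper__tasks - see e.g. ssh_slice further down:
#     @slice_mapper__tasks(20, 10, 15)
#     def ssh_slice(self): pass
# gathers TestSlice.ssh_slice__tasks() from every slice and runs the merged
# list in one Completer: 20 mn overall timeout, 10 mn of silence, 15 s period
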
def auth_sfa_mapper(method):
    def actual(self):
        overall = True
        auth_method = TestAuthSfa.__dict__[method.__name__]
        for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_auth = TestAuthSfa(self, auth_spec)
            if not auth_method(test_auth, self.options):
                overall = False
        return overall
    # restore the doc text
    actual.__doc__ = TestAuthSfa.__dict__[method.__name__].__doc__
    return actual

class Ignored:
    def __init__(self, result):
        self.result = result

class TestPlc:

    default_steps = [
        'show', SEP,
        'plcvm_delete', 'plcvm_timestamp', 'plcvm_create', SEP,
        'plc_install', 'plc_configure', 'plc_start', SEP,
        'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
        'plcapi_urls', 'speed_up_slices', SEP,
        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
        # slices created under plcsh interactively seem to be fine but these ones don't have the tags
        # keep this out of the way for now
        'check_vsys_defaults_ignore', SEP,
        # run this first off so it's easier to re-run on another qemu box
        'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init', 'bootcd', 'qemu_local_config', SEP,
        'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', 'qemu_nodefamily', SEP,
        'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
        'sfi_configure@1', 'sfa_register_site@1', 'sfa_register_pi@1', SEPSFA,
        'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
        'sfa_remove_user_from_slice@1', 'sfi_show_slice_researchers@1',
        'sfa_insert_user_in_slice@1', 'sfi_show_slice_researchers@1', SEPSFA,
        'sfa_discover@1', 'sfa_rspec@1', SEPSFA,
        'sfa_allocate@1', 'sfa_provision@1', 'sfa_describe@1', SEPSFA,
        'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
        'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
        # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
        # but as the stress test might take a while, we sometimes missed the debug mode..
        'probe_kvm_iptables',
        'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
        'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts', SEP,
        'ssh_slice_sfa@1', SEPSFA,
        'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1', 'sfa_check_slice_plc_empty@1', SEPSFA,
        'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
        'cross_check_tcp@1', 'check_system_slice', SEP,
        # for inspecting the slice while it runs the first time
        # check slices are turned off properly
        'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
        # check they are properly re-created with the same name
        'fill_slices', 'ssh_slice_again', SEP,
        'gather_logs_force', SEP,
        ]
    other_steps = [
        'export', 'show_boxes', 'super_speed_up_slices', SEP,
        'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
        'delete_initscripts', 'delete_nodegroups', 'delete_all_sites', SEP,
        'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
        'delete_leases', 'list_leases', SEP,
        'nodestate_show', 'nodestate_safeboot', 'nodestate_boot', 'nodestate_upgrade', SEP,
        'nodeflavour_show', 'nodedistro_f14', 'nodedistro_f18', SEP,
        'nodedistro_f20', 'nodedistro_f21', 'nodedistro_f22', SEP,
        'nodeplain_on', 'nodeplain_off', 'nodeplain_show', SEP,
        'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
        'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
        'sfa_plcclean', 'sfa_dbclean', 'sfa_stop', 'sfa_uninstall', 'sfi_clean', SEPSFA,
        'sfa_get_expires', SEPSFA,
        'plc_db_dump', 'plc_db_restore', SEP,
        'check_netflow', 'check_drl', SEP,
        'debug_nodemanager', 'slice_fs_present', SEP,
        'standby_1_through_20', 'yes', 'no', SEP,
        'install_syslinux6', 'bonding_builds', 'bonding_nodes', SEP,
        ]
    default_bonding_steps = [
        'bonding_init_partial',
        'bonding_add_yum',
        'bonding_install_rpms', SEP,
        ]

    @staticmethod
    def printable_steps(list):
        single_line = " ".join(list) + " "
        return single_line.replace(" " + SEP + " ", " \\\n").replace(" " + SEPSFA + " ", " \\\n")

    @staticmethod
    def valid_step(step):
        return step != SEP and step != SEPSFA

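    # a sketch of what these two helpers do, assuming SEP = '<sep>':
    #     printable_steps(['show', SEP, 'plc_install'])
    # folds each separator into a backslash-newline, so a long scenario prints
    # one step group per line, while valid_step() is what lets the runner
    # skip the separators altogether
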
    # turn off the sfa-related steps when build has skipped SFA
    # this was originally for centos5 but is still valid
    # for up to f12 as recent SFAs with sqlalchemy won't build before f14
    @staticmethod
    def _has_sfa_cached(rpms_url):
        if os.path.isfile(has_sfa_cache_filename):
            with open(has_sfa_cache_filename) as cache:
                cached = cache.read() == "yes"
            utils.header("build provides SFA (cached):{}".format(cached))
            return cached
        # warning, we're now building 'sface' so let's be a bit more picky
        # full builds are expected to return with 0 here
        utils.header("Checking if build provides SFA package...")
        retcod = utils.system("curl --silent {}/ | grep -q sfa-".format(rpms_url)) == 0
        encoded = 'yes' if retcod else 'no'
        with open(has_sfa_cache_filename, 'w') as cache:
            cache.write(encoded)
        return retcod

    @staticmethod
    def check_whether_build_has_sfa(rpms_url):
        has_sfa = TestPlc._has_sfa_cached(rpms_url)
        if has_sfa:
            utils.header("build does provide SFA")
        else:
            # move all steps containing 'sfa' from default_steps to other_steps
            utils.header("SFA package not found - removing steps with sfa or sfi")
            sfa_steps = [ step for step in TestPlc.default_steps
                          if step.find('sfa') >= 0 or step.find("sfi") >= 0 ]
            TestPlc.other_steps += sfa_steps
            for step in sfa_steps:
                TestPlc.default_steps.remove(step)

    def __init__(self, plc_spec, options):
        self.plc_spec = plc_spec
        self.options = options
        self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
        self.vserverip = plc_spec['vserverip']
        self.vservername = plc_spec['vservername']
        self.vplchostname = self.vservername.split('-')[-1]
        self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
        self.apiserver = TestApiserver(self.url, options.dry_run)
        (self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
        (self.ssh_node_debug_timeout, self.ssh_node_debug_silent) = plc_spec['ssh_node_debug_timers']

    def has_addresses_api(self):
        return self.apiserver.has_method('AddIpAddress')

    def name(self):
        name = self.plc_spec['name']
        return "{}.{}".format(name, self.vservername)

    def hostname(self):
        return self.plc_spec['host_box']

    def is_local(self):
        return self.test_ssh.is_local()

    # define the API methods on this object through xmlrpc
    # would help, but not strictly necessary
    def connect(self):
        pass

    def actual_command_in_guest(self, command, backslash=False):
        raw1 = self.host_to_guest(command)
        raw2 = self.test_ssh.actual_command(raw1, dry_run=self.options.dry_run, backslash=backslash)
        return raw2

    def start_guest(self):
        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),
                                                         dry_run=self.options.dry_run))

    def stop_guest(self):
        return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),
                                                         dry_run=self.options.dry_run))

    def run_in_guest(self, command, backslash=False):
        raw = self.actual_command_in_guest(command, backslash)
        return utils.system(raw)

    def run_in_host(self, command):
        return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)

    # backslashing turned out so awful at some point that I've turned off auto-backslashing
    # see e.g. plc_start esp. the version for f14
    # command gets run in the plc's vm
    def host_to_guest(self, command):
        ssh_leg = TestSsh(self.vplchostname)
        return ssh_leg.actual_command(command, keep_stdin=True)

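    # a sketch of the resulting nesting: host_to_guest() wraps a command for
    # the guest (ssh into self.vplchostname), and actual_command_in_guest()
    # wraps that again for the host box, so e.g. 'service plc start' ends up
    # running roughly as
    #     ssh <host_box> ssh <vplchostname> service plc start
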
    # this /vservers thing is legacy...
    def vm_root_in_host(self):
        return "/vservers/{}/".format(self.vservername)

    def vm_timestamp_path(self):
        return "/vservers/{}/{}.timestamp".format(self.vservername, self.vservername)

    # start/stop the vserver
    def start_guest_in_host(self):
        return "virsh -c lxc:/// start {}".format(self.vservername)

    def stop_guest_in_host(self):
        return "virsh -c lxc:/// destroy {}".format(self.vservername)

    def run_in_guest_piped(self, local, remote):
        return utils.system(local + " | " + self.test_ssh.actual_command(self.host_to_guest(remote),
                                                                         keep_stdin=True))

    def yum_check_installed(self, rpms):
        if isinstance(rpms, list):
            rpms = " ".join(rpms)
        return self.run_in_guest("rpm -q {}".format(rpms)) == 0

    # does a yum install in the vs, ignore yum retcod, check with rpm
    def yum_install(self, rpms):
        if isinstance(rpms, list):
            rpms = " ".join(rpms)
        self.run_in_guest("yum -y install {}".format(rpms))
        # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
        self.run_in_guest("yum-complete-transaction -y")
        return self.yum_check_installed(rpms)

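    # design note: yum's exit code is deliberately not trusted here - an
    # interrupted transaction can exit non-zero even though the packages did
    # land - so 'rpm -q' is the source of truth, e.g.
    #     self.yum_install(['myplc'])
    # returns True only if 'rpm -q myplc' succeeds in the guest afterwards
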
    def auth_root(self):
        return {'Username'   : self.plc_spec['settings']['PLC_ROOT_USER'],
                'AuthMethod' : 'password',
                'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
                'Role'       : self.plc_spec['role'],
                }

    def locate_site(self, sitename):
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception("Cannot locate site {}".format(sitename))

    def locate_node(self, nodename):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                    return site, node
        raise Exception("Cannot locate node {}".format(nodename))

    def locate_hostname(self, hostname):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    return site, node
        raise Exception("Cannot locate hostname {}".format(hostname))

    def locate_key(self, key_name):
        for key in self.plc_spec['keys']:
            if key['key_name'] == key_name:
                return key
        raise Exception("Cannot locate key {}".format(key_name))

    def locate_private_key_from_key_names(self, key_names):
        # locate the first avail. key
        found = False
        for key_name in key_names:
            key_spec = self.locate_key(key_name)
            test_key = TestKey(self, key_spec)
            publickey = test_key.publicpath()
            privatekey = test_key.privatepath()
            if os.path.isfile(publickey) and os.path.isfile(privatekey):
                found = True
                break
        if found:
            return privatekey
        else:
            return None

    def locate_slice(self, slicename):
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                return slice
        raise Exception("Cannot locate slice {}".format(slicename))

    def all_sliver_objs(self):
        result = []
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj(nodename, slicename))
        return result

    def locate_sliver_obj(self, nodename, slicename):
        site, node = self.locate_node(nodename)
        slice = self.locate_slice(slicename)
        test_site = TestSite(self, site)
        test_node = TestNode(self, test_site, node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice(self, test_site, slice)
        return TestSliver(self, test_node, test_slice)

    def locate_first_node(self):
        nodename = self.plc_spec['slices'][0]['nodenames'][0]
        site, node = self.locate_node(nodename)
        test_site = TestSite(self, site)
        test_node = TestNode(self, test_site, node)
        return test_node

    def locate_first_sliver(self):
        slice_spec = self.plc_spec['slices'][0]
        slicename = slice_spec['slice_fields']['name']
        nodename = slice_spec['nodenames'][0]
        return self.locate_sliver_obj(nodename, slicename)

    # all different hostboxes used in this plc
    def get_BoxNodes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        tuples = []
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(), test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        result = {}
        for (box, node) in tuples:
            if box not in result:
                result[box] = []
            result[box].append(node)
        return result

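    # shape of the result, as a sketch with made-up names:
    #     { 'qemubox1.example.org' : [ <TestNode vnode01>, <TestNode vnode02> ],
    #       'qemubox2.example.org' : [ <TestNode vnode03> ] }
    # real (non-qemu) nodes are skipped, as they have no host box to manage
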
    # a step for checking this stuff
    def show_boxes(self):
        'print summary of nodes location'
        for box, nodes in self.get_BoxNodes().items():
            print(box, ":", " + ".join( [ node.name() for node in nodes ] ))
        return True

    # make this a valid step
    def qemu_kill_all(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box, nodes) in self.get_BoxNodes().items():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir = nodes[0].nodedir()
            TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
        return True

    # make this a valid step
    def qemu_list_all(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for box, nodes in self.get_BoxNodes().items():
            # this is the brute force version, kill all qemus on that host box
            TestBoxQemu(box, self.options.buildname).qemu_list_all()
        return True

    # kill only the qemus related to this test
    def qemu_list_mine(self):
        'list qemu instances for our nodes'
        for (box, nodes) in self.get_BoxNodes().items():
            # the fine-grain version
            for node in nodes:
                node.list_qemu()
        return True

    # kill only the qemus related to this test
    def qemu_clean_mine(self):
        'cleanup (rm -rf) qemu instances for our nodes'
        for box, nodes in self.get_BoxNodes().items():
            # the fine-grain version
            for node in nodes:
                node.qemu_clean()
        return True

    # kill only the right qemus
    def qemu_kill_mine(self):
        'kill the qemu instances for our nodes'
        for box, nodes in self.get_BoxNodes().items():
            # the fine-grain version
            for node in nodes:
                node.kill_qemu()
        return True

    #################### display config
    def show(self):
        "show test configuration after localization"
        self.show_pass(1)
        self.show_pass(2)
        return True

    # ugly hack to make sure 'run export' only reports about the 1st plc
    # to avoid confusion - also we use 'inri_slice1' in various aliases..
    exported_id = 1
    def export(self):
        "print cut'n paste-able stuff to export env variables to your shell"
        # guess local domain from hostname
        if TestPlc.exported_id > 1:
            print("export GUESTHOSTNAME{:d}={}".format(TestPlc.exported_id, self.plc_spec['vservername']))
            return True
        TestPlc.exported_id += 1
        domain = socket.gethostname().split('.', 1)[1]
        fqdn = "{}.{}".format(self.plc_spec['host_box'], domain)
        print("export BUILD={}".format(self.options.buildname))
        print("export PLCHOSTLXC={}".format(fqdn))
        print("export GUESTNAME={}".format(self.vservername))
        print("export GUESTHOSTNAME={}.{}".format(self.vplchostname, domain))
        # find hostname of first node
        hostname, qemubox = self.all_node_infos()[0]
        print("export KVMHOST={}.{}".format(qemubox, domain))
        print("export NODE={}".format(hostname))
        return True

    always_display_keys = ['PLC_WWW_HOST', 'nodes', 'sites']

    def show_pass(self, passno):
        for (key, val) in self.plc_spec.items():
            if not self.options.verbose and key not in TestPlc.always_display_keys:
                continue
            if passno == 2:
                if key == 'sites':
                    for site in val:
                        self.display_site_spec(site)
                        for node in site['nodes']:
                            self.display_node_spec(node)
                elif key == 'initscripts':
                    for initscript in val:
                        self.display_initscript_spec(initscript)
                elif key == 'slices':
                    for slice in val:
                        self.display_slice_spec(slice)
                elif key == 'keys':
                    for key in val:
                        self.display_key_spec(key)
            elif passno == 1:
                if key not in ['sites', 'initscripts', 'slices', 'keys']:
                    print('+ ', key, ':', val)

    def display_site_spec(self, site):
        print('+ ======== site', site['site_fields']['name'])
        for k, v in site.items():
            if not self.options.verbose and k not in TestPlc.always_display_keys:
                continue
            if k == 'nodes':
                if v:
                    print('+ ', 'nodes : ', end=' ')
                    for node in v:
                        print(node['node_fields']['hostname'], '', end=' ')
                    print('')
            elif k == 'users':
                if v:
                    print('+ users : ', end=' ')
                    for user in v:
                        print(user['name'], '', end=' ')
                    print('')
            elif k == 'site_fields':
                print('+ login_base', ':', v['login_base'])
            elif k == 'address_fields':
                pass
            else:
                print('+ ', end=' ')
                utils.pprint(k, v)

    def display_initscript_spec(self, initscript):
        print('+ ======== initscript', initscript['initscript_fields']['name'])

    def display_key_spec(self, key):
        print('+ ======== key', key['key_name'])

    def display_slice_spec(self, slice):
        print('+ ======== slice', slice['slice_fields']['name'])
        for k, v in slice.items():
            if k == 'nodenames':
                if v:
                    print('+ nodes : ', end=' ')
                    for nodename in v:
                        print(nodename, '', end=' ')
                    print('')
            elif k == 'usernames':
                if v:
                    print('+ users : ', end=' ')
                    for username in v:
                        print(username, '', end=' ')
                    print('')
            elif k == 'slice_fields':
                print('+ fields', ':', end=' ')
                print('max_nodes=', v['max_nodes'], end=' ')
                print('')
            else:
                print('+ ', k, v)

    def display_node_spec(self, node):
        print("+ node={} host_box={}".format(node['name'], node['host_box']), end=' ')
        print("hostname=", node['node_fields']['hostname'], end=' ')
        print("ip=", node['interface_fields']['ip'])
        if self.options.verbose:
            utils.pprint("node details", node, depth=3)

    # another entry point for just showing the boxes involved
    def display_mapping(self):
        TestPlc.display_mapping_plc(self.plc_spec)
        return True

    @staticmethod
    def display_mapping_plc(plc_spec):
        print('+ MyPLC', plc_spec['name'])
        # WARNING this would not be right for lxc-based PLC's - should be harmless though
        print('+\tvserver address = root@{}:/vservers/{}'.format(plc_spec['host_box'], plc_spec['vservername']))
        print('+\tIP = {}/{}'.format(plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip']))
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)

    @staticmethod
    def display_mapping_node(node_spec):
        print('+ NODE {}'.format(node_spec['name']))
        print('+\tqemu box {}'.format(node_spec['host_box']))
        print('+\thostname={}'.format(node_spec['node_fields']['hostname']))

    # write a timestamp in /vservers/<>.timestamp
    # cannot be inside the vserver, that causes vserver .. build to cough
    def plcvm_timestamp(self):
        "Create a timestamp to remember creation date for this plc"
        now = int(time.time())
        # TODO-lxc check this one
        # a first approx. is to store the timestamp close to the VM root like vs does
        stamp_path = self.vm_timestamp_path()
        stamp_dir = os.path.dirname(stamp_path)
        utils.system(self.test_ssh.actual_command("mkdir -p {}".format(stamp_dir)))
        return utils.system(self.test_ssh.actual_command("echo {:d} > {}".format(now, stamp_path))) == 0

    # this is called unconditionally at the beginning of the test sequence
    # just in case this is a rerun, so if the vm is not running it's fine
    def plcvm_delete(self):
        "vserver delete the test myplc"
        stamp_path = self.vm_timestamp_path()
        self.run_in_host("rm -f {}".format(stamp_path))
        self.run_in_host("virsh -c lxc:/// destroy {}".format(self.vservername))
        self.run_in_host("virsh -c lxc:/// undefine {}".format(self.vservername))
        self.run_in_host("rm -fr /vservers/{}".format(self.vservername))
        return True

    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def plcvm_create(self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        if self.is_local():
            # a full path for the local calls
            build_dir = os.path.dirname(sys.argv[0])
            # sometimes this is empty - set to "." in such a case
            if not build_dir:
                build_dir = "."
            build_dir += "/build"
        else:
            # use a standard name - will be relative to remote buildname
            build_dir = "build"
        # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
        self.test_ssh.rmdir(build_dir)
        self.test_ssh.copy(build_dir, recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)

        # invoke initvm (drop support for vs)
        script = "lbuild-initvm.sh"
        script_options = ""
        # pass the vbuild-nightly options to [lv]test-initvm
        script_options += " -p {}".format(self.options.personality)
        script_options += " -d {}".format(self.options.pldistro)
        script_options += " -f {}".format(self.options.fcdistro)
        script_options += " -r {}".format(repo_url)
        vserver_name = self.vservername
        try:
            vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
            script_options += " -n {}".format(vserver_hostname)
        except:
            print("Cannot reverse lookup {}".format(self.vserverip))
            print("This is considered fatal, as this might pollute the test results")
            return False
        create_vserver = "{build_dir}/{script} {script_options} {vserver_name}".format(**locals())
        return self.run_in_host(create_vserver) == 0

    def plc_install(self):
        """
        yum install myplc, noderepo + plain bootstrapfs as well
        """

        if self.options.personality == "linux32":
            arch = "i386"
        elif self.options.personality == "linux64":
            arch = "x86_64"
        else:
            raise Exception("Unsupported personality {}".format(self.options.personality))
        nodefamily = "{}-{}-{}".format(self.options.pldistro, self.options.fcdistro, arch)

        pkgs_list = []
        pkgs_list.append("slicerepo-{}".format(nodefamily))
        pkgs_list.append("myplc")
        pkgs_list.append("noderepo-{}".format(nodefamily))
        pkgs_list.append("nodeimage-{}-plain".format(nodefamily))
        return self.yum_install(pkgs_list)

    def install_syslinux6(self):
        """
        install syslinux6 from the fedora21 release
        """
        key = 'http://mirror.onelab.eu/keys/RPM-GPG-KEY-fedora-21-primary'
        rpms = [
            'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-6.03-1.fc21.x86_64.rpm',
            'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-nonlinux-6.03-1.fc21.noarch.rpm',
            'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-perl-6.03-1.fc21.x86_64.rpm',
        ]
        # this can be done several times
        self.run_in_guest("rpm --import {key}".format(**locals()))
        return self.run_in_guest("yum -y localinstall {}".format(" ".join(rpms))) == 0

    def bonding_builds(self):
        """
        list /etc/yum.repos.d on the myplc side
        """
        self.run_in_guest("ls /etc/yum.repos.d/*partial.repo")
        return True

    def bonding_nodes(self):
        """
        List nodes known to the myplc together with their nodefamily
        """
        print("---------------------------------------- nodes")
        for node in self.apiserver.GetNodes(self.auth_root()):
            print("{} -> {}".format(node['hostname'],
                                    self.apiserver.GetNodeFlavour(self.auth_root(), node['hostname'])['nodefamily']))
        print("---------------------------------------- nodes")
        return True

    def mod_python(self):
        """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
        return self.yum_install( ['mod_python'] )

    def plc_configure(self):
        "run plc-config-tty"
        tmpname = '{}.plc-config-tty'.format(self.name())
        with open(tmpname, 'w') as fileconf:
            for (var, value) in self.plc_spec['settings'].items():
                fileconf.write('e {}\n{}\n'.format(var, value))
            fileconf.write('w\n')
            fileconf.write('q\n')
        utils.system('cat {}'.format(tmpname))
        self.run_in_guest_piped('cat {}'.format(tmpname), 'plc-config-tty')
        utils.system('rm {}'.format(tmpname))
        return True

    # f14 is a bit odd in this respect, although this worked fine in guests up to f18
    # however using a vplc guest under f20 requires this trick
    # the symptom is this: service plc start
    # Starting plc (via systemctl): Failed to get D-Bus connection: \
    #     Failed to connect to socket /org/freedesktop/systemd1/private: Connection refused
    # weird thing is the doc says f14 uses upstart by default and not systemd
    # so this sounds kind of harmless
    def start_service(self, service):
        return self.start_stop_service(service, 'start')
    def stop_service(self, service):
        return self.start_stop_service(service, 'stop')

    def start_stop_service(self, service, start_or_stop):
        "utility to start/stop a service with the special trick for f14"
        if self.options.fcdistro != 'f14':
            return self.run_in_guest("service {} {}".format(service, start_or_stop)) == 0
        else:
            # patch /sbin/service so it does not reset environment
            self.run_in_guest('sed -i -e \\"s,env -i,env,\\" /sbin/service')
            # this is because our own scripts in turn call service
            return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service {} {}"\
                                     .format(service, start_or_stop)) == 0

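    # a sketch of what the f14 branch effectively runs in the guest
    # ('plc' here is just an example service name):
    #     sed -i -e "s,env -i,env," /sbin/service
    #     SYSTEMCTL_SKIP_REDIRECT=true service plc start
    # i.e. keep the caller's environment so the skip-redirect flag is seen
    # by the legacy initscripts instead of being wiped by 'env -i'
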
    def plc_start(self):
        "service plc start"
        return self.start_service('plc')

    def plc_stop(self):
        "service plc stop"
        return self.stop_service('plc')

    def plcvm_start(self):
        "start the PLC vserver"
        self.start_guest()
        return True

    def plcvm_stop(self):
        "stop the PLC vserver"
        self.stop_guest()
        return True

    # stores the keys from the config for further use
    def keys_store(self):
        "stores test users ssh keys in keys/"
        for key_spec in self.plc_spec['keys']:
            TestKey(self, key_spec).store_key()
        return True

    def keys_clean(self):
        "removes keys cached in keys/"
        utils.system("rm -rf ./keys")
        return True

    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def keys_fetch(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        dir = "./keys"
        if not os.path.isdir(dir):
            os.mkdir(dir)
        vservername = self.vservername
        vm_root = self.vm_root_in_host()
        overall = True
        prefix = 'debug_ssh_key'
        for ext in ['pub', 'rsa'] :
            src = "{vm_root}/etc/planetlab/{prefix}.{ext}".format(**locals())
            dst = "keys/{vservername}-debug.{ext}".format(**locals())
            if self.test_ssh.fetch(src, dst) != 0:
                overall = False
        return overall

    def sites(self):
        "create sites with PLCAPI"
        return self.do_sites()

    def delete_sites(self):
        "delete sites with PLCAPI"
        return self.do_sites(action="delete")

    def do_sites(self, action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            if action != "add":
                utils.header("Deleting site {} in {}".format(test_site.name(), self.name()))
                test_site.delete_site()
                # deleted with the site
                #test_site.delete_users()
            else:
                utils.header("Creating site {} & users in {}".format(test_site.name(), self.name()))
                test_site.create_site()
                test_site.create_users()
        return True

    def delete_all_sites(self):
        "Delete all sites in PLC, and related objects"
        print('auth_root', self.auth_root())
        sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id', 'login_base'])
        for site in sites:
            # keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
            if site['login_base'] == self.plc_spec['settings']['PLC_SLICE_PREFIX']:
                continue
            site_id = site['site_id']
            print('Deleting site_id', site_id)
            self.apiserver.DeleteSite(self.auth_root(), site_id)
        return True

    def nodes(self):
        "create nodes with PLCAPI"
        return self.do_nodes()

    def delete_nodes(self):
        "delete nodes with PLCAPI"
        return self.do_nodes(action="delete")

    def do_nodes(self, action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            if action != "add":
                utils.header("Deleting nodes in site {}".format(test_site.name()))
                for node_spec in site_spec['nodes']:
                    test_node = TestNode(self, test_site, node_spec)
                    utils.header("Deleting {}".format(test_node.name()))
                    test_node.delete_node()
            else:
                utils.header("Creating nodes for site {} in {}".format(test_site.name(), self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node {}'.format(node_spec), node_spec)
                    test_node = TestNode(self, test_site, node_spec)
                    test_node.create_node()
        return True

    def nodegroups(self):
        "create nodegroups with PLCAPI"
        return self.do_nodegroups("add")

    def delete_nodegroups(self):
        "delete nodegroups with PLCAPI"
        return self.do_nodegroups("delete")

    YEAR = 365*24*3600
    @staticmethod
    def translate_timestamp(start, grain, timestamp):
        if timestamp < TestPlc.YEAR:
            # a value smaller than a year is interpreted as relative
            return start + timestamp*grain
        else:
            return timestamp

    @staticmethod
    def timestamp_printable(timestamp):
        return time.strftime('%m-%d %H:%M:%S UTC', time.gmtime(timestamp))

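    # a worked example of the timestamp convention: with grain=1800 and
    # start=(now//1800)*1800, a spec value like t_from=4 is far below YEAR,
    # so it is read as relative and becomes start + 4*1800, i.e. two hours
    # past the grain boundary; a full epoch value passes through unchanged
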
    def leases(self):
        "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
        now = int(time.time())
        grain = self.apiserver.GetLeaseGranularity(self.auth_root())
        print('API answered grain=', grain)
        start = (now//grain)*grain

        # find out all nodes that are reservable
        nodes = self.all_reservable_nodenames()
        if not nodes:
            utils.header("No reservable node found - proceeding without leases")
            return True
        ok = True
        # attach them to the leases as specified in plc_specs
        # this is where the 'leases' field gets interpreted as relative or absolute
        for lease_spec in self.plc_spec['leases']:
            # skip the ones that come with a null slice id
            if not lease_spec['slice']:
                continue
            lease_spec['t_from'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_from'])
            lease_spec['t_until'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_until'])
            lease_addition = self.apiserver.AddLeases(self.auth_root(), nodes, lease_spec['slice'],
                                                      lease_spec['t_from'], lease_spec['t_until'])
            if lease_addition['errors']:
                utils.header("Cannot create leases, {}".format(lease_addition['errors']))
                ok = False
            else:
                utils.header('Leases on nodes {} for {} from {:d} ({}) until {:d} ({})'\
                             .format(nodes, lease_spec['slice'],
                                     lease_spec['t_from'], TestPlc.timestamp_printable(lease_spec['t_from']),
                                     lease_spec['t_until'], TestPlc.timestamp_printable(lease_spec['t_until'])))
        return ok

    def delete_leases(self):
        "remove all leases in the myplc side"
        lease_ids = [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root()) ]
        utils.header("Cleaning leases {}".format(lease_ids))
        self.apiserver.DeleteLeases(self.auth_root(), lease_ids)
        return True

    def list_leases(self):
        "list all leases known to the myplc"
        leases = self.apiserver.GetLeases(self.auth_root())
        now = int(time.time())
        for l in leases:
            current = l['t_until'] >= now
            if self.options.verbose or current:
                utils.header("{} {} from {} until {}"\
                             .format(l['hostname'], l['name'],
                                     TestPlc.timestamp_printable(l['t_from']),
                                     TestPlc.timestamp_printable(l['t_until'])))
        return True

    # create nodegroups if needed, and populate
    def do_nodegroups(self, action="add"):
        # 1st pass to scan contents
        groups_dict = {}
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                if 'nodegroups' in node_spec:
                    nodegroupnames = node_spec['nodegroups']
                    if isinstance(nodegroupnames, str):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if nodegroupname not in groups_dict:
                            groups_dict[nodegroupname] = []
                        groups_dict[nodegroupname].append(test_node.name())
        auth = self.auth_root()
        overall = True
        for (nodegroupname, group_nodes) in groups_dict.items():
            if action == "add":
                print('nodegroups:', 'dealing with nodegroup',\
                      nodegroupname, 'on nodes', group_nodes)
                # first, check if the nodetagtype is here
                tag_types = self.apiserver.GetTagTypes(auth, {'tagname':nodegroupname})
                if tag_types:
                    tag_type_id = tag_types[0]['tag_type_id']
                else:
                    tag_type_id = self.apiserver.AddTagType(auth,
                                                            {'tagname' : nodegroupname,
                                                             'description' : 'for nodegroup {}'.format(nodegroupname),
                                                             'category' : 'test'})
                print('located tag (type)', nodegroupname, 'as', tag_type_id)
                nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname' : nodegroupname})
                if not nodegroups:
                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                    print('created nodegroup', nodegroupname, \
                          'from tagname', nodegroupname, 'and value', 'yes')
                # set node tag on all nodes, value='yes'
                for nodename in group_nodes:
                    try:
                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    except:
                        traceback.print_exc()
                        print('node', nodename, 'seems to already have tag', nodegroupname)
                    # check anyway
                    try:
                        expect_yes = self.apiserver.GetNodeTags(auth,
                                                                {'hostname' : nodename,
                                                                 'tagname' : nodegroupname},
                                                                ['value'])[0]['value']
                        if expect_yes != "yes":
                            print('Mismatch node tag on node', nodename, 'got', expect_yes)
                            overall = False
                    except:
                        if not self.options.dry_run:
                            print('Cannot find tag', nodegroupname, 'on node', nodename)
                            overall = False
            else:
                try:
                    print('cleaning nodegroup', nodegroupname)
                    self.apiserver.DeleteNodeGroup(auth, nodegroupname)
                except:
                    traceback.print_exc()
                    overall = False
        return overall

    # a list of TestNode objs
    def all_nodes(self):
        nodes = []
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                nodes.append(TestNode(self, test_site, node_spec))
        return nodes

    # return a list of tuples (nodename,qemuname)
    def all_node_infos(self):
        node_infos = []
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'], node_spec['host_box'])
                            for node_spec in site_spec['nodes'] ]
        return node_infos

    def all_nodenames(self):
        return [ x[0] for x in self.all_node_infos() ]

    def all_reservable_nodenames(self):
        res = []
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields = node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type'] == 'reservable':
                    res.append(node_fields['hostname'])
        return res

    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state(self, target_boot_state, timeout_minutes,
                               silent_minutes, period_seconds = 15):
        if self.options.dry_run:
            print('dry_run')
            return True

        class CompleterTaskBootState(CompleterTask):
            def __init__(self, test_plc, hostname):
                self.test_plc = test_plc
                self.hostname = hostname
                self.last_boot_state = 'undef'
            def actual_run(self):
                try:
                    node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(),
                                                            [ self.hostname ],
                                                            ['boot_state'])[0]
                    self.last_boot_state = node['boot_state']
                    return self.last_boot_state == target_boot_state
                except:
                    return False
            def message(self):
                return "CompleterTaskBootState with node {}".format(self.hostname)
            def failure_epilogue(self):
                print("node {} in state {} - expected {}"\
                      .format(self.hostname, self.last_boot_state, target_boot_state))

        timeout = timedelta(minutes=timeout_minutes)
        graceout = timedelta(minutes=silent_minutes)
        period = timedelta(seconds=period_seconds)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        utils.header("checking nodes boot state (expected {})".format(target_boot_state))
        tasks = [ CompleterTaskBootState(self, hostname)
                  for (hostname, _) in self.all_node_infos() ]
        message = 'check_boot_state={}'.format(target_boot_state)
        return Completer(tasks, message=message).run(timeout, graceout, period)

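    # how the three delays interact, as a sketch: the Completer retries every
    # 'period'; during the first 'graceout' failures stay silent (nodes are
    # expected to take that long anyway), then progress gets reported until
    # 'timeout', when failure_epilogue() runs for every unfinished task
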
    def nodes_booted(self):
        return self.nodes_check_boot_state('boot', timeout_minutes=30, silent_minutes=28)

    def probe_kvm_iptables(self):
        (_, kvmbox) = self.all_node_infos()[0]
        TestSsh(kvmbox).run("iptables-save")
        return True

    def check_nodes_ping(self, timeout_seconds=60, period_seconds=10):
        class CompleterTaskPingNode(CompleterTask):
            def __init__(self, hostname):
                self.hostname = hostname
            def run(self, silent):
                command = "ping -c 1 -w 1 {} >& /dev/null".format(self.hostname)
                return utils.system(command, silent=silent) == 0
            def failure_epilogue(self):
                print("Cannot ping node with name {}".format(self.hostname))
        timeout = timedelta(seconds = timeout_seconds)
        graceout = timeout
        period = timedelta(seconds = period_seconds)
        node_infos = self.all_node_infos()
        tasks = [ CompleterTaskPingNode(h) for (h, _) in node_infos ]
        return Completer(tasks, message='ping_node').run(timeout, graceout, period)

    # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
    def ping_node(self):
        "Ping nodes"
        return self.check_nodes_ping()

    def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period_seconds=15):
        timeout = timedelta(minutes=timeout_minutes)
        graceout = timedelta(minutes=silent_minutes)
        period = timedelta(seconds=period_seconds)
        vservername = self.vservername
        if debug:
            message = "debug"
            completer_message = 'ssh_node_debug'
            local_key = "keys/{vservername}-debug.rsa".format(**locals())
        else:
            message = "boot"
            completer_message = 'ssh_node_boot'
            local_key = "keys/key_admin.rsa"
        utils.header("checking ssh access to nodes (expected in {} mode)".format(message))
        node_infos = self.all_node_infos()
        tasks = [ CompleterTaskNodeSsh(nodename, qemuname, local_key,
                                       boot_state=message, dry_run=self.options.dry_run)
                  for (nodename, qemuname) in node_infos ]
        return Completer(tasks, message=completer_message).run(timeout, graceout, period)

    def ssh_node_debug(self):
        "Tries to ssh into nodes in debug mode with the debug ssh key"
        return self.check_nodes_ssh(debug = True,
                                    timeout_minutes = self.ssh_node_debug_timeout,
                                    silent_minutes = self.ssh_node_debug_silent)

    def ssh_node_boot(self):
        "Tries to ssh into nodes in production mode with the root ssh key"
        return self.check_nodes_ssh(debug = False,
                                    timeout_minutes = self.ssh_node_boot_timeout,
                                    silent_minutes = self.ssh_node_boot_silent)

    def node_bmlogs(self):
        "Checks that there's a non-empty dir. /var/log/bm/raw"
        return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw")) == 0

    @node_mapper
    def qemu_local_init(self): pass
    @node_mapper
    def bootcd(self): pass
    @node_mapper
    def qemu_local_config(self): pass
    @node_mapper
    def qemu_export(self): pass
    @node_mapper
    def nodestate_reinstall(self): pass
    @node_mapper
    def nodestate_upgrade(self): pass
    @node_mapper
    def nodestate_safeboot(self): pass
    @node_mapper
    def nodestate_boot(self): pass
    @node_mapper
    def nodestate_show(self): pass
    @node_mapper
    def nodedistro_f14(self): pass
    @node_mapper
    def nodedistro_f18(self): pass
    @node_mapper
    def nodedistro_f20(self): pass
    @node_mapper
    def nodedistro_f21(self): pass
    @node_mapper
    def nodeflavour_show(self): pass
    @node_mapper
    def nodeplain_on(self): pass
    @node_mapper
    def nodeplain_off(self): pass
    @node_mapper
    def nodeplain_show(self): pass

    ### check hooks : invoke scripts from hooks/{node,slice}
    def check_hooks_node(self):
        return self.locate_first_node().check_hooks()
    def check_hooks_sliver(self):
        return self.locate_first_sliver().check_hooks()

    def check_hooks(self):
        "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
        return self.check_hooks_node() and self.check_hooks_sliver()

    def do_check_initscripts(self):
        class CompleterTaskInitscript(CompleterTask):
            def __init__(self, test_sliver, stamp):
                self.test_sliver = test_sliver
                self.stamp = stamp
            def actual_run(self):
                return self.test_sliver.check_initscript_stamp(self.stamp)
            def message(self):
                return "initscript checker for {}".format(self.test_sliver.name())
            def failure_epilogue(self):
                print("initscript stamp {} not found in sliver {}"\
                      .format(self.stamp, self.test_sliver.name()))

        tasks = []
        for slice_spec in self.plc_spec['slices']:
            if 'initscriptstamp' not in slice_spec:
                continue
            stamp = slice_spec['initscriptstamp']
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                print('nodename', nodename, 'slicename', slicename, 'stamp', stamp)
                site, node = self.locate_node(nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite(self, site)
                test_slice = TestSlice(self, test_site, slice_spec)
                test_node = TestNode(self, test_site, node)
                test_sliver = TestSliver(self, test_node, test_slice)
                tasks.append(CompleterTaskInitscript(test_sliver, stamp))
        return Completer(tasks, message='check_initscripts').\
            run(timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))

    def check_initscripts(self):
        "check that the initscripts have triggered"
        return self.do_check_initscripts()

    def initscripts(self):
        "create initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc {}'.format(self.plc_spec['name']), initscript)
            self.apiserver.AddInitScript(self.auth_root(), initscript['initscript_fields'])
        return True

    def delete_initscripts(self):
        "delete initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete {} in plc {}'.format(initscript_name, self.plc_spec['name']))
            try:
                self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
                print(initscript_name, 'deleted')
            except:
                print('deletion went wrong - probably did not exist')
        return True

    def slices(self):
        "create slices with PLCAPI"
        return self.do_slices(action="add")

    def delete_slices(self):
        "delete slices with PLCAPI"
        return self.do_slices(action="delete")

    def fill_slices(self):
        "add nodes in slices with PLCAPI"
        return self.do_slices(action="fill")

    def empty_slices(self):
        "remove nodes from slices with PLCAPI"
        return self.do_slices(action="empty")

    def do_slices(self, action="add"):
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site(slice['sitename'])
            test_site = TestSite(self, site_spec)
            test_slice = TestSlice(self, test_site, slice)
            if action == "delete":
                test_slice.delete_slice()
            elif action == "fill":
                test_slice.add_nodes()
            elif action == "empty":
                test_slice.delete_nodes()
            else:
                test_slice.create_slice()
        return True

    @slice_mapper__tasks(20, 10, 15)
    def ssh_slice(self): pass
    @slice_mapper__tasks(20, 19, 15)
    def ssh_slice_off(self): pass
    @slice_mapper__tasks(1, 1, 15)
    def slice_fs_present(self): pass
    @slice_mapper__tasks(1, 1, 15)
    def slice_fs_deleted(self): pass

    # use another name so we can exclude/ignore it from the tests on the nightly command line
    def ssh_slice_again(self): return self.ssh_slice()
    # note that simply doing ssh_slice_again = ssh_slice would kind of work too
    # but for some reason the ignore-wrapping thing would not

    @slice_mapper
    def ssh_slice_basics(self): pass
    @slice_mapper
    def check_vsys_defaults(self): pass

    @node_mapper
    def keys_clear_known_hosts(self): pass

    def plcapi_urls(self):
        """
        attempts to reach the PLCAPI with various forms for the URL
        """
        return PlcapiUrlScanner(self.auth_root(), ip=self.vserverip).scan()

    def speed_up_slices(self):
        "tweak nodemanager cycle (wait time) to 30+/-10 s"
        return self._speed_up_slices(30, 10)

    def super_speed_up_slices(self):
        "dev mode: tweak nodemanager cycle (wait time) to 5+/-1 s"
        return self._speed_up_slices(5, 1)

    def _speed_up_slices(self, p, r):
        # create the template on the server-side
        template = "{}.nodemanager".format(self.name())
        with open(template, "w") as template_file:
            template_file.write('OPTIONS="-p {} -r {} -d"\n'.format(p, r))
        in_vm = "/var/www/html/PlanetLabConf/nodemanager"
        remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
        self.test_ssh.copy_abs(template, remote)
        # declare the conf file to the PLC so nodes fetch it
        if not self.apiserver.GetConfFiles(self.auth_root(),
                                           {'dest' : '/etc/sysconfig/nodemanager'}):
            self.apiserver.AddConfFile(self.auth_root(),
                                       {'dest' : '/etc/sysconfig/nodemanager',
                                        'source' : 'PlanetLabConf/nodemanager',
                                        'postinstall_cmd' : 'service nm restart',})
        return True

    def debug_nodemanager(self):
        "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
        template = "{}.nodemanager".format(self.name())
        with open(template, "w") as template_file:
            template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
        in_vm = "/var/www/html/PlanetLabConf/nodemanager"
        remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
        self.test_ssh.copy_abs(template, remote)
        return True

    @node_mapper
    def qemu_start(self): pass

    @node_mapper
    def qemu_timestamp(self): pass

    @node_mapper
    def qemu_nodefamily(self): pass

    # when a spec refers to a node possibly on another plc
    def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
        for plc in [ self ] + other_plcs:
            try:
                return plc.locate_sliver_obj(nodename, slicename)
            except:
                pass
        raise Exception("Cannot locate sliver {}@{} among all PLCs".format(nodename, slicename))

    # implement this one as a cross step so that we can take advantage of different nodes
    # in multi-plcs mode
    def cross_check_tcp(self, other_plcs):
        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
            utils.header("check_tcp: no/empty config found")
            return True
        specs = self.plc_spec['tcp_specs']
        overall = True

        # first wait for the network to be up and ready from the slices
        class CompleterTaskNetworkReadyInSliver(CompleterTask):
            def __init__(self, test_sliver):
                self.test_sliver = test_sliver
            def actual_run(self):
                return self.test_sliver.check_tcp_ready(port = 9999)
            def message(self):
                return "network ready checker for {}".format(self.test_sliver.name())
            def failure_epilogue(self):
                print("could not bind port from sliver {}".format(self.test_sliver.name()))

        tasks = []
        managed_sliver_names = set()
        for spec in specs:
            # locate the TestSliver instances involved, and cache them in the spec instance
            spec['s_sliver'] = self.locate_sliver_obj_cross(spec['server_node'], spec['server_slice'], other_plcs)
            spec['c_sliver'] = self.locate_sliver_obj_cross(spec['client_node'], spec['client_slice'], other_plcs)
            message = "Will check TCP between s={} and c={}"\
                      .format(spec['s_sliver'].name(), spec['c_sliver'].name())
            if 'client_connect' in spec:
                message += " (using {})".format(spec['client_connect'])
            utils.header(message)
            # we need to check network presence in both slivers, but also
            # avoid inserting a sliver several times
            for sliver in [ spec['s_sliver'], spec['c_sliver'] ]:
                if sliver.name() not in managed_sliver_names:
                    tasks.append(CompleterTaskNetworkReadyInSliver(sliver))
                    # add this sliver's name in the set
                    managed_sliver_names.update( {sliver.name()} )

        # wait for the network to be OK in all server sides
        if not Completer(tasks, message='check for network readiness in slivers').\
               run(timedelta(seconds=30), timedelta(seconds=24), period=timedelta(seconds=5)):
            return False

        # run server and client
        for spec in specs:
            port = spec['port']
            # the issue here is that we have the server run in background
            # and so we have no clue if it took off properly or not
            # looks like in some cases it does not
            if not spec['s_sliver'].run_tcp_server(port, timeout=20):
                overall = False
                break
            # idem for the client side
            # use nodename from located sliver, unless 'client_connect' is set
            if 'client_connect' in spec:
                destination = spec['client_connect']
            else:
                destination = spec['s_sliver'].test_node.name()
            if not spec['c_sliver'].run_tcp_client(destination, port):
                overall = False
        return overall

    # painfully enough, we need to allow for some time as netflow might show up last
    def check_system_slice(self):
        "all nodes: check that a system slice is alive"
        # netflow currently not working in the lxc distro
        # drl not built at all in the wtx distro
        # if we find either of them we're happy
        return self.check_netflow() or self.check_drl()

    def check_netflow(self): return self._check_system_slice('netflow')
    def check_drl(self): return self._check_system_slice('drl')

    # we have the slices up already here, so it should not take too long
    def _check_system_slice(self, slicename, timeout_minutes=5, period_seconds=15):
        class CompleterTaskSystemSlice(CompleterTask):
            def __init__(self, test_node, dry_run):
                self.test_node = test_node
                self.dry_run = dry_run
            def actual_run(self):
                return self.test_node._check_system_slice(slicename, dry_run=self.dry_run)
            def message(self):
                return "System slice {} @ {}".format(slicename, self.test_node.name())
            def failure_epilogue(self):
                print("COULD not find system slice {} @ {}".format(slicename, self.test_node.name()))
        timeout = timedelta(minutes=timeout_minutes)
        silent = timedelta(0)
        period = timedelta(seconds=period_seconds)
        tasks = [ CompleterTaskSystemSlice(test_node, self.options.dry_run)
                  for test_node in self.all_nodes() ]
        return Completer(tasks, message='_check_system_slice').run(timeout, silent, period)

    def plcsh_stress_test(self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote = "{}/{}".format(self.vm_root_in_host(), location)
        self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
        command = location
        command += " -- --check"
        if self.options.size == 1:
            command += " --tiny"
        return self.run_in_guest(command) == 0

    # populate runs the same utility with slightly different options
    # in particular it runs with --preserve (don't cleanup) and without --check
    # also it gets run twice, once with the --foreign option for creating fake foreign entries

    def sfa_install_all(self):
        "yum install sfa sfa-plc sfa-sfatables sfa-client"
        return self.yum_install("sfa sfa-plc sfa-sfatables sfa-client")

    def sfa_install_core(self):
        "yum install sfa"
        return self.yum_install("sfa")

    def sfa_install_plc(self):
        "yum install sfa-plc"
        return self.yum_install("sfa-plc")

    def sfa_install_sfatables(self):
        "yum install sfa-sfatables"
        return self.yum_install("sfa-sfatables")

    # for some very odd reason, this sometimes fails with the following symptom
    # # yum install sfa-client
    # Setting up Install Process
    # Downloading Packages:
    # Running rpm_check_debug
    # Running Transaction Test
    # Transaction Test Succeeded
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
    # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
    # even though in the same context I have
    # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
    # Filesystem            Size  Used Avail Use% Mounted on
    # /dev/hdv1             806G  264G  501G  35% /
    # none                   16M   36K   16M   1% /tmp
    #
    # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
    def sfa_install_client(self):
        "yum install sfa-client"
        first_try = self.yum_install("sfa-client")
        if first_try:
            return True
        utils.header("********** Regular yum failed - special workaround in place, 2nd chance")
        code, cached_rpm_path = \
            utils.output_of(self.actual_command_in_guest(r'find /var/cache/yum -name sfa-client\*.rpm'))
        utils.header("rpm_path=<<{}>>".format(cached_rpm_path))
        self.run_in_guest("rpm -i {}".format(cached_rpm_path))
        return self.yum_check_installed("sfa-client")

    def sfa_dbclean(self):
        "thoroughly wipes off the SFA database"
        return self.run_in_guest("sfaadmin reg nuke") == 0 or \
               self.run_in_guest("sfa-nuke.py") == 0 or \
               self.run_in_guest("sfa-nuke-plc.py") == 0 or \
               self.run_in_guest("sfaadmin registry nuke") == 0

    def sfa_fsclean(self):
        "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
        self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
        return True

    def sfa_plcclean(self):
        "cleans the PLC entries that were created as a side effect of running the script"
        sfa_spec = self.plc_spec['sfa']

        for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
            login_base = auth_sfa_spec['login_base']
            try:
                self.apiserver.DeleteSite(self.auth_root(), login_base)
            except:
                print("Site {} already absent from PLC db".format(login_base))

            for spec_name in ['pi_spec', 'user_spec']:
                user_spec = auth_sfa_spec[spec_name]
                username = user_spec['email']
                try:
                    self.apiserver.DeletePerson(self.auth_root(), username)
                except:
                    # this in fact is expected as sites delete their members
                    #print "User {} already absent from PLC db".format(username)
                    pass

        print("REMEMBER TO RUN sfa_import AGAIN")
        return True

    def sfa_uninstall(self):
        "uses rpm to uninstall sfa - ignore result"
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
        self.run_in_guest("rm -rf /etc/sfa")
        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
        self.run_in_guest("rpm -e --noscripts sfa-plc")
        return True

    ### run unit tests for SFA
    # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
    # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
    # no matter how many Gbs are available on the testplc
    # could not figure out what's wrong, so...
    # if the yum install phase fails, consider the test is successful
    # other combinations will eventually run it hopefully
    def sfa_utest(self):
        "yum install sfa-tests and run SFA unittests"
        self.run_in_guest("yum -y install sfa-tests")
        # failed to install - forget it
        if self.run_in_guest("rpm -q sfa-tests") != 0:
            utils.header("WARNING: SFA unit tests failed to install, ignoring")
            return True
        return self.run_in_guest("/usr/share/sfa/tests/testAll.py") == 0

    def confdir(self):
        dirname = "conf.{}".format(self.plc_spec['name'])
        if not os.path.isdir(dirname):
            utils.system("mkdir -p {}".format(dirname))
        if not os.path.isdir(dirname):
            raise Exception("Cannot create config dir for plc {}".format(self.name()))
        return dirname

    def conffile(self, filename):
        return "{}/{}".format(self.confdir(), filename)

    def confsubdir(self, dirname, clean, dry_run=False):
        subdirname = "{}/{}".format(self.confdir(), dirname)
        if clean:
            utils.system("rm -rf {}".format(subdirname))
        if not os.path.isdir(subdirname):
            utils.system("mkdir -p {}".format(subdirname))
        if not dry_run and not os.path.isdir(subdirname):
            raise Exception("Cannot create config subdir {} for plc {}".format(dirname, self.name()))
        return subdirname

    def conffile_clean(self, filename):
        filename = self.conffile(filename)
        return utils.system("rm -rf {}".format(filename)) == 0


    def sfa_configure(self):
        "run sfa-config-tty"
        tmpname = self.conffile("sfa-config-tty")
        with open(tmpname, 'w') as fileconf:
            for (var, value) in self.plc_spec['sfa']['settings'].items():
                fileconf.write('e {}\n{}\n'.format(var, value))
            fileconf.write('w\n')
            fileconf.write('R\n')
            fileconf.write('q\n')
        utils.system('cat {}'.format(tmpname))
        self.run_in_guest_piped('cat {}'.format(tmpname), 'sfa-config-tty')
        return True
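    # the generated file drives sfa-config-tty in batch mode: 'e <var>' followed
    # by the value edits one setting, and the trailing 'w', 'R', 'q' are -
    # presumably - write, restart and quit; e.g. with a hypothetical setting
    # SFA_REGISTRY_ROOT_AUTH=plta the file would start with:
    #   e SFA_REGISTRY_ROOT_AUTH
    #   plta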

    def aggregate_xml_line(self):
        port = self.plc_spec['sfa']['neighbours-port']
        return '<aggregate addr="{}" hrn="{}" port="{}"/>'\
            .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'], port)

    def registry_xml_line(self):
        return '<registry addr="{}" hrn="{}" port="12345"/>'\
            .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
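    # with hypothetical values vserverip=192.168.1.2, root auth 'plta' and
    # neighbours-port 12346, these produce:
    #   <aggregate addr="192.168.1.2" hrn="plta" port="12346"/>
    #   <registry addr="192.168.1.2" hrn="plta" port="12345"/>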

    # a cross step that takes all other plcs in argument
    def cross_sfa_configure(self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        agg_fname = self.conffile("agg.xml")
        with open(agg_fname, "w") as out:
            out.write("<aggregates>{}</aggregates>\n"\
                      .format(" ".join([plc.aggregate_xml_line() for plc in other_plcs])))
        utils.header("(Over)wrote {}".format(agg_fname))
        reg_fname = self.conffile("reg.xml")
        with open(reg_fname, "w") as out:
            out.write("<registries>{}</registries>\n"\
                      .format(" ".join([plc.registry_xml_line() for plc in other_plcs])))
        utils.header("(Over)wrote {}".format(reg_fname))
        return self.test_ssh.copy_abs(agg_fname,
                                      '/{}/etc/sfa/aggregates.xml'.format(self.vm_root_in_host())) == 0 \
           and self.test_ssh.copy_abs(reg_fname,
                                      '/{}/etc/sfa/registries.xml'.format(self.vm_root_in_host())) == 0

    def sfa_import(self):
        "use sfaadmin to import from plc"
        auth = self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']
        return self.run_in_guest('sfaadmin reg import_registry') == 0

    def sfa_start(self):
        "start the sfa service"
        return self.start_service('sfa')

    def sfi_configure(self):
        "Create /root/sfi on the plc side for sfi client configuration"
        if self.options.dry_run:
            utils.header("DRY RUN - skipping step")
            return True
        sfa_spec = self.plc_spec['sfa']
        # cannot use auth_sfa_mapper to pass dir_name
        for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_slice = TestAuthSfa(self, slice_spec)
            dir_basename = os.path.basename(test_slice.sfi_path())
            dir_name = self.confsubdir("dot-sfi/{}".format(dir_basename),
                                       clean=True, dry_run=self.options.dry_run)
            test_slice.sfi_configure(dir_name)
            # push into the remote /root/sfi area
            location = test_slice.sfi_path()
            remote = "{}/{}".format(self.vm_root_in_host(), location)
            self.test_ssh.mkdir(remote, abs=True)
            # need to strip the last level of remote, otherwise we'd get an extra dir level
            self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
        return True
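    # e.g. (hypothetical names) with dir_name=conf.plc/dot-sfi/slc and
    # remote=/vservers/plc/root/sfi/slc, the recursive copy into
    # os.path.dirname(remote) lands as /vservers/plc/root/sfi/slc,
    # whereas copying into remote itself would create .../sfi/slc/slc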

    def sfi_clean(self):
        "clean up /root/sfi on the plc side"
        self.run_in_guest("rm -rf /root/sfi")
        return True

    def sfa_rspec_empty(self):
        "expose a static empty rspec (ships with the tests module) in the sfi directory"
        filename = "empty-rspec.xml"
        overall = True
        for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_slice = TestAuthSfa(self, slice_spec)
            in_vm = test_slice.sfi_path()
            remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
            if self.test_ssh.copy_abs(filename, remote) != 0:
                overall = False
        return overall

    @auth_sfa_mapper
    def sfa_register_site(self): pass
    @auth_sfa_mapper
    def sfa_register_pi(self): pass
    @auth_sfa_mapper
    def sfa_register_user(self): pass
    @auth_sfa_mapper
    def sfa_update_user(self): pass
    @auth_sfa_mapper
    def sfa_register_slice(self): pass
    @auth_sfa_mapper
    def sfa_renew_slice(self): pass
    @auth_sfa_mapper
    def sfa_get_expires(self): pass
    @auth_sfa_mapper
    def sfa_discover(self): pass
    @auth_sfa_mapper
    def sfa_rspec(self): pass
    @auth_sfa_mapper
    def sfa_allocate(self): pass
    @auth_sfa_mapper
    def sfa_allocate_empty(self): pass
    @auth_sfa_mapper
    def sfa_provision(self): pass
    @auth_sfa_mapper
    def sfa_provision_empty(self): pass
    @auth_sfa_mapper
    def sfa_describe(self): pass
    @auth_sfa_mapper
    def sfa_check_slice_plc(self): pass
    @auth_sfa_mapper
    def sfa_check_slice_plc_empty(self): pass
    @auth_sfa_mapper
    def sfa_update_slice(self): pass
    @auth_sfa_mapper
    def sfa_remove_user_from_slice(self): pass
    @auth_sfa_mapper
    def sfa_insert_user_in_slice(self): pass
    @auth_sfa_mapper
    def sfi_list(self): pass
    @auth_sfa_mapper
    def sfi_show_site(self): pass
    @auth_sfa_mapper
    def sfi_show_slice(self): pass
    @auth_sfa_mapper
    def sfi_show_slice_researchers(self): pass
    @auth_sfa_mapper
    def ssh_slice_sfa(self): pass
    @auth_sfa_mapper
    def sfa_delete_user(self): pass
    @auth_sfa_mapper
    def sfa_delete_slice(self): pass

    def sfa_stop(self):
        "stop the sfa service"
        return self.stop_service('sfa')
1806 "creates random entries in the PLCAPI"
1807 # install the stress-test in the plc image
1808 location = "/usr/share/plc_api/plcsh_stress_test.py"
1809 remote = "{}/{}".format(self.vm_root_in_host(), location)
1810 self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1812 command += " -- --preserve --short-names"
1813 local = (self.run_in_guest(command) == 0);
1814 # second run with --foreign
1815 command += ' --foreign'
1816 remote = (self.run_in_guest(command) == 0);
1817 return local and remote
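    # net effect: the script runs twice in the guest, first as
    #   /usr/share/plc_api/plcsh_stress_test.py -- --preserve --short-names
    # and then again with ' --foreign' appended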

    ####################
    # these steps are redirected to TestBonding via bonding_redirector
    @bonding_redirector
    def bonding_init_partial(self): pass

    @bonding_redirector
    def bonding_add_yum(self): pass

    @bonding_redirector
    def bonding_install_rpms(self): pass

    ####################
    def gather_logs(self):
        "gets all possible logs from plcs / qemu nodes / slices for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
        # (2) get all the nodes' qemu logs and store them as logs/node.qemu.<node>.log
        # (3) get the nodes' /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible, get the slices' /var/log as logs/sliver.var-log.<sliver>/*
        # (1.a)
        print("-------------------- TestPlc.gather_logs : PLC's /var/log")
        self.gather_var_logs()
        # (1.b)
        print("-------------------- TestPlc.gather_logs : PLC's /var/lib/pgsql/data/pg_log/")
        self.gather_pgsql_logs()
        # (1.c)
        print("-------------------- TestPlc.gather_logs : PLC's /root/sfi/")
        self.gather_root_sfi()
        # (2)
        print("-------------------- TestPlc.gather_logs : nodes' QEMU logs")
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                test_node.gather_qemu_logs()
        # (3)
        print("-------------------- TestPlc.gather_logs : nodes' /var/log")
        self.gather_nodes_var_logs()
        # (4)
        print("-------------------- TestPlc.gather_logs : sample sliver's /var/log")
        self.gather_slivers_var_logs()
        return True

    def gather_slivers_var_logs(self):
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.{}".format(test_sliver.name()))
            command = remote + " | tar -C logs/sliver.var-log.{} -xf -".format(test_sliver.name())
            utils.system(command)
        return True
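    # all the gather_* helpers use the same trick: build a command that tars the
    # remote directory to stdout, and pipe it into a local untar, along the
    # lines of (hypothetical names):
    #   ssh root@plc 'tar -C /var/log -cf - .' | tar -C logs/myplc.var-log.plc1 -xf -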

    def gather_var_logs(self):
        utils.system("mkdir -p logs/myplc.var-log.{}".format(self.name()))
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.var-log.{} -xf -".format(self.name())
        utils.system(command)
        command = "chmod a+r,a+x logs/myplc.var-log.{}/httpd".format(self.name())
        utils.system(command)

    def gather_pgsql_logs(self):
        utils.system("mkdir -p logs/myplc.pgsql-log.{}".format(self.name()))
        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.pgsql-log.{} -xf -".format(self.name())
        utils.system(command)

    def gather_root_sfi(self):
        utils.system("mkdir -p logs/sfi.{}".format(self.name()))
        to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
        command = to_plc + "| tar -C logs/sfi.{} -xf -".format(self.name())
        utils.system(command)

    def gather_nodes_var_logs(self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                test_ssh = TestSsh(test_node.name(), key="keys/key_admin.rsa")
                command = test_ssh.actual_command("tar -C /var/log -cf - .")
                command = command + "| tar -C logs/node.var-log.{} -xf -".format(test_node.name())
                utils.system("mkdir -p logs/node.var-log.{}".format(test_node.name()))
                utils.system(command)

    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile(self, database):
        # uses options.dbname if it is found
        try:
            name = self.options.dbname
            if not isinstance(name, str):
                raise Exception
        except:
            # fall back on the current date
            t = datetime.now()
            d = t.date()
            name = str(d)
        return "/root/{}-{}.sql".format(database, name)
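    # examples (hypothetical): with options.dbname='nightly' this yields
    # '/root/planetlab5-nightly.sql'; without it, something like
    # '/root/planetlab5-2014-05-21.sql'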

    def plc_db_dump(self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        dump = self.dbfile("planetlab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f ' + dump)
        utils.header('Dumped planetlab5 database in {}'.format(dump))
        return True

    def plc_db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        dump = self.dbfile("planetlab5")
        # stop the httpd service while the db is being swapped
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab5', 'psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump)
        # restart httpd
        self.run_in_guest('service httpd start')
        utils.header('Database restored from ' + dump)
        return True
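    # run_in_guest_piped pipes its first command into the second inside the
    # guest, so the drop above amounts to running
    #   echo drop database planetlab5 | psql --user=pgsqluser template1
    # in the plc VM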

    @staticmethod
    def create_ignore_steps():
        for step in TestPlc.default_steps + TestPlc.other_steps:
            # default steps can have a plc qualifier
            if '@' in step:
                step, qualifier = step.split('@')
            # or be defined as forced or ignored by default
            for keyword in ['_ignore', '_force']:
                if step.endswith(keyword):
                    step = step.replace(keyword, '')
            if step == SEP or step == SEPSFA:
                continue
            method = getattr(TestPlc, step)
            name = step + '_ignore'
            wrapped = ignore_result(method)
            # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
            setattr(TestPlc, name, wrapped)
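    # e.g. for a step named 'sfa_import' this attaches a new 'sfa_import_ignore'
    # method to TestPlc that runs the original step but reports its result as
    # ignored, so a failure does not abort the rest of the run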

    # def ssh_slice_again_ignore (self): pass
    # def check_initscripts_ignore (self): pass

    def standby_1_through_20(self):
        """convenience function to wait for a specified number of minutes"""
        pass
    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass
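    # the standby_generic decorator parses the duration out of the method name,
    # so e.g. standby_7 simply waits for 7 minutes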

    # convenience for debugging the test logic
    def yes(self): return True
    def no(self): return False
    def fail(self): return False