1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
12 from Completer import Completer, CompleterTask
13 from TestSite import TestSite
14 from TestNode import TestNode, CompleterTaskNodeSsh
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
25 from TestBonding import TestBonding
27 has_sfa_cache_filename="sfa-cache"
29 # step methods must take (self) and return a boolean (options is a member of the class)
31 def standby(minutes, dry_run):
32 utils.header('Entering StandBy for {:d} minutes'.format(minutes))
33 if dry_run:
34 print('dry_run')
35 else:
36 time.sleep(60*minutes)
37 return True
39 def standby_generic(func):
40 def actual(self):
41 minutes = int(func.__name__.split("_")[1])
42 return standby(minutes, self.options.dry_run)
43 return actual
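# usage sketch: the standby steps in the step lists below are stubs whose
# names encode a duration; a hypothetical example:
#     @standby_generic
#     def standby_5_10(self): pass
# the wrapper parses the '5' out of the name and sleeps for 5 minutes.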
45 def node_mapper(method):
46 def map_on_nodes(self, *args, **kwds):
47 overall = True
48 node_method = TestNode.__dict__[method.__name__]
49 for test_node in self.all_nodes():
50 if not node_method(test_node, *args, **kwds):
51 overall = False
52 return overall
53 # maintain __name__ for ignore_result
54 map_on_nodes.__name__ = method.__name__
55 # restore the doc text
56 map_on_nodes.__doc__ = TestNode.__dict__[method.__name__].__doc__
57 return map_on_nodes
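# usage sketch: node_mapper lifts a TestNode method into a TestPlc step;
# e.g. a step declared (decorator assumed, the decorators are elided above
# the stubs further down) as
#     @node_mapper
#     def qemu_local_init(self): pass
# runs TestNode.qemu_local_init on every node and ANDs the results.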
59 def slice_mapper(method):
60 def map_on_slices(self):
61 overall = True
62 slice_method = TestSlice.__dict__[method.__name__]
63 for slice_spec in self.plc_spec['slices']:
64 site_spec = self.locate_site (slice_spec['sitename'])
65 test_site = TestSite(self,site_spec)
66 test_slice = TestSlice(self,test_site,slice_spec)
67 if not slice_method(test_slice, self.options):
68 overall = False
69 return overall
70 # maintain __name__ for ignore_result
71 map_on_slices.__name__ = method.__name__
72 # restore the doc text
73 map_on_slices.__doc__ = TestSlice.__dict__[method.__name__].__doc__
74 return map_on_slices
76 def bonding_redirector(method):
77 bonding_name = method.__name__.replace('bonding_', '')
78 def redirect(self):
79 bonding_method = TestBonding.__dict__[bonding_name]
80 return bonding_method(self.test_bonding)
81 # maintain __name__ for ignore_result
82 redirect.__name__ = method.__name__
83 # restore the doc text
84 redirect.__doc__ = TestBonding.__dict__[bonding_name].__doc__
85 return redirect
87 # run a step but return True so that we can go on
88 def ignore_result(method):
89 def ignoring(self):
90 # ssh_slice_ignore->ssh_slice
91 ref_name = method.__name__.replace('_ignore', '').replace('force_', '')
92 ref_method = TestPlc.__dict__[ref_name]
93 result = ref_method(self)
94 print("Actual (but ignored) result for {ref_name} is {result}".format(**locals()))
95 return Ignored(result)
96 name = method.__name__.replace('_ignore', '').replace('force_', '')
97 ignoring.__name__ = name
98 ignoring.__doc__ = "ignored version of " + name
99 return ignoring
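# usage sketch: wrapping an existing step under a *_ignore name, as with
# slice_fs_deleted_ignore in default_steps below, would look like
#     @ignore_result
#     def slice_fs_deleted_ignore(self): pass
# the step still runs, but its outcome is wrapped in Ignored so a failure
# does not abort the sequence.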
101 # a variant that expects the TestSlice method to return a list of CompleterTasks that
102 # are then merged into a single Completer run, to avoid waiting for all the slices -
103 # especially useful when a test fails, of course
104 # because we need to pass arguments, we use a class instead
105 class slice_mapper__tasks(object):
106 # could not get this to work with named arguments
107 def __init__(self, timeout_minutes, silent_minutes, period_seconds):
108 self.timeout = timedelta(minutes = timeout_minutes)
109 self.silent = timedelta(minutes = silent_minutes)
110 self.period = timedelta(seconds = period_seconds)
111 def __call__(self, method):
112 decorator_self = self
113 # compute augmented method name
114 method_name = method.__name__ + "__tasks"
115 # locate in TestSlice
116 slice_method = TestSlice.__dict__[method_name]
117 def wrappee(self):
118 tasks = []
119 for slice_spec in self.plc_spec['slices']:
120 site_spec = self.locate_site (slice_spec['sitename'])
121 test_site = TestSite(self, site_spec)
122 test_slice = TestSlice(self, test_site, slice_spec)
123 tasks += slice_method (test_slice, self.options)
124 return Completer (tasks, message=method.__name__).\
125 run(decorator_self.timeout, decorator_self.silent, decorator_self.period)
126 # restore the doc text from the TestSlice method even if a bit odd
127 wrappee.__name__ = method.__name__
128 wrappee.__doc__ = slice_method.__doc__
129 return wrappee
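# usage sketch: the decorator carries the Completer timings, as used below:
#     @slice_mapper__tasks(20, 10, 15)
#     def ssh_slice(self): pass
# i.e. wait up to 20 minutes, stay silent for the first 10, poll every 15
# seconds, with the individual tasks coming from TestSlice.ssh_slice__tasks.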
131 def auth_sfa_mapper(method):
132 def actual(self):
133 overall = True
134 auth_method = TestAuthSfa.__dict__[method.__name__]
135 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
136 test_auth = TestAuthSfa(self, auth_spec)
137 if not auth_method(test_auth, self.options):
138 overall = False
139 return overall
140 # restore the doc text
141 actual.__doc__ = TestAuthSfa.__dict__[method.__name__].__doc__
142 return actual
144 class Ignored:
145 def __init__(self, result):
146 self.result = result
155 'plcvm_delete', 'plcvm_timestamp', 'plcvm_create', SEP,
156 'django_install', 'plc_install', 'plc_configure', 'plc_start', SEP,
157 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
158 'plcapi_urls', 'speed_up_slices', SEP,
159 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
160 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
161 # keep this out of the way for now
162 'check_vsys_defaults_ignore', SEP,
163 # run this first off so it's easier to re-run on another qemu box
164 'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init',
165 'bootcd', 'qemu_local_config', SEP,
166 'qemu_clean_mine', 'qemu_export', 'qemu_cleanlog', SEP,
167 'qemu_start', 'qemu_timestamp', 'qemu_nodefamily', SEP,
168 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure',
169 'sfa_start', 'sfa_import', SEPSFA,
170 'sfi_configure@1', 'sfa_register_site@1', 'sfa_register_pi@1', SEPSFA,
171 'sfa_register_user@1', 'sfa_update_user@1',
172 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
173 'sfa_remove_user_from_slice@1', 'sfi_show_slice_researchers@1',
174 'sfa_insert_user_in_slice@1', 'sfi_show_slice_researchers@1', SEPSFA,
175 'sfa_discover@1', 'sfa_rspec@1', SEPSFA,
176 'sfa_allocate@1', 'sfa_provision@1', 'sfa_describe@1', SEPSFA,
177 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
178 'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
179 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
180 # but as the stress test might take a while, we sometimes missed the debug mode..
181 'probe_kvm_iptables',
182 'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
183 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', SEP,
184 'ssh_slice_sfa@1', SEPSFA,
185 'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1',
186 'sfa_check_slice_plc_empty@1', SEPSFA,
187 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
188 'cross_check_tcp@1', 'check_system_slice', SEP,
189 # for inspecting the slice while it runs the first time
191 # check slices are turned off properly
193 'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
194 # check they are properly re-created with the same name
195 'fill_slices', 'ssh_slice_again', SEP,
196 'gather_logs_force', SEP,
199 'export', 'show_boxes', 'super_speed_up_slices', SEP,
200 'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
201 'delete_initscripts', 'delete_nodegroups', 'delete_all_sites', SEP,
202 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
203 'delete_leases', 'list_leases', SEP,
205 'nodestate_show', 'nodestate_safeboot', 'nodestate_boot', 'nodestate_upgrade', SEP,
206 'nodedistro_show', 'nodedistro_f14', 'nodedistro_f18', SEP,
207 'nodedistro_f20', 'nodedistro_f21', 'nodedistro_f22', SEP,
208 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
209 'sfa_install_core', 'sfa_install_sfatables',
210 'sfa_install_plc', 'sfa_install_client', SEPSFA,
211 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop', 'sfa_uninstall', 'sfi_clean', SEPSFA,
212 'sfa_get_expires', SEPSFA,
213 'plc_db_dump', 'plc_db_restore', SEP,
214 'check_netflow', 'check_drl', SEP,
215 'slice_fs_present', 'check_initscripts', SEP,
216 'standby_1_through_20', 'yes', 'no', SEP,
217 'install_syslinux6', 'bonding_builds', 'bonding_nodes', SEP,
219 default_bonding_steps = [
220 'bonding_init_partial',
222 'bonding_install_rpms', SEP,
223 ]
225 @staticmethod
226 def printable_steps(steps):
227 single_line = " ".join(steps) + " "
228 return single_line.replace(" "+SEP+" ", " \\\n").replace(" "+SEPSFA+" ", " \\\n")
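# worked example: printable_steps(['plc_install', SEP, 'plc_configure'])
# first joins into one space-separated line, then rewrites each ' SEP '
# (or ' SEPSFA ') occurrence as a backslash-newline, so the step list
# prints one group per line, ready to cut and paste.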
229 @staticmethod
230 def valid_step(step):
231 return step != SEP and step != SEPSFA
233 # turn off the sfa-related steps when build has skipped SFA
234 # this was originally for centos5 but is still valid
235 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
236 @staticmethod
237 def _has_sfa_cached(rpms_url):
238 if os.path.isfile(has_sfa_cache_filename):
239 with open(has_sfa_cache_filename) as cache:
240 cached = cache.read() == "yes"
241 utils.header("build provides SFA (cached):{}".format(cached))
242 return cached
243 # warning, we're now building 'sface' so let's be a bit more picky
244 # full builds are expected to return with 0 here
245 utils.header("Checking if build provides SFA package...")
246 retcod = utils.system("curl --silent {}/ | grep -q sfa-".format(rpms_url)) == 0
247 encoded = 'yes' if retcod else 'no'
248 with open(has_sfa_cache_filename, 'w') as cache:
249 cache.write(encoded)
250 return retcod
252 @staticmethod
253 def check_whether_build_has_sfa(rpms_url):
254 has_sfa = TestPlc._has_sfa_cached(rpms_url)
255 if has_sfa:
256 utils.header("build does provide SFA")
257 else:
258 # move all steps containing 'sfa' from default_steps to other_steps
259 utils.header("SFA package not found - removing steps with sfa or sfi")
260 sfa_steps = [ step for step in TestPlc.default_steps
261 if 'sfa' in step or 'sfi' in step ]
262 TestPlc.other_steps += sfa_steps
263 for step in sfa_steps:
264 TestPlc.default_steps.remove(step)
266 def __init__(self, plc_spec, options):
267 self.plc_spec = plc_spec
268 self.options = options
269 self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
270 self.vserverip = plc_spec['vserverip']
271 self.vservername = plc_spec['vservername']
272 self.vplchostname = self.vservername.split('-')[-1]
273 self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
274 self.apiserver = TestApiserver(self.url, options.dry_run)
275 (self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
276 (self.ssh_node_debug_timeout, self.ssh_node_debug_silent) = plc_spec['ssh_node_debug_timers']
278 def has_addresses_api(self):
279 return self.apiserver.has_method('AddIpAddress')
281 def name(self):
282 name = self.plc_spec['name']
283 return "{}.{}".format(name,self.vservername)
286 return self.plc_spec['host_box']
289 return self.test_ssh.is_local()
291 # define the API methods on this object through xmlrpc
292 # would help, but not strictly necessary
296 def actual_command_in_guest(self,command, backslash=False):
297 raw1 = self.host_to_guest(command)
298 raw2 = self.test_ssh.actual_command(raw1, dry_run=self.options.dry_run, backslash=backslash)
299 return raw2
301 def start_guest(self):
302 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),
303 dry_run=self.options.dry_run))
305 def stop_guest(self):
306 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),
307 dry_run=self.options.dry_run))
309 def run_in_guest(self, command, backslash=False):
310 raw = self.actual_command_in_guest(command, backslash)
311 return utils.system(raw)
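# a minimal sketch of the command nesting at work here: run_in_guest("ls")
# wraps "ls" in an ssh to the plc VM (host_to_guest), then wraps the result
# in an ssh to the host box (test_ssh.actual_command), so the command hops
# test box -> host box -> guest VM.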
313 def run_in_host(self,command):
314 return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
316 # backslashing turned out so awful at some point that I've turned off auto-backslashing
317 # see e.g. plc_start esp. the version for f14
318 #command gets run in the plc's vm
319 def host_to_guest(self, command):
320 ssh_leg = TestSsh(self.vplchostname)
321 return ssh_leg.actual_command(command, keep_stdin=True)
323 # this /vservers thing is legacy...
324 def vm_root_in_host(self):
325 return "/vservers/{}/".format(self.vservername)
327 def vm_timestamp_path(self):
328 return "/vservers/{}/{}.timestamp".format(self.vservername, self.vservername)
330 #start/stop the vserver
331 def start_guest_in_host(self):
332 return "virsh -c lxc:/// start {}".format(self.vservername)
334 def stop_guest_in_host(self):
335 return "virsh -c lxc:/// destroy {}".format(self.vservername)
338 def run_in_guest_piped(self,local,remote):
339 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),
342 def dnf_check_installed(self, rpms):
343 if isinstance(rpms, list):
344 rpms = " ".join(rpms)
345 return self.run_in_guest("rpm -q {}".format(rpms)) == 0
347 # does a dnf install in the guest, ignores the dnf retcod, checks with rpm
348 def dnf_install(self, rpms):
349 if isinstance(rpms, list):
350 rpms = " ".join(rpms)
351 yum_mode = self.run_in_guest("dnf -y install {}".format(rpms))
352 if yum_mode != 0:
353 self.run_in_guest("dnf -y install --allowerasing {}".format(rpms))
354 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
355 # nothing similar with dnf, forget about this for now
356 # self.run_in_guest("yum-complete-transaction -y")
357 return self.dnf_check_installed(rpms)
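# usage sketch: callers can pass a list or a string; both forms below are
# equivalent, and success is judged with rpm rather than dnf's return code:
#     self.dnf_install(["myplc-core"])
#     self.dnf_install("myplc-core")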
359 def pip_install(self, package):
360 return self.run_in_guest("pip3 install {}".format(package)) == 0
362 def auth_root(self):
363 return {'Username' : self.plc_spec['settings']['PLC_ROOT_USER'],
364 'AuthMethod' : 'password',
365 'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
366 'Role' : self.plc_spec['role'],
367 }
369 def locate_site(self,sitename):
370 for site in self.plc_spec['sites']:
371 if site['site_fields']['name'] == sitename:
372 return site
373 if site['site_fields']['login_base'] == sitename:
374 return site
375 raise Exception("Cannot locate site {}".format(sitename))
377 def locate_node(self, nodename):
378 for site in self.plc_spec['sites']:
379 for node in site['nodes']:
380 if node['name'] == nodename:
381 return site, node
382 raise Exception("Cannot locate node {}".format(nodename))
384 def locate_hostname(self, hostname):
385 for site in self.plc_spec['sites']:
386 for node in site['nodes']:
387 if node['node_fields']['hostname'] == hostname:
388 return site, node
389 raise Exception("Cannot locate hostname {}".format(hostname))
391 def locate_key(self, key_name):
392 for key in self.plc_spec['keys']:
393 if key['key_name'] == key_name:
394 return key
395 raise Exception("Cannot locate key {}".format(key_name))
397 def locate_private_key_from_key_names(self, key_names):
398 # locate the first available key
399 found = False
400 for key_name in key_names:
401 key_spec = self.locate_key(key_name)
402 test_key = TestKey(self,key_spec)
403 publickey = test_key.publicpath()
404 privatekey = test_key.privatepath()
405 if os.path.isfile(publickey) and os.path.isfile(privatekey):
406 found = True
407 if found:
408 return privatekey
409 else:
410 return None
412 def locate_slice(self, slicename):
413 for slice in self.plc_spec['slices']:
414 if slice['slice_fields']['name'] == slicename:
415 return slice
416 raise Exception("Cannot locate slice {}".format(slicename))
418 def all_sliver_objs(self):
419 result = []
420 for slice_spec in self.plc_spec['slices']:
421 slicename = slice_spec['slice_fields']['name']
422 for nodename in slice_spec['nodenames']:
423 result.append(self.locate_sliver_obj(nodename, slicename))
424 return result
426 def locate_sliver_obj(self, nodename, slicename):
427 site,node = self.locate_node(nodename)
428 slice = self.locate_slice(slicename)
430 test_site = TestSite(self, site)
431 test_node = TestNode(self, test_site, node)
432 # xxx the slice site is assumed to be the node site - mhh - probably harmless
433 test_slice = TestSlice(self, test_site, slice)
434 return TestSliver(self, test_node, test_slice)
436 def locate_first_node(self):
437 nodename = self.plc_spec['slices'][0]['nodenames'][0]
438 site,node = self.locate_node(nodename)
439 test_site = TestSite(self, site)
440 test_node = TestNode(self, test_site, node)
441 return test_node
443 def locate_first_sliver(self):
444 slice_spec = self.plc_spec['slices'][0]
445 slicename = slice_spec['slice_fields']['name']
446 nodename = slice_spec['nodenames'][0]
447 return self.locate_sliver_obj(nodename,slicename)
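# for reference, a partial sketch of the plc_spec layout these locators
# assume (key names taken from the lookups above, values illustrative):
#     plc_spec = {
#         'sites': [ {'site_fields': {'name': ..., 'login_base': ...},
#                     'nodes': [ {'name': ...,
#                                 'node_fields': {'hostname': ...}} ]} ],
#         'keys': [ {'key_name': ...} ],
#         'slices': [ {'slice_fields': {'name': ...}, 'nodenames': [...]} ],
#     }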
449 # all different hostboxes used in this plc
450 def get_BoxNodes(self):
451 # maps on sites and nodes, returns [ (host_box, test_node) ]
452 tuples = []
453 for site_spec in self.plc_spec['sites']:
454 test_site = TestSite(self,site_spec)
455 for node_spec in site_spec['nodes']:
456 test_node = TestNode(self, test_site, node_spec)
457 if not test_node.is_real():
458 tuples.append( (test_node.host_box(),test_node) )
459 # transform into a dict { 'host_box' -> [ test_node .. ] }
460 result = {}
461 for (box,node) in tuples:
462 if box not in result:
463 result[box] = [node]
464 else:
465 result[box].append(node)
466 return result
468 # a step for checking this stuff
469 def show_boxes(self):
470 'print summary of nodes location'
471 for box,nodes in self.get_BoxNodes().items():
472 print(box,":"," + ".join( [ node.name() for node in nodes ] ))
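# the mapping printed above comes from get_BoxNodes(), which returns e.g.
# (hypothetical hostnames) {'qemubox1': [test_node1, test_node2]} - one
# entry per host box, listing the qemu-based nodes it hosts.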
475 # make this a valid step
476 def qemu_kill_all(self):
477 'kill all qemu instances on the qemu boxes involved by this setup'
478 # this is the brute force version, kill all qemus on that host box
479 for (box,nodes) in self.get_BoxNodes().items():
480 # pass the first nodename, as we don't push template-qemu on testboxes
481 nodedir = nodes[0].nodedir()
482 TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
485 # make this a valid step
486 def qemu_list_all(self):
487 'list all qemu instances on the qemu boxes involved by this setup'
488 for box,nodes in self.get_BoxNodes().items():
489 # this is the brute force version, kill all qemus on that host box
490 TestBoxQemu(box, self.options.buildname).qemu_list_all()
493 # list only the qemus related to this test
494 def qemu_list_mine(self):
495 'list qemu instances for our nodes'
496 for (box,nodes) in self.get_BoxNodes().items():
497 # the fine-grain version
502 # clean only the qemus related to this test
503 def qemu_clean_mine(self):
504 'cleanup (rm -rf) qemu instances for our nodes'
505 for box,nodes in self.get_BoxNodes().items():
506 # the fine-grain version
511 # kill only the right qemus
512 def qemu_kill_mine(self):
513 'kill the qemu instances for our nodes'
514 for box,nodes in self.get_BoxNodes().items():
515 # the fine-grain version
520 #################### display config
522 "show test configuration after localization"
527 # ugly hack to make sure 'run export' only reports about the 1st plc
528 # to avoid confusion - also we use 'inri_slice1' in various aliases..
531 "print cut'n paste-able stuff to export env variables to your shell"
532 # guess local domain from hostname
533 if TestPlc.exported_id > 1:
534 print("export GUESTHOSTNAME{:d}={}".format(TestPlc.exported_id, self.plc_spec['vservername']))
536 TestPlc.exported_id += 1
537 domain = socket.gethostname().split('.',1)[1]
538 fqdn = "{}.{}".format(self.plc_spec['host_box'], domain)
539 print("export BUILD={}".format(self.options.buildname))
540 print("export PLCHOSTLXC={}".format(fqdn))
541 print("export GUESTNAME={}".format(self.vservername))
542 print("export GUESTHOSTNAME={}.{}".format(self.vplchostname, domain))
543 # find hostname of first node
544 hostname, qemubox = self.all_node_infos()[0]
545 print("export KVMHOST={}.{}".format(qemubox, domain))
546 print("export NODE={}".format(hostname))
550 always_display_keys=['PLC_WWW_HOST', 'nodes', 'sites']
551 def show_pass(self, passno):
552 for (key,val) in self.plc_spec.items():
553 if not self.options.verbose and key not in TestPlc.always_display_keys:
558 self.display_site_spec(site)
559 for node in site['nodes']:
560 self.display_node_spec(node)
561 elif key == 'initscripts':
562 for initscript in val:
563 self.display_initscript_spec(initscript)
564 elif key == 'slices':
566 self.display_slice_spec(slice)
569 self.display_key_spec(key)
571 if key not in ['sites', 'initscripts', 'slices', 'keys']:
572 print('+ ', key, ':', val)
574 def display_site_spec(self, site):
575 print('+ ======== site', site['site_fields']['name'])
576 for k,v in site.items():
577 if not self.options.verbose and k not in TestPlc.always_display_keys:
581 print('+ ', 'nodes : ', end=' ')
583 print(node['node_fields']['hostname'],'', end=' ')
587 print('+ users : ', end=' ')
589 print(user['name'],'', end=' ')
591 elif k == 'site_fields':
592 print('+ login_base', ':', v['login_base'])
593 elif k == 'address_fields':
599 def display_initscript_spec(self, initscript):
600 print('+ ======== initscript', initscript['initscript_fields']['name'])
602 def display_key_spec(self, key):
603 print('+ ======== key', key['key_name'])
605 def display_slice_spec(self, slice):
606 print('+ ======== slice', slice['slice_fields']['name'])
607 for k,v in slice.items():
610 print('+ nodes : ', end=' ')
612 print(nodename,'', end=' ')
614 elif k == 'usernames':
616 print('+ users : ', end=' ')
618 print(username,'', end=' ')
620 elif k == 'slice_fields':
621 print('+ fields', ':', end=' ')
622 print('max_nodes=',v['max_nodes'], end=' ')
627 def display_node_spec(self, node):
628 print("+ node={} host_box={}".format(node['name'], node['host_box']), end=' ')
629 print("hostname=", node['node_fields']['hostname'], end=' ')
630 print("ip=", node['interface_fields']['ip'])
631 if self.options.verbose:
632 utils.pprint("node details", node, depth=3)
634 # another entry point for just showing the boxes involved
635 def display_mapping(self):
636 TestPlc.display_mapping_plc(self.plc_spec)
640 def display_mapping_plc(plc_spec):
641 print('+ MyPLC',plc_spec['name'])
642 # WARNING this would not be right for lxc-based PLC's - should be harmless though
643 print('+\tvserver address = root@{}:/vservers/{}'.format(plc_spec['host_box'], plc_spec['vservername']))
644 print('+\tIP = {}/{}'.format(plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip']))
645 for site_spec in plc_spec['sites']:
646 for node_spec in site_spec['nodes']:
647 TestPlc.display_mapping_node(node_spec)
650 def display_mapping_node(node_spec):
651 print('+ NODE {}'.format(node_spec['name']))
652 print('+\tqemu box {}'.format(node_spec['host_box']))
653 print('+\thostname={}'.format(node_spec['node_fields']['hostname']))
655 # write a timestamp in /vservers/<>.timestamp
656 # cannot be inside the vserver, that causes vserver .. build to cough
657 def plcvm_timestamp(self):
658 "Create a timestamp to remember creation date for this plc"
659 now = int(time.time())
660 # TODO-lxc check this one
661 # a first approx. is to store the timestamp close to the VM root like vs does
662 stamp_path = self.vm_timestamp_path()
663 stamp_dir = os.path.dirname(stamp_path)
664 utils.system(self.test_ssh.actual_command("mkdir -p {}".format(stamp_dir)))
665 return utils.system(self.test_ssh.actual_command("echo {:d} > {}".format(now, stamp_path))) == 0
667 # this is called unconditionally at the beginning of the test sequence
668 # just in case this is a rerun, so if the vm is not running it's fine
669 def plcvm_delete(self):
670 "vserver delete the test myplc"
671 stamp_path = self.vm_timestamp_path()
672 self.run_in_host("rm -f {}".format(stamp_path))
673 self.run_in_host("virsh -c lxc:/// destroy {}".format(self.vservername))
674 self.run_in_host("virsh -c lxc:/// undefine {}".format(self.vservername))
675 self.run_in_host("rm -fr /vservers/{}".format(self.vservername))
679 # historically the build was being fetched by the tests
680 # now the build pushes itself as a subdir of the tests workdir
681 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
682 def plcvm_create(self):
683 "vserver creation (no install done)"
684 # push the local build/ dir to the testplc box
685 if self.is_local():
686 # a full path for the local calls
687 build_dir = os.path.dirname(sys.argv[0])
688 # sometimes this is empty - set to "." in such a case
689 if not build_dir:
690 build_dir = "."
691 build_dir += "/build"
692 else:
693 # use a standard name - will be relative to remote buildname
694 build_dir = "build"
695 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
696 self.test_ssh.rmdir(build_dir)
697 self.test_ssh.copy(build_dir, recursive=True)
698 # the repo url is taken from arch-rpms-url
699 # with the last step (i386) removed
700 repo_url = self.options.arch_rpms_url
701 for level in [ 'arch' ]:
702 repo_url = os.path.dirname(repo_url)
704 # invoke initvm (drop support for vs)
705 script = "lbuild-initvm.sh"
706 script_options = ""
707 # pass the vbuild-nightly options to [lv]test-initvm
708 script_options += " -p {}".format(self.options.personality)
709 script_options += " -d {}".format(self.options.pldistro)
710 script_options += " -f {}".format(self.options.fcdistro)
711 script_options += " -r {}".format(repo_url)
712 vserver_name = self.vservername
713 try:
714 vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
715 script_options += " -n {}".format(vserver_hostname)
717 print("Cannot reverse lookup {}".format(self.vserverip))
718 print("This is considered fatal, as this might pollute the test results")
720 create_vserver="{build_dir}/{script} {script_options} {vserver_name}".format(**locals())
721 return self.run_in_host(create_vserver) == 0
723 ### install django through pip
724 def django_install(self):
725 # plcapi requires Django, that is no longer provided by fedora as an rpm
726 # so we use pip instead
730 return self.pip_install('Django')
733 def plc_install(self):
735 yum install myplc, noderepo
739 if self.options.personality == "linux32":
740 arch = "i386"
741 elif self.options.personality == "linux64":
742 arch = "x86_64"
743 else:
744 raise Exception("Unsupported personality {}".format(self.options.personality))
745 nodefamily = "{}-{}-{}".format(self.options.pldistro, self.options.fcdistro, arch)
747 # check it's possible to install just 'myplc-core' first
748 if not self.dnf_install("myplc-core"):
749 return False
751 pkgs_list = []
752 pkgs_list.append("myplc")
753 pkgs_list.append("slicerepo-{}".format(nodefamily))
754 pkgs_list.append("noderepo-{}".format(nodefamily))
755 pkgs_string = " ".join(pkgs_list)
756 return self.dnf_install(pkgs_string)
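# for instance (illustrative values only): personality=linux64,
# pldistro=onelab, fcdistro=f14 give nodefamily 'onelab-f14-x86_64',
# hence packages like 'noderepo-onelab-f14-x86_64'.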
758 def install_syslinux6(self):
760 install syslinux6 from the fedora21 release
762 key = 'http://mirror.onelab.eu/keys/RPM-GPG-KEY-fedora-21-primary'
764 rpms = [
765 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-6.03-1.fc21.x86_64.rpm',
766 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-nonlinux-6.03-1.fc21.noarch.rpm',
767 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-perl-6.03-1.fc21.x86_64.rpm',
768 ]
769 # this can be done several times
770 self.run_in_guest("rpm --import {key}".format(**locals()))
771 return self.run_in_guest("yum -y localinstall {}".format(" ".join(rpms))) == 0
773 def bonding_builds(self):
775 list /etc/yum.repos.d on the myplc side
777 self.run_in_guest("ls /etc/yum.repos.d/*partial.repo")
780 def bonding_nodes(self):
782 List nodes known to the myplc together with their nodefamily
784 print("---------------------------------------- nodes")
785 for node in self.apiserver.GetNodes(self.auth_root()):
786 print("{} -> {}".format(node['hostname'],
787 self.apiserver.GetNodeFlavour(self.auth_root(),node['hostname'])['nodefamily']))
788 print("---------------------------------------- nodes")
792 def mod_python(self):
793 """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
794 return self.dnf_install( ['mod_python'] )
797 def plc_configure(self):
799 tmpname = '{}.plc-config-tty'.format(self.name())
800 with open(tmpname,'w') as fileconf:
801 for var, value in self.plc_spec['settings'].items():
802 fileconf.write('e {}\n{}\n'.format(var, value))
803 fileconf.write('w\n')
804 fileconf.write('q\n')
805 utils.system('cat {}'.format(tmpname))
806 self.run_in_guest_piped('cat {}'.format(tmpname), 'plc-config-tty')
807 utils.system('rm {}'.format(tmpname))
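# sketch of the generated answer file fed to plc-config-tty, assuming a
# hypothetical setting PLC_NAME=TestLab:
#     e PLC_NAME
#     TestLab
#     w
#     q
# i.e. one 'e <variable>' / '<value>' pair per setting, then write and quit.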
810 # care only about f>=27
811 def start_stop_systemd(self, service, start_or_stop):
812 "utility to start/stop a systemd-defined service (sfa)"
813 return self.run_in_guest("systemctl {} {}".format(start_or_stop, service)) == 0
816 "start plc through systemclt"
817 return self.start_stop_systemd('plc', 'start')
820 "stop plc through systemctl"
821 return self.start_stop_systemd('plc', 'stop')
823 def plcvm_start(self):
824 "start the PLC vserver"
828 def plcvm_stop(self):
829 "stop the PLC vserver"
833 # stores the keys from the config for further use
834 def keys_store(self):
835 "stores test users ssh keys in keys/"
836 for key_spec in self.plc_spec['keys']:
837 TestKey(self,key_spec).store_key()
840 def keys_clean(self):
841 "removes keys cached in keys/"
842 utils.system("rm -rf ./keys")
845 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
846 # for later direct access to the nodes
847 def keys_fetch(self):
848 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
850 if not os.path.isdir(dir):
852 vservername = self.vservername
853 vm_root = self.vm_root_in_host()
855 prefix = 'debug_ssh_key'
856 for ext in ['pub', 'rsa'] :
857 src = "{vm_root}/etc/planetlab/{prefix}.{ext}".format(**locals())
858 dst = "keys/{vservername}-debug.{ext}".format(**locals())
859 if self.test_ssh.fetch(src, dst) != 0:
864 "create sites with PLCAPI"
865 return self.do_sites()
867 def delete_sites(self):
868 "delete sites with PLCAPI"
869 return self.do_sites(action="delete")
871 def do_sites(self, action="add"):
872 for site_spec in self.plc_spec['sites']:
873 test_site = TestSite(self,site_spec)
874 if (action != "add"):
875 utils.header("Deleting site {} in {}".format(test_site.name(), self.name()))
876 test_site.delete_site()
877 # deleted with the site
878 #test_site.delete_users()
881 utils.header("Creating site {} & users in {}".format(test_site.name(), self.name()))
882 test_site.create_site()
883 test_site.create_users()
886 def delete_all_sites(self):
887 "Delete all sites in PLC, and related objects"
888 print('auth_root', self.auth_root())
889 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id', 'login_base'])
890 for site in sites:
891 # keep the automatic site - otherwise we shoot ourselves in the foot, root_auth is not valid anymore
892 if site['login_base'] == self.plc_spec['settings']['PLC_SLICE_PREFIX']:
893 continue
894 site_id = site['site_id']
895 print('Deleting site_id', site_id)
896 self.apiserver.DeleteSite(self.auth_root(), site_id)
900 "create nodes with PLCAPI"
901 return self.do_nodes()
902 def delete_nodes(self):
903 "delete nodes with PLCAPI"
904 return self.do_nodes(action="delete")
906 def do_nodes(self, action="add"):
907 for site_spec in self.plc_spec['sites']:
908 test_site = TestSite(self, site_spec)
910 utils.header("Deleting nodes in site {}".format(test_site.name()))
911 for node_spec in site_spec['nodes']:
912 test_node = TestNode(self, test_site, node_spec)
913 utils.header("Deleting {}".format(test_node.name()))
914 test_node.delete_node()
916 utils.header("Creating nodes for site {} in {}".format(test_site.name(), self.name()))
917 for node_spec in site_spec['nodes']:
918 utils.pprint('Creating node {}'.format(node_spec), node_spec)
919 test_node = TestNode(self, test_site, node_spec)
920 test_node.create_node()
923 def nodegroups(self):
924 "create nodegroups with PLCAPI"
925 return self.do_nodegroups("add")
926 def delete_nodegroups(self):
927 "delete nodegroups with PLCAPI"
928 return self.do_nodegroups("delete")
931 @staticmethod
932 def translate_timestamp(start, grain, timestamp):
933 if timestamp < TestPlc.YEAR:
934 return start + timestamp*grain
935 else:
936 return timestamp
938 @staticmethod
939 def timestamp_printable(timestamp):
940 return time.strftime('%m-%d %H:%M:%S UTC', time.gmtime(timestamp))
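# worked example: lease specs give times relative to 'start' in grain
# units, so with grain=1800 translate_timestamp(start, 1800, 2) yields
# start + 3600 seconds, while any value >= TestPlc.YEAR is taken as an
# absolute epoch timestamp and returned unchanged.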
943 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
944 now = int(time.time())
945 grain = self.apiserver.GetLeaseGranularity(self.auth_root())
946 print('API answered grain=', grain)
947 start = (now//grain)*grain
949 # find out all nodes that are reservable
950 nodes = self.all_reservable_nodenames()
952 utils.header("No reservable node found - proceeding without leases")
955 # attach them to the leases as specified in plc_specs
956 # this is where the 'leases' field gets interpreted as relative or absolute
957 for lease_spec in self.plc_spec['leases']:
958 # skip the ones that come with a null slice id
959 if not lease_spec['slice']:
960 continue
961 lease_spec['t_from'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_from'])
962 lease_spec['t_until'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_until'])
963 lease_addition = self.apiserver.AddLeases(self.auth_root(), nodes, lease_spec['slice'],
964 lease_spec['t_from'], lease_spec['t_until'])
965 if lease_addition['errors']:
966 utils.header("Cannot create leases, {}".format(lease_addition['errors']))
969 utils.header('Leases on nodes {} for {} from {:d} ({}) until {:d} ({})'\
970 .format(nodes, lease_spec['slice'],
971 lease_spec['t_from'], TestPlc.timestamp_printable(lease_spec['t_from']),
972 lease_spec['t_until'], TestPlc.timestamp_printable(lease_spec['t_until'])))
976 def delete_leases(self):
977 "remove all leases in the myplc side"
978 lease_ids = [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
979 utils.header("Cleaning leases {}".format(lease_ids))
980 self.apiserver.DeleteLeases(self.auth_root(), lease_ids)
983 def list_leases(self):
984 "list all leases known to the myplc"
985 leases = self.apiserver.GetLeases(self.auth_root())
986 now = int(time.time())
987 for l in leases:
988 current = l['t_until'] >= now
989 if self.options.verbose or current:
990 utils.header("{} {} from {} until {}"\
991 .format(l['hostname'], l['name'],
992 TestPlc.timestamp_printable(l['t_from']),
993 TestPlc.timestamp_printable(l['t_until'])))
996 # create nodegroups if needed, and populate
997 def do_nodegroups(self, action="add"):
998 # 1st pass to scan contents
999 groups_dict = {}
1000 for site_spec in self.plc_spec['sites']:
1001 test_site = TestSite(self,site_spec)
1002 for node_spec in site_spec['nodes']:
1003 test_node = TestNode(self, test_site, node_spec)
1004 if 'nodegroups' in node_spec:
1005 nodegroupnames = node_spec['nodegroups']
1006 if isinstance(nodegroupnames, str):
1007 nodegroupnames = [ nodegroupnames ]
1008 for nodegroupname in nodegroupnames:
1009 if nodegroupname not in groups_dict:
1010 groups_dict[nodegroupname] = []
1011 groups_dict[nodegroupname].append(test_node.name())
1012 auth = self.auth_root()
1014 for (nodegroupname,group_nodes) in groups_dict.items():
1016 print('nodegroups:', 'dealing with nodegroup',\
1017 nodegroupname, 'on nodes', group_nodes)
1018 # first, check if the nodetagtype is here
1019 tag_types = self.apiserver.GetTagTypes(auth, {'tagname':nodegroupname})
1020 if tag_types:
1021 tag_type_id = tag_types[0]['tag_type_id']
1022 else:
1023 tag_type_id = self.apiserver.AddTagType(auth,
1024 {'tagname' : nodegroupname,
1025 'description' : 'for nodegroup {}'.format(nodegroupname),
1026 'category' : 'test'})
1027 print('located tag (type)', nodegroupname, 'as', tag_type_id)
1029 nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname' : nodegroupname})
1030 if not nodegroups:
1031 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
1032 print('created nodegroup', nodegroupname, \
1033 'from tagname', nodegroupname, 'and value', 'yes')
1034 # set node tag on all nodes, value='yes'
1035 for nodename in group_nodes:
1036 try:
1037 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
1038 except:
1039 traceback.print_exc()
1040 print('node', nodename, 'seems to already have tag', nodegroupname)
1043 expect_yes = self.apiserver.GetNodeTags(auth,
1044 {'hostname' : nodename,
1045 'tagname' : nodegroupname},
1046 ['value'])[0]['value']
1047 if expect_yes != "yes":
1048 print('Mismatch node tag on node',nodename,'got',expect_yes)
1051 if not self.options.dry_run:
1052 print('Cannot find tag', nodegroupname, 'on node', nodename)
1056 print('cleaning nodegroup', nodegroupname)
1057 self.apiserver.DeleteNodeGroup(auth, nodegroupname)
1059 traceback.print_exc()
1063 # a list of TestNode objs
1064 def all_nodes(self):
1065 nodes = []
1066 for site_spec in self.plc_spec['sites']:
1067 test_site = TestSite(self,site_spec)
1068 for node_spec in site_spec['nodes']:
1069 nodes.append(TestNode(self, test_site, node_spec))
1070 return nodes
1072 # return a list of tuples (nodename,qemuname)
1073 def all_node_infos(self) :
1074 node_infos = []
1075 for site_spec in self.plc_spec['sites']:
1076 node_infos += [ (node_spec['node_fields']['hostname'], node_spec['host_box']) \
1077 for node_spec in site_spec['nodes'] ]
1078 return node_infos
1080 def all_nodenames(self):
1081 return [ x[0] for x in self.all_node_infos() ]
1082 def all_reservable_nodenames(self):
1083 res = []
1084 for site_spec in self.plc_spec['sites']:
1085 for node_spec in site_spec['nodes']:
1086 node_fields = node_spec['node_fields']
1087 if 'node_type' in node_fields and node_fields['node_type'] == 'reservable':
1088 res.append(node_fields['hostname'])
1089 return res
1091 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
1092 def nodes_check_boot_state(self, target_boot_state, timeout_minutes,
1093 silent_minutes, period_seconds = 15):
1094 if self.options.dry_run:
1095 print('dry_run')
1096 return True
1098 class CompleterTaskBootState(CompleterTask):
1099 def __init__(self, test_plc, hostname):
1100 self.test_plc = test_plc
1101 self.hostname = hostname
1102 self.last_boot_state = 'undef'
1103 def actual_run(self):
1104 try:
1105 node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(),
1106 [ self.hostname ],
1107 ['boot_state'])[0]
1108 self.last_boot_state = node['boot_state']
1109 return self.last_boot_state == target_boot_state
1110 except:
1111 return False
1113 return "CompleterTaskBootState with node {}".format(self.hostname)
1114 def failure_epilogue(self):
1115 print("node {} in state {} - expected {}"\
1116 .format(self.hostname, self.last_boot_state, target_boot_state))
1118 timeout = timedelta(minutes=timeout_minutes)
1119 graceout = timedelta(minutes=silent_minutes)
1120 period = timedelta(seconds=period_seconds)
1121 # the nodes that haven't checked yet - start with a full list and shrink over time
1122 utils.header("checking nodes boot state (expected {})".format(target_boot_state))
1123 tasks = [ CompleterTaskBootState(self,hostname) \
1124 for (hostname,_) in self.all_node_infos() ]
1125 message = 'check_boot_state={}'.format(target_boot_state)
1126 return Completer(tasks, message=message).run(timeout, graceout, period)
1128 def nodes_booted(self):
1129 return self.nodes_check_boot_state('boot', timeout_minutes=30, silent_minutes=28)
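# e.g. nodes_booted() above waits up to 30 minutes for every node to reach
# 'boot', probing every 15 seconds (the default period) and staying silent
# for the first 28 minutes.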
1131 def probe_kvm_iptables(self):
1132 (_,kvmbox) = self.all_node_infos()[0]
1133 TestSsh(kvmbox).run("iptables-save")
1137 def check_nodes_ping(self, timeout_seconds=60, period_seconds=10):
1138 class CompleterTaskPingNode(CompleterTask):
1139 def __init__(self, hostname):
1140 self.hostname = hostname
1141 def run(self, silent):
1142 command="ping -c 1 -w 1 {} >& /dev/null".format(self.hostname)
1143 return utils.system(command, silent=silent) == 0
1144 def failure_epilogue(self):
1145 print("Cannot ping node with name {}".format(self.hostname))
1146 timeout = timedelta(seconds=timeout_seconds)
1147 graceout = timeout
1148 period = timedelta(seconds=period_seconds)
1149 node_infos = self.all_node_infos()
1150 tasks = [ CompleterTaskPingNode(h) for (h,_) in node_infos ]
1151 return Completer(tasks, message='ping_node').run(timeout, graceout, period)
1153 # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1154 def ping_node(self):
1156 return self.check_nodes_ping()
1158 def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period_seconds=15):
1160 timeout = timedelta(minutes=timeout_minutes)
1161 graceout = timedelta(minutes=silent_minutes)
1162 period = timedelta(seconds=period_seconds)
1163 vservername = self.vservername
1164 if debug:
1165 message = "debug"
1166 completer_message = 'ssh_node_debug'
1167 local_key = "keys/{vservername}-debug.rsa".format(**locals())
1168 else:
1169 message = "boot"
1170 completer_message = 'ssh_node_boot'
1171 local_key = "keys/key_admin.rsa"
1172 utils.header("checking ssh access to nodes (expected in {} mode)".format(message))
1173 node_infos = self.all_node_infos()
1174 tasks = [ CompleterTaskNodeSsh(nodename, qemuname, local_key,
1175 boot_state=message, dry_run=self.options.dry_run) \
1176 for (nodename, qemuname) in node_infos ]
1177 return Completer(tasks, message=completer_message).run(timeout, graceout, period)
1179 def ssh_node_debug(self):
1180 "Tries to ssh into nodes in debug mode with the debug ssh key"
1181 return self.check_nodes_ssh(debug = True,
1182 timeout_minutes = self.ssh_node_debug_timeout,
1183 silent_minutes = self.ssh_node_debug_silent)
1185 def ssh_node_boot(self):
1186 "Tries to ssh into nodes in production mode with the root ssh key"
1187 return self.check_nodes_ssh(debug = False,
1188 timeout_minutes = self.ssh_node_boot_timeout,
1189 silent_minutes = self.ssh_node_boot_silent)
1191 def node_bmlogs(self):
1192 "Checks that there's a non-empty dir. /var/log/bm/raw"
1193 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw")) == 0
1196 def qemu_local_init(self): pass
1198 def bootcd(self): pass
1200 def qemu_local_config(self): pass
1202 def qemu_export(self): pass
1204 def qemu_cleanlog(self): pass
1206 def nodestate_reinstall(self): pass
1208 def nodestate_upgrade(self): pass
1210 def nodestate_safeboot(self): pass
1212 def nodestate_boot(self): pass
1214 def nodestate_show(self): pass
1216 def nodedistro_f14(self): pass
1218 def nodedistro_f18(self): pass
1220 def nodedistro_f20(self): pass
1222 def nodedistro_f21(self): pass
1224 def nodedistro_f22(self): pass
1226 def nodedistro_show(self): pass
1228 ### check hooks : invoke scripts from hooks/{node,slice}
1229 def check_hooks_node(self):
1230 return self.locate_first_node().check_hooks()
1231 def check_hooks_sliver(self) :
1232 return self.locate_first_sliver().check_hooks()
1234 def check_hooks(self):
1235 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1236 return self.check_hooks_node() and self.check_hooks_sliver()
1239 def do_check_initscripts(self):
1240 class CompleterTaskInitscript(CompleterTask):
1241 def __init__(self, test_sliver, stamp):
1242 self.test_sliver = test_sliver
1243 self.stamp = stamp
1244 def actual_run(self):
1245 return self.test_sliver.check_initscript_stamp(self.stamp)
1247 return "initscript checker for {}".format(self.test_sliver.name())
1248 def failure_epilogue(self):
1249 print("initscript stamp {} not found in sliver {}"\
1250 .format(self.stamp, self.test_sliver.name()))
1252 tasks = []
1253 for slice_spec in self.plc_spec['slices']:
1254 if 'initscriptstamp' not in slice_spec:
1255 continue
1256 stamp = slice_spec['initscriptstamp']
1257 slicename = slice_spec['slice_fields']['name']
1258 for nodename in slice_spec['nodenames']:
1259 print('nodename', nodename, 'slicename', slicename, 'stamp', stamp)
1260 site,node = self.locate_node(nodename)
1261 # xxx - passing the wrong site - probably harmless
1262 test_site = TestSite(self, site)
1263 test_slice = TestSlice(self, test_site, slice_spec)
1264 test_node = TestNode(self, test_site, node)
1265 test_sliver = TestSliver(self, test_node, test_slice)
1266 tasks.append(CompleterTaskInitscript(test_sliver, stamp))
1267 return Completer(tasks, message='check_initscripts').\
1268 run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1270 def check_initscripts(self):
1271 "check that the initscripts have triggered"
1272 return self.do_check_initscripts()
1274 def initscripts(self):
1275 "create initscripts with PLCAPI"
1276 for initscript in self.plc_spec['initscripts']:
1277 utils.pprint('Adding Initscript in plc {}'.format(self.plc_spec['name']), initscript)
1278 self.apiserver.AddInitScript(self.auth_root(), initscript['initscript_fields'])
1281 def delete_initscripts(self):
1282 "delete initscripts with PLCAPI"
1283 for initscript in self.plc_spec['initscripts']:
1284 initscript_name = initscript['initscript_fields']['name']
1285 print(('Attempting to delete {} in plc {}'.format(initscript_name, self.plc_spec['name'])))
1286 try:
1287 self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
1288 print(initscript_name, 'deleted')
1289 except:
1290 print('deletion went wrong - probably did not exist')
1291 return True
1295 "create slices with PLCAPI"
1296 return self.do_slices(action="add")
1298 def delete_slices(self):
1299 "delete slices with PLCAPI"
1300 return self.do_slices(action="delete")
1302 def fill_slices(self):
1303 "add nodes in slices with PLCAPI"
1304 return self.do_slices(action="fill")
1306 def empty_slices(self):
1307 "remove nodes from slices with PLCAPI"
1308 return self.do_slices(action="empty")
1310 def do_slices(self, action="add"):
1311 for slice in self.plc_spec['slices']:
1312 site_spec = self.locate_site(slice['sitename'])
1313 test_site = TestSite(self,site_spec)
1314 test_slice=TestSlice(self,test_site,slice)
1315 if action == "delete":
1316 test_slice.delete_slice()
1317 elif action == "fill":
1318 test_slice.add_nodes()
1319 elif action == "empty":
1320 test_slice.delete_nodes()
1321 else:
1322 test_slice.create_slice()
1323 return True
1325 @slice_mapper__tasks(20, 10, 15)
1326 def ssh_slice(self): pass
1327 @slice_mapper__tasks(20, 19, 15)
1328 def ssh_slice_off(self): pass
1329 @slice_mapper__tasks(1, 1, 15)
1330 def slice_fs_present(self): pass
1331 @slice_mapper__tasks(1, 1, 15)
1332 def slice_fs_deleted(self): pass
1334 # use another name so we can exclude/ignore it from the tests on the nightly command line
1335 def ssh_slice_again(self): return self.ssh_slice()
1336 # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1337 # but for some reason the ignore-wrapping thing would not
1340 def ssh_slice_basics(self): pass
1342 def check_vsys_defaults(self): pass
1345 def keys_clear_known_hosts(self): pass
1347 def plcapi_urls(self):
1349 attempts to reach the PLCAPI with various forms for the URL
1351 return PlcapiUrlScanner(self.auth_root(), ip=self.vserverip).scan()
1353 def speed_up_slices(self):
1354 "tweak nodemanager cycle (wait time) to 30+/-10 s"
1355 return self._speed_up_slices (30, 10)
1356 def super_speed_up_slices(self):
1357 "dev mode: tweak nodemanager cycle (wait time) to 5+/-1 s"
1358 return self._speed_up_slices(5, 1)
1360 def _speed_up_slices(self, p, r):
1361 # create the template on the server-side
1362 template = "{}.nodemanager".format(self.name())
1363 with open(template,"w") as template_file:
1364 template_file.write('OPTIONS="-p {} -r {} -d"\n'.format(p, r))
1365 in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1366 remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1367 self.test_ssh.copy_abs(template, remote)
1369 if not self.apiserver.GetConfFiles(self.auth_root(),
1370 {'dest' : '/etc/sysconfig/nodemanager'}):
1371 self.apiserver.AddConfFile(self.auth_root(),
1372 {'dest' : '/etc/sysconfig/nodemanager',
1373 'source' : 'PlanetLabConf/nodemanager',
1374 'postinstall_cmd' : 'service nm restart',})
1377 def debug_nodemanager(self):
1378 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1379 template = "{}.nodemanager".format(self.name())
1380 with open(template,"w") as template_file:
1381 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1382 in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1383 remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1384 self.test_ssh.copy_abs(template, remote)
1388 def qemu_start(self) : pass
1391 def qemu_timestamp(self) : pass
1394 def qemu_nodefamily(self): pass
1396 # when a spec refers to a node possibly on another plc
1397 def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
1398 for plc in [ self ] + other_plcs:
1399 try:
1400 return plc.locate_sliver_obj(nodename, slicename)
1401 except:
1402 pass
1403 raise Exception("Cannot locate sliver {}@{} among all PLCs".format(nodename, slicename))
1405 # implement this one as a cross step so that we can take advantage of different nodes
1406 # in multi-plcs mode
1407 def cross_check_tcp(self, other_plcs):
1408 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1409 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1410 utils.header("check_tcp: no/empty config found")
1411 return True
1412 specs = self.plc_spec['tcp_specs']
1415 # first wait for the network to be up and ready from the slices
1416 class CompleterTaskNetworkReadyInSliver(CompleterTask):
1417 def __init__(self, test_sliver):
1418 self.test_sliver = test_sliver
1419 def actual_run(self):
1420 return self.test_sliver.check_tcp_ready(port = 9999)
1422 return "network ready checker for {}".format(self.test_sliver.name())
1423 def failure_epilogue(self):
1424 print("could not bind port from sliver {}".format(self.test_sliver.name()))
1426 tasks = []
1428 managed_sliver_names = set()
1429 for spec in specs:
1430 # locate the TestSliver instances involved, and cache them in the spec instance
1431 spec['s_sliver'] = self.locate_sliver_obj_cross(spec['server_node'], spec['server_slice'], other_plcs)
1432 spec['c_sliver'] = self.locate_sliver_obj_cross(spec['client_node'], spec['client_slice'], other_plcs)
1433 message = "Will check TCP between s={} and c={}"\
1434 .format(spec['s_sliver'].name(), spec['c_sliver'].name())
1435 if 'client_connect' in spec:
1436 message += " (using {})".format(spec['client_connect'])
1437 utils.header(message)
1438 # we need to check network presence in both slivers, but also
1439 # avoid to insert a sliver several times
1440 for sliver in [ spec['s_sliver'], spec['c_sliver'] ]:
1441 if sliver.name() not in managed_sliver_names:
1442 tasks.append(CompleterTaskNetworkReadyInSliver(sliver))
1443 # add this sliver's name in the set
1444 managed_sliver_names .update( {sliver.name()} )
1446 # wait for the network to be OK in all server sides
1447 if not Completer(tasks, message='check for network readiness in slivers').\
1448 run(timedelta(seconds=30), timedelta(seconds=24), period=timedelta(seconds=5)):
1449 return False
1451 # run server and client
1455 # the issue here is that we have the server run in background
1456 # and so we have no clue if it took off properly or not
1457 # looks like in some cases it does not
1458 address = spec['s_sliver'].test_node.name()
1459 if not spec['s_sliver'].run_tcp_server(address, port, timeout=20):
1463 # idem for the client side
1464 # use nodename from located sliver, unless 'client_connect' is set
1465 if 'client_connect' in spec:
1466 destination = spec['client_connect']
1468 destination = spec['s_sliver'].test_node.name()
1469 if not spec['c_sliver'].run_tcp_client(destination, port):
1473 # painfully enough, we need to allow for some time as netflow might show up last
1474 def check_system_slice(self):
1475 "all nodes: check that a system slice is alive"
1476 # netflow currently not working in the lxc distro
1477 # drl not built at all in the wtx distro
1478 # if we find either of them we're happy
1479 return self.check_netflow() or self.check_drl()
1482 def check_netflow(self): return self._check_system_slice('netflow')
1483 def check_drl(self): return self._check_system_slice('drl')
1485 # we have the slices up already here, so it should not take too long
1486 def _check_system_slice(self, slicename, timeout_minutes=5, period_seconds=15):
1487 class CompleterTaskSystemSlice(CompleterTask):
1488 def __init__(self, test_node, dry_run):
1489 self.test_node = test_node
1490 self.dry_run = dry_run
1491 def actual_run(self):
1492 return self.test_node._check_system_slice(slicename, dry_run=self.dry_run)
1494 return "System slice {} @ {}".format(slicename, self.test_node.name())
1495 def failure_epilogue(self):
1496 print("COULD not find system slice {} @ {}".format(slicename, self.test_node.name()))
1497 timeout = timedelta(minutes=timeout_minutes)
1498 silent = timedelta(0)
1499 period = timedelta(seconds=period_seconds)
1500 tasks = [ CompleterTaskSystemSlice(test_node, self.options.dry_run) \
1501 for test_node in self.all_nodes() ]
1502 return Completer(tasks, message='_check_system_slice').run(timeout, silent, period)
1504 def plcsh_stress_test(self):
1505 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1506 # install the stress-test in the plc image
1507 location = "/usr/share/plc_api/plcsh_stress_test.py"
1508 remote = "{}/{}".format(self.vm_root_in_host(), location)
1509 self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1510 command = location
1511 command += " -- --check"
1512 if self.options.size == 1:
1513 command += " --tiny"
1514 return self.run_in_guest(command) == 0
1516 # populate runs the same utility with slightly different options
1517 # in particular it runs with --preserve (don't cleanup) and without --check
1518 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1520 def sfa_install_all(self):
1521 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1522 return (self.dnf_install("sfa sfa-plc sfa-sfatables sfa-client") and
1523 self.run_in_guest("systemctl enable sfa-registry")==0 and
1524 self.run_in_guest("systemctl enable sfa-aggregate")==0)
1526 def sfa_install_core(self):
1528 return self.dnf_install("sfa")
1530 def sfa_install_plc(self):
1531 "yum install sfa-plc"
1532 return self.dnf_install("sfa-plc")
1534 def sfa_install_sfatables(self):
1535 "yum install sfa-sfatables"
1536 return self.dnf_install("sfa-sfatables")
1538 # for some very odd reason, this sometimes fails with the following symptom
1539 # # yum install sfa-client
1540 # Setting up Install Process
1542 # Downloading Packages:
1543 # Running rpm_check_debug
1544 # Running Transaction Test
1545 # Transaction Test Succeeded
1546 # Running Transaction
1547 # Transaction couldn't start:
1548 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1549 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1550 # even though in the same context I have
1551 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1552 # Filesystem Size Used Avail Use% Mounted on
1553 # /dev/hdv1 806G 264G 501G 35% /
1554 # none 16M 36K 16M 1% /tmp
1556 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1557 def sfa_install_client(self):
1558 "yum install sfa-client"
1559 first_try = self.dnf_install("sfa-client")
1560 if first_try:
1561 return True
1562 utils.header("********** Regular yum failed - special workaround in place, 2nd chance")
1563 code, cached_rpm_path = \
1564 utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1565 utils.header("rpm_path=<<{}>>".format(cached_rpm_path))
1567 self.run_in_guest("rpm -i {}".format(cached_rpm_path))
1568 return self.dnf_check_installed("sfa-client")
1570 def sfa_dbclean(self):
1571 "thoroughly wipes off the SFA database"
1572 return self.run_in_guest("sfaadmin reg nuke") == 0 or \
1573 self.run_in_guest("sfa-nuke.py") == 0 or \
1574 self.run_in_guest("sfa-nuke-plc.py") == 0 or \
1575 self.run_in_guest("sfaadmin registry nuke") == 0
1577 def sfa_fsclean(self):
1578 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1579 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1582 def sfa_plcclean(self):
1583 "cleans the PLC entries that were created as a side effect of running the script"
1585 sfa_spec = self.plc_spec['sfa']
1587 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1588 login_base = auth_sfa_spec['login_base']
1589 try:
1590 self.apiserver.DeleteSite(self.auth_root(), login_base)
1592 print("Site {} already absent from PLC db".format(login_base))
1594 for spec_name in ['pi_spec', 'user_spec']:
1595 user_spec = auth_sfa_spec[spec_name]
1596 username = user_spec['email']
1597 try:
1598 self.apiserver.DeletePerson(self.auth_root(), username)
1599 except:
1600 # this in fact is expected as sites delete their members
1601 #print "User {} already absent from PLC db".format(username)
1604 print("REMEMBER TO RUN sfa_import AGAIN")
1607 def sfa_uninstall(self):
1608 "uses rpm to uninstall sfa - ignore result"
1609 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1610 self.run_in_guest("rm -rf /var/lib/sfa")
1611 self.run_in_guest("rm -rf /etc/sfa")
1612 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1614 self.run_in_guest("rpm -e --noscripts sfa-plc")
1617 ### run unit tests for SFA
1618 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1619 # Running Transaction
1620 # Transaction couldn't start:
1621 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1622 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1623 # no matter how many Gbs are available on the testplc
1624 # could not figure out what's wrong, so...
1625 # if the yum install phase fails, consider the test is successful
1626 # other combinations will eventually run it hopefully
1627 def sfa_utest(self):
1628 "dnf install sfa-tests and run SFA unittests"
1629 self.run_in_guest("dnf -y install sfa-tests")
1630 # failed to install - forget it
1631 if self.run_in_guest("rpm -q sfa-tests") != 0:
1632 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1634 return self.run_in_guest("/usr/share/sfa/tests/testAll.py") == 0
1637 def confdir(self):
1638 dirname = "conf.{}".format(self.plc_spec['name'])
1639 if not os.path.isdir(dirname):
1640 utils.system("mkdir -p {}".format(dirname))
1641 if not os.path.isdir(dirname):
1642 raise Exception("Cannot create config dir for plc {}".format(self.name()))
1643 return dirname
1645 def conffile(self, filename):
1646 return "{}/{}".format(self.confdir(), filename)
1647 def confsubdir(self, dirname, clean, dry_run=False):
1648 subdirname = "{}/{}".format(self.confdir(), dirname)
1649 if clean:
1650 utils.system("rm -rf {}".format(subdirname))
1651 if not os.path.isdir(subdirname):
1652 utils.system("mkdir -p {}".format(subdirname))
1653 if not dry_run and not os.path.isdir(subdirname):
1654 raise "Cannot create config subdir {} for plc {}".format(dirname, self.name())
    def conffile_clean(self, filename):
        filename = self.conffile(filename)
        return utils.system("rm -rf {}".format(filename)) == 0
    def sfa_configure(self):
        "run sfa-config-tty"
        tmpname = self.conffile("sfa-config-tty")
        with open(tmpname, 'w') as fileconf:
            for var, value in self.plc_spec['sfa']['settings'].items():
                fileconf.write('e {}\n{}\n'.format(var, value))
            fileconf.write('w\n')
            fileconf.write('R\n')
            fileconf.write('q\n')
        utils.system('cat {}'.format(tmpname))
        self.run_in_guest_piped('cat {}'.format(tmpname), 'sfa-config-tty')
        return True
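    # a minimal sketch of the answer file generated above, assuming a single
    # setting SFA_REGISTRY_ROOT_AUTH=plt (hypothetical value - real ones come
    # from plc_spec['sfa']['settings']):
    #   e SFA_REGISTRY_ROOT_AUTH
    #   plt
    #   w
    #   R
    #   q
    # i.e. 'e <var>' then the new value for each setting, followed by what are
    # presumably the tool's write, restart and quit commands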
    def aggregate_xml_line(self):
        port = self.plc_spec['sfa']['neighbours-port']
        return '<aggregate addr="{}" hrn="{}" port="{}"/>'\
            .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'], port)

    def registry_xml_line(self):
        return '<registry addr="{}" hrn="{}" port="12345"/>'\
            .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
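    # for illustration, with vserverip 192.168.1.1, root auth 'plt' and
    # neighbours-port 12346 (all hypothetical values), these two helpers produce:
    #   <aggregate addr="192.168.1.1" hrn="plt" port="12346"/>
    #   <registry addr="192.168.1.1" hrn="plt" port="12345"/>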
    # a cross step that takes all other plcs in argument
    def cross_sfa_configure(self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        if not other_plcs:
            return True
        agg_fname = self.conffile("agg.xml")
        with open(agg_fname, "w") as out:
            out.write("<aggregates>{}</aggregates>\n"\
                      .format(" ".join([plc.aggregate_xml_line() for plc in other_plcs])))
        utils.header("(Over)wrote {}".format(agg_fname))
        reg_fname = self.conffile("reg.xml")
        with open(reg_fname, "w") as out:
            out.write("<registries>{}</registries>\n"\
                      .format(" ".join([plc.registry_xml_line() for plc in other_plcs])))
        utils.header("(Over)wrote {}".format(reg_fname))
        return self.test_ssh.copy_abs(agg_fname,
                                      '/{}/etc/sfa/aggregates.xml'.format(self.vm_root_in_host())) == 0 \
           and self.test_ssh.copy_abs(reg_fname,
                                      '/{}/etc/sfa/registries.xml'.format(self.vm_root_in_host())) == 0
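    # so in a two-plc test, each plc's /etc/sfa/aggregates.xml ends up holding
    # one entry per peer, along the lines of (hypothetical values again):
    #   <aggregates><aggregate addr="192.168.1.2" hrn="plt2" port="12346"/></aggregates>
    # which is how each SFA instance locates its peers' aggregate and registry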
    def sfa_import(self):
        "use sfaadmin to import from plc"
        auth = self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']
        return self.run_in_guest('sfaadmin reg import_registry') == 0

    def sfa_start(self):
        "start SFA through systemctl"
        return (self.start_stop_systemd('sfa-registry', 'start') and
                self.start_stop_systemd('sfa-aggregate', 'start'))
    def sfi_configure(self):
        "Create /root/sfi on the plc side for sfi client configuration"
        if self.options.dry_run:
            utils.header("DRY RUN - skipping step")
            return True
        sfa_spec = self.plc_spec['sfa']
        # cannot use auth_sfa_mapper to pass dir_name
        for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_slice = TestAuthSfa(self, slice_spec)
            dir_basename = os.path.basename(test_slice.sfi_path())
            dir_name = self.confsubdir("dot-sfi/{}".format(dir_basename),
                                       clean=True, dry_run=self.options.dry_run)
            test_slice.sfi_configure(dir_name)
            # push into the remote /root/sfi area
            location = test_slice.sfi_path()
            remote = "{}/{}".format(self.vm_root_in_host(), location)
            self.test_ssh.mkdir(remote, abs=True)
            # need to strip the last level of remote, otherwise we'd get an extra directory level
            self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
        return True
    def sfi_clean(self):
        "clean up /root/sfi on the plc side"
        self.run_in_guest("rm -rf /root/sfi")
        return True
    def sfa_rspec_empty(self):
        "expose a static empty rspec (ships with the tests module) in the sfi directory"
        filename = "empty-rspec.xml"
        overall = True
        for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_slice = TestAuthSfa(self, slice_spec)
            in_vm = test_slice.sfi_path()
            remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
            if self.test_ssh.copy_abs(filename, remote) != 0:
                overall = False
        return overall
    @auth_sfa_mapper
    def sfa_register_site(self): pass
    @auth_sfa_mapper
    def sfa_register_pi(self): pass
    @auth_sfa_mapper
    def sfa_register_user(self): pass
    @auth_sfa_mapper
    def sfa_update_user(self): pass
    @auth_sfa_mapper
    def sfa_register_slice(self): pass
    @auth_sfa_mapper
    def sfa_renew_slice(self): pass
    @auth_sfa_mapper
    def sfa_get_expires(self): pass
    @auth_sfa_mapper
    def sfa_discover(self): pass
    @auth_sfa_mapper
    def sfa_rspec(self): pass
    @auth_sfa_mapper
    def sfa_allocate(self): pass
    @auth_sfa_mapper
    def sfa_allocate_empty(self): pass
    @auth_sfa_mapper
    def sfa_provision(self): pass
    @auth_sfa_mapper
    def sfa_provision_empty(self): pass
    @auth_sfa_mapper
    def sfa_describe(self): pass
    @auth_sfa_mapper
    def sfa_check_slice_plc(self): pass
    @auth_sfa_mapper
    def sfa_check_slice_plc_empty(self): pass
    @auth_sfa_mapper
    def sfa_update_slice(self): pass
    @auth_sfa_mapper
    def sfa_remove_user_from_slice(self): pass
    @auth_sfa_mapper
    def sfa_insert_user_in_slice(self): pass
    @auth_sfa_mapper
    def sfi_list(self): pass
    @auth_sfa_mapper
    def sfi_show_site(self): pass
    @auth_sfa_mapper
    def sfi_show_slice(self): pass
    @auth_sfa_mapper
    def sfi_show_slice_researchers(self): pass
    @auth_sfa_mapper
    def ssh_slice_sfa(self): pass
    @auth_sfa_mapper
    def sfa_delete_user(self): pass
    @auth_sfa_mapper
    def sfa_delete_slice(self): pass
1810 "stop sfa through systemclt"
1811 return (self.start_stop_systemd('sfa-aggregate', 'stop') and
1812 self.start_stop_systemd('sfa-registry', 'stop'))
1815 "creates random entries in the PLCAPI"
1816 # install the stress-test in the plc image
1817 location = "/usr/share/plc_api/plcsh_stress_test.py"
1818 remote = "{}/{}".format(self.vm_root_in_host(), location)
1819 self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1821 command += " -- --preserve --short-names"
1822 local = (self.run_in_guest(command) == 0);
1823 # second run with --foreign
1824 command += ' --foreign'
1825 remote = (self.run_in_guest(command) == 0);
1826 return local and remote
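    # the guest thus ends up running first
    #   /usr/share/plc_api/plcsh_stress_test.py -- --preserve --short-names
    # and then the same command again with ' --foreign' appended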
    ####################
    # the bonding steps are delegated to TestBonding through the
    # bonding_redirector decorator
    @bonding_redirector
    def bonding_init_partial(self): pass
    @bonding_redirector
    def bonding_add_yum(self): pass
    @bonding_redirector
    def bonding_install_rpms(self): pass
    ####################
    def gather_logs(self):
        "gets all possible logs - from the plc, the qemu nodes and the slices - for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
        # (2) get each node's qemu log and store it as logs/node.qemu.<node>.log
        # (3) get each node's /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible get the slices' /var/log as logs/sliver.var-log.<sliver>/*
        # (1.a)
        print("-------------------- TestPlc.gather_logs : PLC's /var/log")
        self.gather_var_logs()
        # (1.b)
        print("-------------------- TestPlc.gather_logs : PLC's /var/lib/pgsql/data/pg_log/")
        self.gather_pgsql_logs()
        # (1.c)
        print("-------------------- TestPlc.gather_logs : PLC's /root/sfi/")
        self.gather_root_sfi()
        # (2)
        print("-------------------- TestPlc.gather_logs : nodes' QEMU logs")
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                test_node.gather_qemu_logs()
        # (3)
        print("-------------------- TestPlc.gather_logs : nodes' /var/log")
        self.gather_nodes_var_logs()
        # (4)
        print("-------------------- TestPlc.gather_logs : sample sliver's /var/log")
        self.gather_slivers_var_logs()
        return True
    def gather_slivers_var_logs(self):
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.{}".format(test_sliver.name()))
            command = remote + " | tar -C logs/sliver.var-log.{} -xf -".format(test_sliver.name())
            utils.system(command)
        return True
    def gather_var_logs(self):
        utils.system("mkdir -p logs/myplc.var-log.{}".format(self.name()))
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + " | tar -C logs/myplc.var-log.{} -xf -".format(self.name())
        utils.system(command)
        command = "chmod a+r,a+x logs/myplc.var-log.{}/httpd".format(self.name())
        utils.system(command)
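    # all the gather_* helpers use the same streaming-tar idiom: run
    # 'tar -cf - .' inside the guest and pipe its stdout into a local
    # 'tar -xf -', so a whole remote tree is copied in one shot without
    # needing rsync on the plc; the command executed locally expands to
    # something like (hypothetical host name):
    #   ssh root@myplc 'tar -C /var/log/ -cf - .' | tar -C logs/myplc.var-log.<name> -xf -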
    def gather_pgsql_logs(self):
        utils.system("mkdir -p logs/myplc.pgsql-log.{}".format(self.name()))
        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
        command = to_plc + " | tar -C logs/myplc.pgsql-log.{} -xf -".format(self.name())
        utils.system(command)

    def gather_root_sfi(self):
        utils.system("mkdir -p logs/sfi.{}".format(self.name()))
        to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
        command = to_plc + " | tar -C logs/sfi.{} -xf -".format(self.name())
        utils.system(command)
    def gather_nodes_var_logs(self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                test_ssh = TestSsh(test_node.name(), key="keys/key_admin.rsa")
                utils.system("mkdir -p logs/node.var-log.{}".format(test_node.name()))
                command = test_ssh.actual_command("tar -C /var/log -cf - .")
                command = command + " | tar -C logs/node.var-log.{} -xf -".format(test_node.name())
                utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile(self, database):
        # uses options.dbname if it is set and a string, else falls back to today's date
        try:
            name = self.options.dbname
            if not isinstance(name, str):
                raise Exception
        except:
            name = str(datetime.now().date())
        return "/root/{}-{}.sql".format(database, name)
    def plc_db_dump(self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        dump = self.dbfile("planetlab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f ' + dump)
        utils.header('Dumped planetlab5 database in {}'.format(dump))
        return True

    def plc_db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        dump = self.dbfile("planetlab5")
        self.run_in_guest('systemctl stop httpd')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab5', 'psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump)
        # restart the httpd service
        self.run_in_guest('systemctl start httpd')
        utils.header('Database restored from ' + dump)
        return True
    @staticmethod
    def create_ignore_steps():
        for step in TestPlc.default_steps + TestPlc.other_steps:
            # default steps can have a plc qualifier
            if '@' in step:
                step, qualifier = step.split('@')
            # or be defined as forced or ignored by default
            for keyword in ['_ignore', '_force']:
                if step.endswith(keyword):
                    step = step.replace(keyword, '')
            if step == SEP or step == SEPSFA:
                continue
            method = getattr(TestPlc, step)
            name = step + '_ignore'
            wrapped = ignore_result(method)
            # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
            setattr(TestPlc, name, wrapped)
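    # after this runs, TestPlc grows one '<step>_ignore' counterpart per step,
    # e.g. sfa_import_ignore, whose actual outcome is reported but does not
    # count as a failure of the overall run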
    # def ssh_slice_again_ignore(self): pass
    # def check_initscripts_ignore(self): pass
    def standby_1_through_20(self):
        """convenience function to wait for a specified number of minutes"""
        pass
    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass
    # convenience for debugging the test logic
    def yes(self): return True
    def no(self): return False
    def fail(self): return False