1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
12 from Completer import Completer, CompleterTask
13 from TestSite import TestSite
14 from TestNode import TestNode, CompleterTaskNodeSsh
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
25 from TestBonding import TestBonding
27 has_sfa_cache_filename="sfa-cache"
29 # step methods must take (self) and return a boolean (options is a member of the class)
def standby(minutes, dry_run):
    """Sleep for *minutes* minutes - helper behind the standby_<n> steps."""
    # NOTE(review): dry_run is unused in the visible lines; presumably a
    # dry-run guard lives in the lines not shown here - confirm before relying on it
    utils.header('Entering StandBy for {:d} mn'.format(minutes))
    time.sleep(60*minutes)
def standby_generic(func):
    # decorator factory for standby_<n>_... steps: the wait duration is
    # encoded in the decorated function's name (second '_'-separated field)
    minutes = int(func.__name__.split("_")[1])
    return standby(minutes, self.options.dry_run)
def node_mapper(method):
    # decorator: turn a TestNode method into a TestPlc step that maps it
    # over all nodes of this plc
    def map_on_nodes(self, *args, **kwds):
        # resolve the TestNode method carrying the same name
        node_method = TestNode.__dict__[method.__name__]
        for test_node in self.all_nodes():
            if not node_method(test_node, *args, **kwds):
    # maintain __name__ for ignore_result
    map_on_nodes.__name__ = method.__name__
    # restore the doc text
    map_on_nodes.__doc__ = TestNode.__dict__[method.__name__].__doc__
def slice_mapper(method):
    # decorator: turn a TestSlice method into a TestPlc step that maps it
    # over all slices declared in the plc spec
    def map_on_slices(self):
        # resolve the TestSlice method carrying the same name
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice = TestSlice(self,test_site,slice_spec)
            if not slice_method(test_slice, self.options):
    # maintain __name__ for ignore_result
    map_on_slices.__name__ = method.__name__
    # restore the doc text
    map_on_slices.__doc__ = TestSlice.__dict__[method.__name__].__doc__
def bonding_redirector(method):
    # decorator: redirect a bonding_* step onto the TestBonding method of the
    # same name with the 'bonding_' prefix stripped
    bonding_name = method.__name__.replace('bonding_', '')
        bonding_method = TestBonding.__dict__[bonding_name]
        return bonding_method(self.test_bonding)
    # maintain __name__ for ignore_result
    redirect.__name__ = method.__name__
    # restore the doc text
    redirect.__doc__ = TestBonding.__dict__[bonding_name].__doc__
87 # run a step but return True so that we can go on
def ignore_result(method):
    # decorator: run the underlying step but wrap its outcome in Ignored so
    # the scheduler records it without failing the run
    # ssh_slice_ignore->ssh_slice
    ref_name = method.__name__.replace('_ignore', '').replace('force_', '')
        ref_method = TestPlc.__dict__[ref_name]
        result = ref_method(self)
        print("Actual (but ignored) result for {ref_name} is {result}".format(**locals()))
        return Ignored(result)
    name = method.__name__.replace('_ignore', '').replace('force_', '')
    ignoring.__name__ = name
    ignoring.__doc__ = "ignored version of " + name
101 # a variant that expects the TestSlice method to return a list of CompleterTasks that
102 # are then merged into a single Completer run to avoid wating for all the slices
103 # esp. useful when a test fails of course
104 # because we need to pass arguments we use a class instead..
class slice_mapper__tasks(object):
    """Parameterized decorator: gather CompleterTasks from all slices and run
    them in a single Completer, so we do not wait on each slice in turn."""
    # could not get this to work with named arguments
    def __init__(self, timeout_minutes, silent_minutes, period_seconds):
        # overall deadline, quiet period, and polling period for the Completer
        self.timeout = timedelta(minutes = timeout_minutes)
        self.silent = timedelta(minutes = silent_minutes)
        self.period = timedelta(seconds = period_seconds)
    def __call__(self, method):
        # compute augmented method name
        method_name = method.__name__ + "__tasks"
        # locate in TestSlice
        slice_method = TestSlice.__dict__[ method_name ]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self, site_spec)
            test_slice = TestSlice(self, test_site, slice_spec)
            tasks += slice_method (test_slice, self.options)
        return Completer (tasks, message=method.__name__).\
            run(decorator_self.timeout, decorator_self.silent, decorator_self.period)
    # restore the doc text from the TestSlice method even if a bit odd
    wrappee.__name__ = method.__name__
    wrappee.__doc__ = slice_method.__doc__
def auth_sfa_mapper(method):
    # decorator: map a TestAuthSfa method over all auth_sfa_specs of the plc
        auth_method = TestAuthSfa.__dict__[method.__name__]
        for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_auth = TestAuthSfa(self, auth_spec)
            if not auth_method(test_auth, self.options):
    # restore the doc text
    actual.__doc__ = TestAuthSfa.__dict__[method.__name__].__doc__
145 def __init__(self, result):
155 'plcvm_delete', 'plcvm_timestamp', 'plcvm_create', SEP,
156 'django_install', 'plc_install', 'plc_configure', 'plc_start', SEP,
157 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
158 'plcapi_urls', 'speed_up_slices', SEP,
159 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
160 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
161 # keep this out of the way for now
162 'check_vsys_defaults_ignore', SEP,
163 # run this first off so it's easier to re-run on another qemu box
164 'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init',
165 'bootcd', 'qemu_local_config', SEP,
166 'qemu_clean_mine', 'qemu_export', 'qemu_cleanlog', SEP,
167 'qemu_start', 'qemu_timestamp', 'qemu_nodefamily', SEP,
168 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure',
169 'sfa_start', 'sfa_import', SEPSFA,
170 'sfi_configure@1', 'sfa_register_site@1', 'sfa_register_pi@1', SEPSFA,
171 'sfa_register_user@1', 'sfa_update_user@1',
172 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
173 'sfa_remove_user_from_slice@1', 'sfi_show_slice_researchers@1',
174 'sfa_insert_user_in_slice@1', 'sfi_show_slice_researchers@1', SEPSFA,
175 'sfa_discover@1', 'sfa_rspec@1', SEPSFA,
176 'sfa_allocate@1', 'sfa_provision@1', 'sfa_describe@1', SEPSFA,
177 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
178 'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
179 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
180 # but as the stress test might take a while, we sometimes missed the debug mode..
181 'probe_kvm_iptables',
182 'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
183 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', SEP,
184 'ssh_slice_sfa@1', SEPSFA,
185 'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1',
186 'sfa_check_slice_plc_empty@1', SEPSFA,
187 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
188 'check_system_slice', SEP,
189 # for inspecting the slice while it runs the first time
191 # check slices are turned off properly
193 'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
194 # check they are properly re-created with the same name
195 'fill_slices', 'ssh_slice_again', SEP,
196 'gather_logs_force', SEP,
199 'export', 'show_boxes', 'super_speed_up_slices', SEP,
200 'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
201 'delete_initscripts', 'delete_nodegroups', 'delete_all_sites', SEP,
202 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
203 'delete_leases', 'list_leases', SEP,
205 'nodestate_show', 'nodestate_safeboot', 'nodestate_boot', 'nodestate_upgrade', SEP,
206 'nodedistro_show', 'nodedistro_f14', 'nodedistro_f18', SEP,
207 'nodedistro_f20', 'nodedistro_f21', 'nodedistro_f22', SEP,
208 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
209 'sfa_install_core', 'sfa_install_sfatables',
210 'sfa_install_plc', 'sfa_install_client', SEPSFA,
211 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop', 'sfa_uninstall', 'sfi_clean', SEPSFA,
212 'sfa_get_expires', SEPSFA,
213 'plc_db_dump', 'plc_db_restore', SEP,
214 'check_netflow', 'check_drl', SEP,
215 # used to be part of default steps but won't work since f27
217 'slice_fs_present', 'check_initscripts', SEP,
218 'standby_1_through_20', 'yes', 'no', SEP,
219 'install_syslinux6', 'bonding_builds', 'bonding_nodes', SEP,
221 default_bonding_steps = [
222 'bonding_init_partial',
224 'bonding_install_rpms', SEP,
228 def printable_steps(list):
229 single_line = " ".join(list) + " "
230 return single_line.replace(" "+SEP+" ", " \\\n").replace(" "+SEPSFA+" ", " \\\n")
232 def valid_step(step):
233 return step != SEP and step != SEPSFA
235 # turn off the sfa-related steps when build has skipped SFA
236 # this was originally for centos5 but is still valid
237 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
    def _has_sfa_cached(rpms_url):
        """Probe the build's rpm repo for an sfa- package, caching the answer
        in the sfa-cache file so the curl probe runs only once."""
        if os.path.isfile(has_sfa_cache_filename):
            with open(has_sfa_cache_filename) as cache:
                cached = cache.read() == "yes"
            utils.header("build provides SFA (cached):{}".format(cached))
        # warning, we're now building 'sface' so let's be a bit more picky
        # full builds are expected to return with 0 here
        utils.header("Checking if build provides SFA package...")
        retcod = utils.system("curl --silent {}/ | grep -q sfa-".format(rpms_url)) == 0
        encoded = 'yes' if retcod else 'no'
        with open(has_sfa_cache_filename,'w') as cache:
    def check_whether_build_has_sfa(rpms_url):
        """If the build ships no SFA packages, demote every sfa/sfi step from
        default_steps to other_steps so they are not run by default."""
        has_sfa = TestPlc._has_sfa_cached(rpms_url)
            utils.header("build does provide SFA")
            # move all steps containing 'sfa' from default_steps to other_steps
            utils.header("SFA package not found - removing steps with sfa or sfi")
            sfa_steps = [ step for step in TestPlc.default_steps
                          if step.find('sfa') >= 0 or step.find("sfi") >= 0 ]
            TestPlc.other_steps += sfa_steps
            for step in sfa_steps:
                TestPlc.default_steps.remove(step)
    def __init__(self, plc_spec, options):
        """Bind a plc spec dict and the command-line options to this TestPlc."""
        self.plc_spec = plc_spec
        self.options = options
        # every command on the host box goes through this ssh helper
        self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
        self.vserverip = plc_spec['vserverip']
        self.vservername = plc_spec['vservername']
        # the vplc hostname is the last '-'-separated component of the vserver name
        self.vplchostname = self.vservername.split('-')[-1]
        self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
        self.apiserver = TestApiserver(self.url, options.dry_run)
        # (timeout, silent) pairs, as configured in the spec
        (self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
        (self.ssh_node_debug_timeout, self.ssh_node_debug_silent) = plc_spec['ssh_node_debug_timers']
280 def has_addresses_api(self):
281 return self.apiserver.has_method('AddIpAddress')
284 name = self.plc_spec['name']
285 return "{}.{}".format(name,self.vservername)
288 return self.plc_spec['host_box']
291 return self.test_ssh.is_local()
293 # define the API methods on this object through xmlrpc
294 # would help, but not strictly necessary
    def actual_command_in_guest(self,command, backslash=False):
        """Build the full command that runs *command* inside the plc VM:
        first wrapped for the VM ssh leg, then for the host box."""
        raw1 = self.host_to_guest(command)
        raw2 = self.test_ssh.actual_command(raw1, dry_run=self.options.dry_run, backslash=backslash)
303 def start_guest(self):
304 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),
305 dry_run=self.options.dry_run))
307 def stop_guest(self):
308 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),
309 dry_run=self.options.dry_run))
311 def run_in_guest(self, command, backslash=False):
312 raw = self.actual_command_in_guest(command, backslash)
313 return utils.system(raw)
315 def run_in_host(self,command):
316 return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
318 # backslashing turned out so awful at some point that I've turned off auto-backslashing
319 # see e.g. plc_start esp. the version for f14
320 #command gets run in the plc's vm
321 def host_to_guest(self, command):
322 ssh_leg = TestSsh(self.vplchostname)
323 return ssh_leg.actual_command(command, keep_stdin=True)
325 # this /vservers thing is legacy...
326 def vm_root_in_host(self):
327 return "/vservers/{}/".format(self.vservername)
329 def vm_timestamp_path(self):
330 return "/vservers/{}/{}.timestamp".format(self.vservername, self.vservername)
332 #start/stop the vserver
333 def start_guest_in_host(self):
334 return "virsh -c lxc:/// start {}".format(self.vservername)
336 def stop_guest_in_host(self):
337 return "virsh -c lxc:/// destroy {}".format(self.vservername)
    def run_in_guest_piped(self,local,remote):
        """Run *local* on the build host and pipe its stdout into *remote*
        executed inside the plc VM."""
        return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),
    def dnf_check_installed(self, rpms):
        """True when rpm -q reports *rpms* (list or space-separated string)
        as installed inside the plc VM."""
        if isinstance(rpms, list):
        return self.run_in_guest("rpm -q {}".format(rpms)) == 0
349 # does a yum install in the vs, ignore yum retcod, check with rpm
    def dnf_install(self, rpms):
        """dnf-install *rpms* in the VM; dnf's retcod is ignored and the
        result is double-checked with rpm -q (dnf_check_installed)."""
        if isinstance(rpms, list):
        yum_mode = self.run_in_guest("dnf -y install {}".format(rpms))
            # retry with --allowerasing when the plain install failed
            self.run_in_guest("dnf -y install --allowerasing {}".format(rpms))
        # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
        # nothing similar with dnf, forget about this for now
        # self.run_in_guest("yum-complete-transaction -y")
        return self.dnf_check_installed(rpms)
361 def pip_install(self, package):
362 return self.run_in_guest("pip3 install {}".format(package)) == 0
365 return {'Username' : self.plc_spec['settings']['PLC_ROOT_USER'],
366 'AuthMethod' : 'password',
367 'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
368 'Role' : self.plc_spec['role'],
    def locate_site(self,sitename):
        """Return the site spec whose name or login_base matches *sitename*;
        raise Exception when no site matches."""
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
            if site['site_fields']['login_base'] == sitename:
        raise Exception("Cannot locate site {}".format(sitename))
    def locate_node(self, nodename):
        """Locate *nodename* in the spec; callers unpack the result as
        (site_spec, node_spec). Raises Exception when not found."""
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
        raise Exception("Cannot locate node {}".format(nodename))
    def locate_hostname(self, hostname):
        """Locate the node spec whose node_fields.hostname is *hostname*;
        raises Exception when not found."""
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
        raise Exception("Cannot locate hostname {}".format(hostname))
    def locate_key(self, key_name):
        """Return the key spec named *key_name* from the config; raise otherwise."""
        for key in self.plc_spec['keys']:
            if key['key_name'] == key_name:
        raise Exception("Cannot locate key {}".format(key_name))
    def locate_private_key_from_key_names(self, key_names):
        """Scan *key_names* and return a usable private key path - the first
        one whose public and private files both exist on disk."""
        # locate the first avail. key
        for key_name in key_names:
            key_spec = self.locate_key(key_name)
            test_key = TestKey(self,key_spec)
            publickey = test_key.publicpath()
            privatekey = test_key.privatepath()
            if os.path.isfile(publickey) and os.path.isfile(privatekey):
    def locate_slice(self, slicename):
        """Return the slice spec named *slicename*; raise when not found."""
        # NOTE(review): the loop variable shadows the builtin 'slice'
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
        raise Exception("Cannot locate slice {}".format(slicename))
    def all_sliver_objs(self):
        """Build a TestSliver for every (slice, node) pair declared in the spec."""
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj(nodename, slicename))
    def locate_sliver_obj(self, nodename, slicename):
        """Return a TestSliver for the (*nodename*, *slicename*) pair."""
        site,node = self.locate_node(nodename)
        slice = self.locate_slice(slicename)
        test_site = TestSite(self, site)
        test_node = TestNode(self, test_site, node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice(self, test_site, slice)
        return TestSliver(self, test_node, test_slice)
    def locate_first_node(self):
        """Build the TestNode for the first node of the first declared slice."""
        nodename = self.plc_spec['slices'][0]['nodenames'][0]
        site,node = self.locate_node(nodename)
        test_site = TestSite(self, site)
        test_node = TestNode(self, test_site, node)
445 def locate_first_sliver(self):
446 slice_spec = self.plc_spec['slices'][0]
447 slicename = slice_spec['slice_fields']['name']
448 nodename = slice_spec['nodenames'][0]
449 return self.locate_sliver_obj(nodename,slicename)
451 # all different hostboxes used in this plc
    def get_BoxNodes(self):
        """Group the qemu (non-real) nodes of this plc by their host box."""
        # maps on sites and nodes, return [ (host_box,test_node) ]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                # only virtual (qemu) nodes are of interest here
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        for (box,node) in tuples:
            if box not in result:
            result[box].append(node)
470 # a step for checking this stuff
    def show_boxes(self):
        'print summary of nodes location'
        # one line per host box: "<box> : node1 + node2 ..."
        for box,nodes in self.get_BoxNodes().items():
            print(box,":"," + ".join( [ node.name() for node in nodes ] ))
477 # make this a valid step
    def qemu_kill_all(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.get_BoxNodes().items():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir = nodes[0].nodedir()
            TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
487 # make this a valid step
    def qemu_list_all(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for box,nodes in self.get_BoxNodes().items():
            # brute force version: list every qemu on that host box
            TestBoxQemu(box, self.options.buildname).qemu_list_all()
495 # kill only the qemus related to this test
    def qemu_list_mine(self):
        'list qemu instances for our nodes'
        for (box,nodes) in self.get_BoxNodes().items():
            # the fine-grain version
504 # kill only the qemus related to this test
    def qemu_clean_mine(self):
        'cleanup (rm -rf) qemu instances for our nodes'
        for box,nodes in self.get_BoxNodes().items():
            # the fine-grain version
513 # kill only the right qemus
    def qemu_kill_mine(self):
        'kill the qemu instances for our nodes'
        for box,nodes in self.get_BoxNodes().items():
            # the fine-grain version
522 #################### display config
524 "show test configuration after localization"
529 # uggly hack to make sure 'run export' only reports about the 1st plc
530 # to avoid confusion - also we use 'inri_slice1' in various aliases..
533 "print cut'n paste-able stuff to export env variables to your shell"
534 # guess local domain from hostname
535 if TestPlc.exported_id > 1:
536 print("export GUESTHOSTNAME{:d}={}".format(TestPlc.exported_id, self.plc_spec['vservername']))
538 TestPlc.exported_id += 1
539 domain = socket.gethostname().split('.',1)[1]
540 fqdn = "{}.{}".format(self.plc_spec['host_box'], domain)
541 print("export BUILD={}".format(self.options.buildname))
542 print("export PLCHOSTLXC={}".format(fqdn))
543 print("export GUESTNAME={}".format(self.vservername))
544 print("export GUESTHOSTNAME={}.{}".format(self.vplchostname, domain))
545 # find hostname of first node
546 hostname, qemubox = self.all_node_infos()[0]
547 print("export KVMHOST={}.{}".format(qemubox, domain))
548 print("export NODE={}".format(hostname))
552 always_display_keys=['PLC_WWW_HOST', 'nodes', 'sites']
    def show_pass(self, passno):
        """One display pass over the plc spec; non-verbose mode only shows
        the keys listed in always_display_keys."""
        for (key,val) in self.plc_spec.items():
            if not self.options.verbose and key not in TestPlc.always_display_keys:
                self.display_site_spec(site)
                for node in site['nodes']:
                    self.display_node_spec(node)
            elif key == 'initscripts':
                for initscript in val:
                    self.display_initscript_spec(initscript)
            elif key == 'slices':
                    self.display_slice_spec(slice)
                    self.display_key_spec(key)
                if key not in ['sites', 'initscripts', 'slices', 'keys']:
                    print('+ ', key, ':', val)
    def display_site_spec(self, site):
        """Pretty-print one site spec: header, then nodes/users summaries."""
        print('+ ======== site', site['site_fields']['name'])
        for k,v in site.items():
            if not self.options.verbose and k not in TestPlc.always_display_keys:
            print('+ ', 'nodes : ', end=' ')
                print(node['node_fields']['hostname'],'', end=' ')
            print('+ users : ', end=' ')
                print(user['name'],'', end=' ')
            elif k == 'site_fields':
                print('+ login_base', ':', v['login_base'])
            elif k == 'address_fields':
601 def display_initscript_spec(self, initscript):
602 print('+ ======== initscript', initscript['initscript_fields']['name'])
604 def display_key_spec(self, key):
605 print('+ ======== key', key['key_name'])
    def display_slice_spec(self, slice):
        """Pretty-print one slice spec: header, nodes, users, main fields."""
        print('+ ======== slice', slice['slice_fields']['name'])
        for k,v in slice.items():
            print('+ nodes : ', end=' ')
                print(nodename,'', end=' ')
            elif k == 'usernames':
                print('+ users : ', end=' ')
                    print(username,'', end=' ')
            elif k == 'slice_fields':
                print('+ fields', ':', end=' ')
                print('max_nodes=',v['max_nodes'], end=' ')
629 def display_node_spec(self, node):
630 print("+ node={} host_box={}".format(node['name'], node['host_box']), end=' ')
631 print("hostname=", node['node_fields']['hostname'], end=' ')
632 print("ip=", node['interface_fields']['ip'])
633 if self.options.verbose:
634 utils.pprint("node details", node, depth=3)
636 # another entry point for just showing the boxes involved
    def display_mapping(self):
        """Entry point that only shows the plc/node/box mapping."""
        TestPlc.display_mapping_plc(self.plc_spec)
    def display_mapping_plc(plc_spec):
        """Print where this plc lives (host box, vserver, IP) and map its nodes."""
        print('+ MyPLC',plc_spec['name'])
        # WARNING this would not be right for lxc-based PLC's - should be harmless though
        print('+\tvserver address = root@{}:/vservers/{}'.format(plc_spec['host_box'], plc_spec['vservername']))
        print('+\tIP = {}/{}'.format(plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip']))
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)
    def display_mapping_node(node_spec):
        """Print one node's name, qemu host box and hostname."""
        print('+ NODE {}'.format(node_spec['name']))
        print('+\tqemu box {}'.format(node_spec['host_box']))
        print('+\thostname={}'.format(node_spec['node_fields']['hostname']))
657 # write a timestamp in /vservers/<>.timestamp
658 # cannot be inside the vserver, that causes vserver .. build to cough
659 def plcvm_timestamp(self):
660 "Create a timestamp to remember creation date for this plc"
661 now = int(time.time())
662 # TODO-lxc check this one
663 # a first approx. is to store the timestamp close to the VM root like vs does
664 stamp_path = self.vm_timestamp_path()
665 stamp_dir = os.path.dirname(stamp_path)
666 utils.system(self.test_ssh.actual_command("mkdir -p {}".format(stamp_dir)))
667 return utils.system(self.test_ssh.actual_command("echo {:d} > {}".format(now, stamp_path))) == 0
669 # this is called inconditionnally at the beginning of the test sequence
670 # just in case this is a rerun, so if the vm is not running it's fine
    def plcvm_delete(self):
        "vserver delete the test myplc"
        stamp_path = self.vm_timestamp_path()
        # best-effort cleanup: exit codes are deliberately ignored, since the
        # vm may legitimately not exist on a first run
        self.run_in_host("rm -f {}".format(stamp_path))
        self.run_in_host("virsh -c lxc:/// destroy {}".format(self.vservername))
        self.run_in_host("virsh -c lxc:/// undefine {}".format(self.vservername))
        self.run_in_host("rm -fr /vservers/{}".format(self.vservername))
681 # historically the build was being fetched by the tests
682 # now the build pushes itself as a subdir of the tests workdir
683 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def plcvm_create(self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        # a full path for the local calls
        build_dir = os.path.dirname(sys.argv[0])
        # sometimes this is empty - set to "." in such a case
        build_dir += "/build"
        # use a standard name - will be relative to remote buildname
        # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
        self.test_ssh.rmdir(build_dir)
        self.test_ssh.copy(build_dir, recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # invoke initvm (drop support for vs)
        script = "lbuild-initvm.sh"
        # pass the vbuild-nightly options to [lv]test-initvm
        script_options += " -p {}".format(self.options.personality)
        script_options += " -d {}".format(self.options.pldistro)
        script_options += " -f {}".format(self.options.fcdistro)
        script_options += " -r {}".format(repo_url)
        vserver_name = self.vservername
            # a reverse lookup of the VM IP gives the -n option for initvm
            vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
            script_options += " -n {}".format(vserver_hostname)
            print("Cannot reverse lookup {}".format(self.vserverip))
            print("This is considered fatal, as this might pollute the test results")
        create_vserver="{build_dir}/{script} {script_options} {vserver_name}".format(**locals())
        return self.run_in_host(create_vserver) == 0
725 ### install django through pip
    def django_install(self):
        # plcapi requires Django, that is no longer provided by fedora as an rpm
        # so we use pip instead
        return self.pip_install('Django')
    def plc_install(self):
        # dnf-install myplc-core first, then myplc + the node/slice repos for
        # the nodefamily derived from personality/pldistro/fcdistro
        yum install myplc, noderepo
        if self.options.personality == "linux32":
        elif self.options.personality == "linux64":
            raise Exception("Unsupported personality {}".format(self.options.personality))
        nodefamily = "{}-{}-{}".format(self.options.pldistro, self.options.fcdistro, arch)
        # check it's possible to install just 'myplc-core' first
        if not self.dnf_install("myplc-core"):
        pkgs_list.append("myplc")
        pkgs_list.append("slicerepo-{}".format(nodefamily))
        pkgs_list.append("noderepo-{}".format(nodefamily))
        # NOTE(review): pkgs_string is computed but never used below
        pkgs_string=" ".join(pkgs_list)
        return self.dnf_install(pkgs_list)
    def install_syslinux6(self):
        # import the fedora21 gpg key, then localinstall the syslinux6 rpms
        install syslinux6 from the fedora21 release
        key = 'http://mirror.onelab.eu/keys/RPM-GPG-KEY-fedora-21-primary'
            'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-6.03-1.fc21.x86_64.rpm',
            'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-nonlinux-6.03-1.fc21.noarch.rpm',
            'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-perl-6.03-1.fc21.x86_64.rpm',
        # this can be done several times
        self.run_in_guest("rpm --import {key}".format(**locals()))
        return self.run_in_guest("yum -y localinstall {}".format(" ".join(rpms))) == 0
    def bonding_builds(self):
        # shows the *partial.repo files installed by the bonding machinery
        list /etc/yum.repos.d on the myplc side
        self.run_in_guest("ls /etc/yum.repos.d/*partial.repo")
    def bonding_nodes(self):
        # one line per node: hostname -> nodefamily, as reported by the API
        List nodes known to the myplc together with their nodefamiliy
        print("---------------------------------------- nodes")
        for node in self.apiserver.GetNodes(self.auth_root()):
            print("{} -> {}".format(node['hostname'],
                                    self.apiserver.GetNodeFlavour(self.auth_root(),node['hostname'])['nodefamily']))
        print("---------------------------------------- nodes")
794 def mod_python(self):
795 """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
796 return self.dnf_install( ['mod_python'] )
    def plc_configure(self):
        # feed all settings from the spec to plc-config-tty through a temp
        # script of 'e <var> / <value>' lines, then write ('w') and quit ('q')
        tmpname = '{}.plc-config-tty'.format(self.name())
        with open(tmpname,'w') as fileconf:
            for var, value in self.plc_spec['settings'].items():
                fileconf.write('e {}\n{}\n'.format(var, value))
            fileconf.write('w\n')
            fileconf.write('q\n')
        utils.system('cat {}'.format(tmpname))
        self.run_in_guest_piped('cat {}'.format(tmpname), 'plc-config-tty')
        utils.system('rm {}'.format(tmpname))
812 # care only about f>=27
813 def start_stop_systemd(self, service, start_or_stop):
814 "utility to start/stop a systemd-defined service (sfa)"
815 return self.run_in_guest("systemctl {} {}".format(start_or_stop, service)) == 0
818 "start plc through systemclt"
819 return self.start_stop_systemd('plc', 'start')
822 "stop plc through systemctl"
823 return self.start_stop_systemd('plc', 'stop')
825 def plcvm_start(self):
826 "start the PLC vserver"
830 def plcvm_stop(self):
831 "stop the PLC vserver"
835 # stores the keys from the config for further use
    def keys_store(self):
        "stores test users ssh keys in keys/"
        # delegate the actual writing to TestKey
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
    def keys_clean(self):
        "removes keys cached in keys/"
        # wipe the whole local cache directory
        utils.system("rm -rf ./keys")
847 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
848 # for later direct access to the nodes
    def keys_fetch(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        if not os.path.isdir(dir):
        vservername = self.vservername
        vm_root = self.vm_root_in_host()
        # fetch the debug key pair (.pub and .rsa) out of the VM's /etc/planetlab
        prefix = 'debug_ssh_key'
        for ext in ['pub', 'rsa'] :
            src = "{vm_root}/etc/planetlab/{prefix}.{ext}".format(**locals())
            dst = "keys/{vservername}-debug.{ext}".format(**locals())
            if self.test_ssh.fetch(src, dst) != 0:
866 "create sites with PLCAPI"
867 return self.do_sites()
869 def delete_sites(self):
870 "delete sites with PLCAPI"
871 return self.do_sites(action="delete")
    def do_sites(self, action="add"):
        """Worker for sites()/delete_sites(): create or delete every site
        (and its users) declared in the plc spec."""
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self,site_spec)
            if (action != "add"):
                utils.header("Deleting site {} in {}".format(test_site.name(), self.name()))
                test_site.delete_site()
                # deleted with the site
                #test_site.delete_users()
                utils.header("Creating site {} & users in {}".format(test_site.name(), self.name()))
                test_site.create_site()
                test_site.create_users()
    def delete_all_sites(self):
        "Delete all sites in PLC, and related objects"
        print('auth_root', self.auth_root())
        sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id', 'login_base'])
            # keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
            if site['login_base'] == self.plc_spec['settings']['PLC_SLICE_PREFIX']:
            site_id = site['site_id']
            print('Deleting site_id', site_id)
            self.apiserver.DeleteSite(self.auth_root(), site_id)
902 "create nodes with PLCAPI"
903 return self.do_nodes()
904 def delete_nodes(self):
905 "delete nodes with PLCAPI"
906 return self.do_nodes(action="delete")
    def do_nodes(self, action="add"):
        """Worker for nodes()/delete_nodes(): create or delete every node
        declared under each site of the plc spec."""
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
                utils.header("Deleting nodes in site {}".format(test_site.name()))
                for node_spec in site_spec['nodes']:
                    test_node = TestNode(self, test_site, node_spec)
                    utils.header("Deleting {}".format(test_node.name()))
                    test_node.delete_node()
                utils.header("Creating nodes for site {} in {}".format(test_site.name(), self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node {}'.format(node_spec), node_spec)
                    test_node = TestNode(self, test_site, node_spec)
                    test_node.create_node()
925 def nodegroups(self):
926 "create nodegroups with PLCAPI"
927 return self.do_nodegroups("add")
928 def delete_nodegroups(self):
929 "delete nodegroups with PLCAPI"
930 return self.do_nodegroups("delete")
    def translate_timestamp(start, grain, timestamp):
        """Interpret lease timestamps: values under a year are relative
        (offsets counted in *grain* units from *start*); larger values are
        presumably passed through as absolute - else branch not shown here."""
        if timestamp < TestPlc.YEAR:
            return start + timestamp*grain
941 def timestamp_printable(timestamp):
942 return time.strftime('%m-%d %H:%M:%S UTC', time.gmtime(timestamp))
# create_leases step - NOTE(review): the 'def create_leases(self):' line
# (original 944) is not visible in this listing; what follows is the body.
945 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
946 now = int(time.time())
947 grain = self.apiserver.GetLeaseGranularity(self.auth_root())
948 print('API answered grain=', grain)
# align the leases' start on a grain boundary
949 start = (now//grain)*grain
951 # find out all nodes that are reservable
952 nodes = self.all_reservable_nodenames()
# NOTE(review): the guard before this header (original line 953) is missing
954 utils.header("No reservable node found - proceeding without leases")
957 # attach them to the leases as specified in plc_specs
958 # this is where the 'leases' field gets interpreted as relative or absolute
959 for lease_spec in self.plc_spec['leases']:
960 # skip the ones that come with a null slice id
961 if not lease_spec['slice']:
# translate relative timestamps against 'start' - see translate_timestamp
963 lease_spec['t_from'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_from'])
964 lease_spec['t_until'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_until'])
965 lease_addition = self.apiserver.AddLeases(self.auth_root(), nodes, lease_spec['slice'],
966 lease_spec['t_from'], lease_spec['t_until'])
967 if lease_addition['errors']:
968 utils.header("Cannot create leases, {}".format(lease_addition['errors']))
971 utils.header('Leases on nodes {} for {} from {:d} ({}) until {:d} ({})'\
972 .format(nodes, lease_spec['slice'],
973 lease_spec['t_from'], TestPlc.timestamp_printable(lease_spec['t_from']),
974 lease_spec['t_until'], TestPlc.timestamp_printable(lease_spec['t_until'])))
# delete_leases: wipe every lease known to the API in one call
978 def delete_leases(self):
979 "remove all leases in the myplc side"
980 lease_ids = [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
981 utils.header("Cleaning leases {}".format(lease_ids))
982 self.apiserver.DeleteLeases(self.auth_root(), lease_ids)
985 def list_leases(self):
986 "list all leases known to the myplc"
987 leases = self.apiserver.GetLeases(self.auth_root())
988 now = int(time.time())
# NOTE(review): the 'for l in leases:' line (original 989) is missing here
990 current = l['t_until'] >= now
# non-verbose mode only shows leases that have not expired yet
991 if self.options.verbose or current:
992 utils.header("{} {} from {} until {}"\
993 .format(l['hostname'], l['name'],
994 TestPlc.timestamp_printable(l['t_from']),
995 TestPlc.timestamp_printable(l['t_until'])))
998 # create nodegroups if needed, and populate
# Generic add/delete driver for nodegroups; scans the spec, then either
# creates tag types / nodegroups / node tags, or tears them down.
# NOTE(review): several original lines are missing from this listing
# (e.g. 1001 'groups_dict = {}', the try/except scaffolding around the
# API calls, and the action dispatch) - confirm against the full file.
999 def do_nodegroups(self, action="add"):
1000 # 1st pass to scan contents
1002 for site_spec in self.plc_spec['sites']:
1003 test_site = TestSite(self,site_spec)
1004 for node_spec in site_spec['nodes']:
1005 test_node = TestNode(self, test_site, node_spec)
1006 if 'nodegroups' in node_spec:
# a node may declare one name or a list of names
1007 nodegroupnames = node_spec['nodegroups']
1008 if isinstance(nodegroupnames, str):
1009 nodegroupnames = [ nodegroupnames ]
1010 for nodegroupname in nodegroupnames:
1011 if nodegroupname not in groups_dict:
1012 groups_dict[nodegroupname] = []
1013 groups_dict[nodegroupname].append(test_node.name())
1014 auth = self.auth_root()
1016 for (nodegroupname,group_nodes) in groups_dict.items():
1018 print('nodegroups:', 'dealing with nodegroup',\
1019 nodegroupname, 'on nodes', group_nodes)
1020 # first, check if the nodetagtype is here
1021 tag_types = self.apiserver.GetTagTypes(auth, {'tagname':nodegroupname})
1023 tag_type_id = tag_types[0]['tag_type_id']
# tag type not found: create it on the fly
1025 tag_type_id = self.apiserver.AddTagType(auth,
1026 {'tagname' : nodegroupname,
1027 'description' : 'for nodegroup {}'.format(nodegroupname),
1028 'category' : 'test'})
1029 print('located tag (type)', nodegroupname, 'as', tag_type_id)
1031 nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname' : nodegroupname})
1033 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
1034 print('created nodegroup', nodegroupname, \
1035 'from tagname', nodegroupname, 'and value', 'yes')
1036 # set node tag on all nodes, value='yes'
1037 for nodename in group_nodes:
1039 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
1041 traceback.print_exc()
1042 print('node', nodename, 'seems to already have tag', nodegroupname)
# double-check the tag landed with the expected value
1045 expect_yes = self.apiserver.GetNodeTags(auth,
1046 {'hostname' : nodename,
1047 'tagname' : nodegroupname},
1048 ['value'])[0]['value']
1049 if expect_yes != "yes":
1050 print('Mismatch node tag on node',nodename,'got',expect_yes)
1053 if not self.options.dry_run:
1054 print('Cannot find tag', nodegroupname, 'on node', nodename)
# 'delete' path: remove the nodegroup altogether
1058 print('cleaning nodegroup', nodegroupname)
1059 self.apiserver.DeleteNodeGroup(auth, nodegroupname)
1061 traceback.print_exc()
1065 # a list of TestNode objs
# NOTE(review): 'nodes = []' (original 1067) and the trailing return
# (original 1072) are missing from this listing
1066 def all_nodes(self):
1068 for site_spec in self.plc_spec['sites']:
1069 test_site = TestSite(self,site_spec)
1070 for node_spec in site_spec['nodes']:
1071 nodes.append(TestNode(self, test_site, node_spec))
1074 # return a list of tuples (nodename,qemuname)
1075 def all_node_infos(self) :
1077 for site_spec in self.plc_spec['sites']:
1078 node_infos += [ (node_spec['node_fields']['hostname'], node_spec['host_box']) \
1079 for node_spec in site_spec['nodes'] ]
def all_nodenames(self):
    """Hostnames of all nodes in the spec - first item of each (name, qemubox) pair."""
    return [hostname for (hostname, _) in self.all_node_infos()]
# Hostnames of nodes whose node_type is 'reservable'.
# NOTE(review): 'res = []' (original 1085) and 'return res' (original
# 1091) are missing from this listing.
1084 def all_reservable_nodenames(self):
1086 for site_spec in self.plc_spec['sites']:
1087 for node_spec in site_spec['nodes']:
1088 node_fields = node_spec['node_fields']
1089 if 'node_type' in node_fields and node_fields['node_type'] == 'reservable':
1090 res.append(node_fields['hostname'])
1093 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
# Poll the API until every node reports the expected boot_state, using
# the Completer machinery (one task per node).
1094 def nodes_check_boot_state(self, target_boot_state, timeout_minutes,
1095 silent_minutes, period_seconds = 15):
1096 if self.options.dry_run:
1100 class CompleterTaskBootState(CompleterTask):
1101 def __init__(self, test_plc, hostname):
1102 self.test_plc = test_plc
1103 self.hostname = hostname
# remembered so failure_epilogue can report the last observed state
1104 self.last_boot_state = 'undef'
1105 def actual_run(self):
# NOTE(review): the try/except wrapping and GetNodes filter/columns
# arguments (original 1106, 1108-1109, 1112-1113) are missing here
1107 node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(),
1110 self.last_boot_state = node['boot_state']
1111 return self.last_boot_state == target_boot_state
1115 return "CompleterTaskBootState with node {}".format(self.hostname)
1116 def failure_epilogue(self):
1117 print("node {} in state {} - expected {}"\
1118 .format(self.hostname, self.last_boot_state, target_boot_state))
1120 timeout = timedelta(minutes=timeout_minutes)
1121 graceout = timedelta(minutes=silent_minutes)
1122 period = timedelta(seconds=period_seconds)
1123 # the nodes that haven't checked yet - start with a full list and shrink over time
1124 utils.header("checking nodes boot state (expected {})".format(target_boot_state))
1125 tasks = [ CompleterTaskBootState(self,hostname) \
1126 for (hostname,_) in self.all_node_infos() ]
1127 message = 'check_boot_state={}'.format(target_boot_state)
1128 return Completer(tasks, message=message).run(timeout, graceout, period)
def nodes_booted(self):
    """Wait for all nodes to reach the 'boot' state - 30 min budget, quiet for the first 28."""
    expected_state = 'boot'
    return self.nodes_check_boot_state(expected_state,
                                       timeout_minutes=30,
                                       silent_minutes=28)
# Debug helper: dump the iptables configuration of the first qemu box.
# NOTE(review): any trailing return (original 1136+) is not visible here.
1133 def probe_kvm_iptables(self):
1134 (_,kvmbox) = self.all_node_infos()[0]
1135 TestSsh(kvmbox).run("iptables-save")
# Ping every node until it answers, within timeout_seconds.
1139 def check_nodes_ping(self, timeout_seconds=60, period_seconds=10):
1140 class CompleterTaskPingNode(CompleterTask):
1141 def __init__(self, hostname):
1142 self.hostname = hostname
1143 def run(self, silent):
1144 command="ping -c 1 -w 1 {} >& /dev/null".format(self.hostname)
1145 return utils.system(command, silent=silent) == 0
1146 def failure_epilogue(self):
1147 print("Cannot ping node with name {}".format(self.hostname))
1148 timeout = timedelta(seconds = timeout_seconds)
# NOTE(review): the 'graceout = ...' assignment (original 1149) is
# missing from this listing although graceout is used below
1150 period = timedelta(seconds = period_seconds)
1151 node_infos = self.all_node_infos()
1152 tasks = [ CompleterTaskPingNode(h) for (h,_) in node_infos ]
1153 return Completer(tasks, message='ping_node').run(timeout, graceout, period)
1155 # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1156 def ping_node(self):
1158 return self.check_nodes_ping()
# Try to ssh into every node, either with the debug key (bootCD mode) or
# the admin key (production 'boot' mode).
# NOTE(review): the if/else selecting 'message' and the debug-mode branch
# headers (original 1161, 1166-1167, 1170-1171) are missing here;
# 'message' below is defined in those missing lines.
1160 def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period_seconds=15):
1162 timeout = timedelta(minutes=timeout_minutes)
1163 graceout = timedelta(minutes=silent_minutes)
1164 period = timedelta(seconds=period_seconds)
1165 vservername = self.vservername
1168 completer_message = 'ssh_node_debug'
1169 local_key = "keys/{vservername}-debug.rsa".format(**locals())
1172 completer_message = 'ssh_node_boot'
1173 local_key = "keys/key_admin.rsa"
1174 utils.header("checking ssh access to nodes (expected in {} mode)".format(message))
1175 node_infos = self.all_node_infos()
1176 tasks = [ CompleterTaskNodeSsh(nodename, qemuname, local_key,
1177 boot_state=message, dry_run=self.options.dry_run) \
1178 for (nodename, qemuname) in node_infos ]
1179 return Completer(tasks, message=completer_message).run(timeout, graceout, period)
def ssh_node_debug(self):
    "Tries to ssh into nodes in debug mode with the debug ssh key"
    # time budgets for this step come from instance-level settings
    budget = self.ssh_node_debug_timeout
    quiet = self.ssh_node_debug_silent
    return self.check_nodes_ssh(debug=True,
                                timeout_minutes=budget,
                                silent_minutes=quiet)
def ssh_node_boot(self):
    "Tries to ssh into nodes in production mode with the root ssh key"
    # same shape as ssh_node_debug, with the production-mode budgets
    budget = self.ssh_node_boot_timeout
    quiet = self.ssh_node_boot_silent
    return self.check_nodes_ssh(debug=False,
                                timeout_minutes=budget,
                                silent_minutes=quiet)
def node_bmlogs(self):
    "Checks that there's a non-empty dir. /var/log/bm/raw"
    # 'ls' exits non-zero when the path is absent, which is all we test for
    command = self.actual_command_in_guest("ls /var/log/bm/raw")
    return utils.system(command) == 0
# Stubs dispatched per-node: the decorator lines (presumably @node_mapper
# and friends on the missing odd-numbered originals) are not visible in
# this listing - the bodies live in TestNode, see node_mapper in the
# file header.
1198 def qemu_local_init(self): pass
1200 def bootcd(self): pass
1202 def qemu_local_config(self): pass
1204 def qemu_export(self): pass
1206 def qemu_cleanlog(self): pass
1208 def nodestate_reinstall(self): pass
1210 def nodestate_upgrade(self): pass
1212 def nodestate_safeboot(self): pass
1214 def nodestate_boot(self): pass
1216 def nodestate_show(self): pass
1218 def nodedistro_f14(self): pass
1220 def nodedistro_f18(self): pass
1222 def nodedistro_f20(self): pass
1224 def nodedistro_f21(self): pass
1226 def nodedistro_f22(self): pass
1228 def nodedistro_show(self): pass
1230 ### check hooks : invoke scripts from hooks/{node,slice}
# run the hook scripts on a sample node / sliver respectively
1231 def check_hooks_node(self):
1232 return self.locate_first_node().check_hooks()
1233 def check_hooks_sliver(self) :
1234 return self.locate_first_sliver().check_hooks()
def check_hooks(self):
    "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
    # short-circuits: sliver hooks only run once the node hooks succeeded
    node_result = self.check_hooks_node()
    return node_result and self.check_hooks_sliver()
# Build one Completer task per (slice, node) pair and wait until the
# initscript stamp shows up in every sliver.
# NOTE(review): 'self.stamp = stamp' (original 1245), the task-message
# def line (1248), and 'tasks = []' (1253-1254) are missing from this
# listing - confirm against the full file.
1241 def do_check_initscripts(self):
1242 class CompleterTaskInitscript(CompleterTask):
1243 def __init__(self, test_sliver, stamp):
1244 self.test_sliver = test_sliver
1246 def actual_run(self):
1247 return self.test_sliver.check_initscript_stamp(self.stamp)
1249 return "initscript checker for {}".format(self.test_sliver.name())
1250 def failure_epilogue(self):
1251 print("initscript stamp {} not found in sliver {}"\
1252 .format(self.stamp, self.test_sliver.name()))
1255 for slice_spec in self.plc_spec['slices']:
# slices without an initscriptstamp are skipped
1256 if 'initscriptstamp' not in slice_spec:
1258 stamp = slice_spec['initscriptstamp']
1259 slicename = slice_spec['slice_fields']['name']
1260 for nodename in slice_spec['nodenames']:
1261 print('nodename', nodename, 'slicename', slicename, 'stamp', stamp)
1262 site,node = self.locate_node(nodename)
1263 # xxx - passing the wrong site - probably harmless
1264 test_site = TestSite(self, site)
1265 test_slice = TestSlice(self, test_site, slice_spec)
1266 test_node = TestNode(self, test_site, node)
1267 test_sliver = TestSliver(self, test_node, test_slice)
1268 tasks.append(CompleterTaskInitscript(test_sliver, stamp))
1269 return Completer(tasks, message='check_initscripts').\
1270 run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
def check_initscripts(self):
    "check that the initscripts have triggered"
    # delegate to the Completer-based worker
    return self.do_check_initscripts()
# Push every initscript declared in the spec into the PLCAPI.
# NOTE(review): the trailing return (original 1281) is missing here.
1276 def initscripts(self):
1277 "create initscripts with PLCAPI"
1278 for initscript in self.plc_spec['initscripts']:
1279 utils.pprint('Adding Initscript in plc {}'.format(self.plc_spec['name']), initscript)
1280 self.apiserver.AddInitScript(self.auth_root(), initscript['initscript_fields'])
# NOTE(review): the try/except around the deletion and the final return
# (original 1288, 1291, 1293-1294) are missing from this listing.
1283 def delete_initscripts(self):
1284 "delete initscripts with PLCAPI"
1285 for initscript in self.plc_spec['initscripts']:
1286 initscript_name = initscript['initscript_fields']['name']
1287 print(('Attempting to delete {} in plc {}'.format(initscript_name, self.plc_spec['name'])))
1289 self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
1290 print(initscript_name, 'deleted')
1292 print('deletion went wrong - probably did not exist')
# slices step - NOTE(review): its 'def slices(self):' line (original
# 1296) is not visible in this listing.
1297 "create slices with PLCAPI"
1298 return self.do_slices(action="add")
def delete_slices(self):
    "delete slices with PLCAPI"
    # same driver as slices(), in 'delete' mode
    mode = "delete"
    return self.do_slices(action=mode)
def fill_slices(self):
    "add nodes in slices with PLCAPI"
    # same driver as slices(), in 'fill' mode
    mode = "fill"
    return self.do_slices(action=mode)
def empty_slices(self):
    "remove nodes from slices with PLCAPI"
    # same driver as slices(), in 'empty' mode
    mode = "empty"
    return self.do_slices(action=mode)
# Generic slice driver: dispatch on 'action' for every slice in the spec.
# NOTE(review): the final 'else:' introducing create_slice and the
# trailing return (original 1323, 1325) are missing from this listing.
1312 def do_slices(self, action="add"):
1313 for slice in self.plc_spec['slices']:
1314 site_spec = self.locate_site(slice['sitename'])
1315 test_site = TestSite(self,site_spec)
1316 test_slice=TestSlice(self,test_site,slice)
1317 if action == "delete":
1318 test_slice.delete_slice()
1319 elif action == "fill":
1320 test_slice.add_nodes()
1321 elif action == "empty":
1322 test_slice.delete_nodes()
1324 test_slice.create_slice()
# Per-slice steps; slice_mapper__tasks(timeout, silent, period) turns the
# TestSlice method of the same name into a Completer run over all slices.
1327 @slice_mapper__tasks(20, 10, 15)
1328 def ssh_slice(self): pass
1329 @slice_mapper__tasks(20, 19, 15)
1330 def ssh_slice_off(self): pass
1331 @slice_mapper__tasks(1, 1, 15)
1332 def slice_fs_present(self): pass
1333 @slice_mapper__tasks(1, 1, 15)
1334 def slice_fs_deleted(self): pass
1336 # use another name so we can exclude/ignore it from the tests on the nightly command line
1337 def ssh_slice_again(self): return self.ssh_slice()
1338 # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1339 # but for some reason the ignore-wrapping thing would not
# NOTE(review): the decorators for the next three stubs (original
# 1341, 1343, 1346) are missing from this listing.
1342 def ssh_slice_basics(self): pass
1344 def check_vsys_defaults(self): pass
1347 def keys_clear_known_hosts(self): pass
# NOTE(review): the docstring quote lines (original 1350, 1352) around
# the description below are missing from this listing.
1349 def plcapi_urls(self):
1351 attempts to reach the PLCAPI with various forms for the URL
1353 return PlcapiUrlScanner(self.auth_root(), ip=self.vserverip).scan()
def speed_up_slices(self):
    "tweak nodemanager cycle (wait time) to 30+/-10 s"
    # nodemanager period of 30s with a 10s randomization
    period, spread = 30, 10
    return self._speed_up_slices(period, spread)
def super_speed_up_slices(self):
    "dev mode: tweak nodemanager cycle (wait time) to 5+/-1 s"
    # even tighter nodemanager cycle, for interactive development
    period, spread = 5, 1
    return self._speed_up_slices(period, spread)
# Deploy a nodemanager sysconfig template (period p, randomization r)
# into the guest and register it as a PLCAPI conf file if needed.
# NOTE(review): the trailing return (original 1377) is missing here.
1362 def _speed_up_slices(self, p, r):
1363 # create the template on the server-side
1364 template = "{}.nodemanager".format(self.name())
1365 with open(template,"w") as template_file:
1366 template_file.write('OPTIONS="-p {} -r {} -d"\n'.format(p, r))
1367 in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1368 remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1369 self.test_ssh.copy_abs(template, remote)
# register the conf file with the API only once
1371 if not self.apiserver.GetConfFiles(self.auth_root(),
1372 {'dest' : '/etc/sysconfig/nodemanager'}):
1373 self.apiserver.AddConfFile(self.auth_root(),
1374 {'dest' : '/etc/sysconfig/nodemanager',
1375 'source' : 'PlanetLabConf/nodemanager',
1376 'postinstall_cmd' : 'service nm restart',})
# Same mechanism with a fixed verbose/fast template.
# NOTE(review): the trailing return (original 1387+) is missing here.
1379 def debug_nodemanager(self):
1380 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1381 template = "{}.nodemanager".format(self.name())
1382 with open(template,"w") as template_file:
1383 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1384 in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1385 remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1386 self.test_ssh.copy_abs(template, remote)
# qemu steps mapped onto nodes; the decorator lines (original 1389,
# 1391-1392, 1394-1395) are missing from this listing.
1390 def qemu_start(self) : pass
1393 def qemu_timestamp(self) : pass
1396 def qemu_nodefamily(self): pass
1398 # when a spec refers to a node possibly on another plc
# Search this plc first, then the other plcs, for the given sliver.
# NOTE(review): the try/except that lets the loop fall through to the
# next plc (original 1401, 1403-1404) is missing from this listing.
1399 def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
1400 for plc in [ self ] + other_plcs:
1402 return plc.locate_sliver_obj(nodename, slicename)
1405 raise Exception("Cannot locate sliver {}@{} among all PLCs".format(nodename, slicename))
1407 # implement this one as a cross step so that we can take advantage of different nodes
1408 # in multi-plcs mode
# Overall flow: (1) wait for network readiness inside every involved
# sliver, (2) start a tcp server in each server sliver, (3) run the
# matching clients.  Several original lines (early returns, 'tasks = []',
# the port selection, and the result bookkeeping) are missing from this
# listing - confirm against the full file before editing.
1409 def cross_check_tcp(self, other_plcs):
1410 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1411 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1412 utils.header("check_tcp: no/empty config found")
1414 specs = self.plc_spec['tcp_specs']
1417 # first wait for the network to be up and ready from the slices
1418 class CompleterTaskNetworkReadyInSliver(CompleterTask):
1419 def __init__(self, test_sliver):
1420 self.test_sliver = test_sliver
1421 def actual_run(self):
1422 return self.test_sliver.check_tcp_ready(port = 9999)
1424 return "network ready checker for {}".format(self.test_sliver.name())
1425 def failure_epilogue(self):
1426 print("could not bind port from sliver {}".format(self.test_sliver.name()))
1430 managed_sliver_names = set()
1432 # locate the TestSliver instances involved, and cache them in the spec instance
1433 spec['s_sliver'] = self.locate_sliver_obj_cross(spec['server_node'], spec['server_slice'], other_plcs)
1434 spec['c_sliver'] = self.locate_sliver_obj_cross(spec['client_node'], spec['client_slice'], other_plcs)
1435 message = "Will check TCP between s={} and c={}"\
1436 .format(spec['s_sliver'].name(), spec['c_sliver'].name())
1437 if 'client_connect' in spec:
1438 message += " (using {})".format(spec['client_connect'])
1439 utils.header(message)
1440 # we need to check network presence in both slivers, but also
1441 # avoid to insert a sliver several times
1442 for sliver in [ spec['s_sliver'], spec['c_sliver'] ]:
1443 if sliver.name() not in managed_sliver_names:
1444 tasks.append(CompleterTaskNetworkReadyInSliver(sliver))
1445 # add this sliver's name in the set
1446 managed_sliver_names .update( {sliver.name()} )
1448 # wait for the network to be OK in all server sides
1449 if not Completer(tasks, message='check for network readiness in slivers').\
1450 run(timedelta(seconds=30), timedelta(seconds=24), period=timedelta(seconds=5)):
1453 # run server and client
1457 # the issue here is that we have the server run in background
1458 # and so we have no clue if it took off properly or not
1459 # looks like in some cases it does not
1460 address = spec['s_sliver'].test_node.name()
1461 if not spec['s_sliver'].run_tcp_server(address, port, timeout=20):
1465 # idem for the client side
1466 # use nodename from located sliver, unless 'client_connect' is set
1467 if 'client_connect' in spec:
1468 destination = spec['client_connect']
1470 destination = spec['s_sliver'].test_node.name()
1471 if not spec['c_sliver'].run_tcp_client(destination, port):
1475 # painfully enough, we need to allow for some time as netflow might show up last
def check_system_slice(self):
    "all nodes: check that a system slice is alive"
    # netflow is broken under lxc and drl is not built in the wtx distro,
    # so finding either of the two system slices counts as a success
    netflow_found = self.check_netflow()
    return netflow_found or self.check_drl()
# the two candidate system slices, probed through the generic checker
1484 def check_netflow(self): return self._check_system_slice('netflow')
1485 def check_drl(self): return self._check_system_slice('drl')
1487 # we have the slices up already here, so it should not take too long
# One Completer task per node; each defers to TestNode._check_system_slice.
# NOTE(review): the task-message def line (original 1495) is missing here.
1488 def _check_system_slice(self, slicename, timeout_minutes=5, period_seconds=15):
1489 class CompleterTaskSystemSlice(CompleterTask):
1490 def __init__(self, test_node, dry_run):
1491 self.test_node = test_node
1492 self.dry_run = dry_run
1493 def actual_run(self):
1494 return self.test_node._check_system_slice(slicename, dry_run=self.dry_run)
1496 return "System slice {} @ {}".format(slicename, self.test_node.name())
1497 def failure_epilogue(self):
1498 print("COULD not find system slice {} @ {}".format(slicename, self.test_node.name()))
1499 timeout = timedelta(minutes=timeout_minutes)
# no grace period: progress is reported from the first iteration
1500 silent = timedelta(0)
1501 period = timedelta(seconds=period_seconds)
1502 tasks = [ CompleterTaskSystemSlice(test_node, self.options.dry_run) \
1503 for test_node in self.all_nodes() ]
1504 return Completer(tasks, message='_check_system_slice').run(timeout, silent, period)
1506 def plcsh_stress_test(self):
1507 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1508 # install the stress-test in the plc image
1509 location = "/usr/share/plc_api/plcsh_stress_test.py"
1510 remote = "{}/{}".format(self.vm_root_in_host(), location)
1511 self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
# NOTE(review): the initial 'command = ...' assignment (original 1512)
# is missing from this listing
1513 command += " -- --check"
1514 if self.options.size == 1:
1515 command += " --tiny"
1516 return self.run_in_guest(command) == 0
1518 # populate runs the same utility with slightly different options
1519 # in particular runs with --preserve (dont cleanup) and without --check
1520 # also it gets run twice, once with the --foreign option for creating fake foreign entries
def sfa_install_all(self):
    "yum install sfa sfa-plc sfa-sfatables sfa-client"
    # install the packages, then make sure both sfa services start at boot
    if not self.dnf_install("sfa sfa-plc sfa-sfatables sfa-client"):
        return False
    for unit in ('sfa-registry', 'sfa-aggregate'):
        if self.run_in_guest("systemctl enable {}".format(unit)) != 0:
            return False
    return True
# Install the core sfa package only.
# NOTE(review): the one-line docstring (original 1529) is missing from
# this listing.
1528 def sfa_install_core(self):
1530 return self.dnf_install("sfa")
def sfa_install_plc(self):
    "yum install sfa-plc"
    # single-package install through the generic dnf helper
    package = "sfa-plc"
    return self.dnf_install(package)
def sfa_install_sfatables(self):
    "yum install sfa-sfatables"
    # single-package install through the generic dnf helper
    package = "sfa-sfatables"
    return self.dnf_install(package)
1540 # for some very odd reason, this sometimes fails with the following symptom
1541 # # yum install sfa-client
1542 # Setting up Install Process
1544 # Downloading Packages:
1545 # Running rpm_check_debug
1546 # Running Transaction Test
1547 # Transaction Test Succeeded
1548 # Running Transaction
1549 # Transaction couldn't start:
1550 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1551 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1552 # even though in the same context I have
1553 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1554 # Filesystem Size Used Avail Use% Mounted on
1555 # /dev/hdv1 806G 264G 501G 35% /
1556 # none 16M 36K 16M 1% /tmp
1558 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1559 def sfa_install_client(self):
1560 "yum install sfa-client"
1561 first_try = self.dnf_install("sfa-client")
1564 utils.header("********** Regular yum failed - special workaround in place, 2nd chance")
1565 code, cached_rpm_path = \
1566 utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1567 utils.header("rpm_path=<<{}>>".format(rpm_path))
1569 self.run_in_guest("rpm -i {}".format(cached_rpm_path))
1570 return self.dnf_check_installed("sfa-client")
def sfa_dbclean(self):
    "thoroughly wipes off the SFA database"
    # try the nuke commands that successive sfa releases have shipped,
    # stopping at the first one that succeeds
    candidates = ("sfaadmin reg nuke",
                  "sfa-nuke.py",
                  "sfa-nuke-plc.py",
                  "sfaadmin registry nuke")
    return any(self.run_in_guest(command) == 0 for command in candidates)
# NOTE(review): the trailing return (original 1582) is missing here.
1579 def sfa_fsclean(self):
1580 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1581 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
# Remove the PLC sites/persons that the SFA scenario created.
# NOTE(review): the try/except scaffolding around both Delete calls
# (originals 1591, 1593, 1599, 1601) is missing from this listing.
1584 def sfa_plcclean(self):
1585 "cleans the PLC entries that were created as a side effect of running the script"
1587 sfa_spec = self.plc_spec['sfa']
1589 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1590 login_base = auth_sfa_spec['login_base']
1592 self.apiserver.DeleteSite(self.auth_root(),login_base)
1594 print("Site {} already absent from PLC db".format(login_base))
1596 for spec_name in ['pi_spec', 'user_spec']:
1597 user_spec = auth_sfa_spec[spec_name]
1598 username = user_spec['email']
1600 self.apiserver.DeletePerson(self.auth_root(),username)
1602 # this in fact is expected as sites delete their members
1603 #print "User {} already absent from PLC db".format(username)
1606 print("REMEMBER TO RUN sfa_import AGAIN")
# Best-effort teardown: every command's result is deliberately ignored.
1609 def sfa_uninstall(self):
1610 "uses rpm to uninstall sfa - ignore result"
1611 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1612 self.run_in_guest("rm -rf /var/lib/sfa")
1613 self.run_in_guest("rm -rf /etc/sfa")
1614 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
# second pass without the rpm scripts, in case the first -e choked
1616 self.run_in_guest("rpm -e --noscripts sfa-plc")
1619 ### run unit tests for SFA
1620 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1621 # Running Transaction
1622 # Transaction couldn't start:
1623 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1624 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1625 # no matter how many Gbs are available on the testplc
1626 # could not figure out what's wrong, so...
1627 # if the yum install phase fails, consider the test is successful
1628 # other combinations will eventually run it hopefully
# NOTE(review): the 'return True' after the install-failure warning
# (original 1635) is missing from this listing.
1629 def sfa_utest(self):
1630 "dnf install sfa-tests and run SFA unittests"
1631 self.run_in_guest("dnf -y install sfa-tests")
1632 # failed to install - forget it
1633 if self.run_in_guest("rpm -q sfa-tests") != 0:
1634 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1636 return self.run_in_guest("/usr/share/sfa/tests/testAll.py") == 0
# confdir body - NOTE(review): the 'def confdir(self):' line (original
# 1639) and the trailing 'return dirname' (original 1645) are missing
# from this listing.  Creates conf.<plcname> on demand.
1640 dirname = "conf.{}".format(self.plc_spec['name'])
1641 if not os.path.isdir(dirname):
1642 utils.system("mkdir -p {}".format(dirname))
1643 if not os.path.isdir(dirname):
1644 raise Exception("Cannot create config dir for plc {}".format(self.name()))
def conffile(self, filename):
    """Full path of *filename* inside this plc's config dir."""
    return "/".join([self.confdir(), filename])
def confsubdir(self, dirname, clean, dry_run=False):
    """Return (and create if needed) a subdir of the config dir.

    When clean is set the subdir is wiped first; callers like
    sfi_configure rely on the returned path.
    Raises Exception when the dir cannot be created (unless dry_run).
    """
    subdirname = "{}/{}".format(self.confdir(), dirname)
    if clean:
        utils.system("rm -rf {}".format(subdirname))
    if not os.path.isdir(subdirname):
        utils.system("mkdir -p {}".format(subdirname))
    if not dry_run and not os.path.isdir(subdirname):
        # BUG FIX: used to 'raise' a plain string, which is a TypeError in
        # Python 3; now consistent with confdir()'s Exception
        raise Exception("Cannot create config subdir {} for plc {}".format(dirname, self.name()))
    return subdirname
def conffile_clean(self, filename):
    """Remove *filename* from the config dir; True when the removal succeeded."""
    full_path = self.conffile(filename)
    command = "rm -rf {}".format(full_path)
    return utils.system(command) == 0
# Drive sfa-config-tty non-interactively by piping it a scripted session
# (e <var> / <value> pairs, then write / reload / quit).
# NOTE(review): the trailing return (original 1675) is missing here.
1664 def sfa_configure(self):
1665 "run sfa-config-tty"
1666 tmpname = self.conffile("sfa-config-tty")
1667 with open(tmpname,'w') as fileconf:
1668 for var, value in self.plc_spec['sfa']['settings'].items():
1669 fileconf.write('e {}\n{}\n'.format(var, value))
# w = write config, R = reload services, q = quit
1670 fileconf.write('w\n')
1671 fileconf.write('R\n')
1672 fileconf.write('q\n')
1673 utils.system('cat {}'.format(tmpname))
1674 self.run_in_guest_piped('cat {}'.format(tmpname), 'sfa-config-tty')
def aggregate_xml_line(self):
    """One <aggregate/> element describing this plc, for aggregates.xml."""
    sfa_spec = self.plc_spec['sfa']
    hrn = sfa_spec['settings']['SFA_REGISTRY_ROOT_AUTH']
    return '<aggregate addr="{}" hrn="{}" port="{}"/>'\
        .format(self.vserverip, hrn, sfa_spec['neighbours-port'])
def registry_xml_line(self):
    """One <registry/> element describing this plc, for registries.xml (fixed port 12345)."""
    root_auth = self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="{}" hrn="{}" port="12345"/>'\
        .format(self.vserverip, root_auth)
1687 # a cross step that takes all other plcs in argument
# Write aggregates.xml / registries.xml locally then push them into the
# guest.  NOTE(review): the early-exit for the single-plc case (original
# 1691-1692) is missing from this listing.
1688 def cross_sfa_configure(self, other_plcs):
1689 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1690 # of course with a single plc, other_plcs is an empty list
1693 agg_fname = self.conffile("agg.xml")
1694 with open(agg_fname,"w") as out:
1695 out.write("<aggregates>{}</aggregates>\n"\
1696 .format(" ".join([ plc.aggregate_xml_line() for plc in other_plcs ])))
1697 utils.header("(Over)wrote {}".format(agg_fname))
1698 reg_fname=self.conffile("reg.xml")
1699 with open(reg_fname,"w") as out:
1700 out.write("<registries>{}</registries>\n"\
1701 .format(" ".join([ plc.registry_xml_line() for plc in other_plcs ])))
1702 utils.header("(Over)wrote {}".format(reg_fname))
# success only when both files made it into the guest's /etc/sfa
1703 return self.test_ssh.copy_abs(agg_fname,
1704 '/{}/etc/sfa/aggregates.xml'.format(self.vm_root_in_host())) == 0 \
1705 and self.test_ssh.copy_abs(reg_fname,
1706 '/{}/etc/sfa/registries.xml'.format(self.vm_root_in_host())) == 0
def sfa_import(self):
    "use sfaadmin to import from plc"
    # the registry root auth is implicit in the guest's sfa config, so the
    # only thing to do is to trigger the import
    # (removed a local 'auth' that was computed from plc_spec but never used)
    return self.run_in_guest('sfaadmin reg import_registry') == 0
# pip-install sfa's python dependencies, then start both services.
# NOTE(review): the rest of the sfa_dependencies list (original
# 1717-1720) and the 'return (deps' line (original 1723) are missing
# from this listing.
1713 def sfa_start(self):
1714 "start SFA through systemctl"
1715 sfa_dependencies = [
1716 'sqlalchemy-migrate',
1721 deps = all((self.run_in_guest(f"pip2 install {dep}") == 0)
1722 for dep in sfa_dependencies)
1724 and self.start_stop_systemd('sfa-registry', 'start')
1725 and self.start_stop_systemd('sfa-aggregate', 'start'))
# Build one sfi client config dir per auth spec and push it to /root/sfi
# in the guest.  NOTE(review): the dry-run early return (original 1732)
# and the trailing returns (originals 1747-1748) are missing here.
1728 def sfi_configure(self):
1729 "Create /root/sfi on the plc side for sfi client configuration"
1730 if self.options.dry_run:
1731 utils.header("DRY RUN - skipping step")
1733 sfa_spec = self.plc_spec['sfa']
1734 # cannot use auth_sfa_mapper to pass dir_name
1735 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1736 test_slice = TestAuthSfa(self, slice_spec)
1737 dir_basename = os.path.basename(test_slice.sfi_path())
1738 dir_name = self.confsubdir("dot-sfi/{}".format(dir_basename),
1739 clean=True, dry_run=self.options.dry_run)
1740 test_slice.sfi_configure(dir_name)
1741 # push into the remote /root/sfi area
1742 location = test_slice.sfi_path()
1743 remote = "{}/{}".format(self.vm_root_in_host(), location)
1744 self.test_ssh.mkdir(remote, abs=True)
1745 # need to strip last level of remote otherwise we get an extra dir level
1746 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
# NOTE(review): the trailing return (original 1753) is missing here.
1750 def sfi_clean(self):
1751 "clean up /root/sfi on the plc side"
1752 self.run_in_guest("rm -rf /root/sfi")
# Copy the shipped empty rspec into every auth's sfi dir in the guest.
# NOTE(review): the failure/success returns (originals 1764-1766) are
# missing from this listing.
1755 def sfa_rspec_empty(self):
1756 "expose a static empty rspec (ships with the tests module) in the sfi directory"
1757 filename = "empty-rspec.xml"
1759 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1760 test_slice = TestAuthSfa(self, slice_spec)
1761 in_vm = test_slice.sfi_path()
1762 remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1763 if self.test_ssh.copy_abs(filename, remote) !=0:
# SFA scenario steps; their @auth_sfa_mapper-style decorators sit on the
# missing odd-numbered original lines - the bodies live in TestAuthSfa.
1768 def sfa_register_site(self): pass
1770 def sfa_register_pi(self): pass
1772 def sfa_register_user(self): pass
1774 def sfa_update_user(self): pass
1776 def sfa_register_slice(self): pass
1778 def sfa_renew_slice(self): pass
1780 def sfa_get_expires(self): pass
1782 def sfa_discover(self): pass
1784 def sfa_rspec(self): pass
1786 def sfa_allocate(self): pass
1788 def sfa_allocate_empty(self): pass
1790 def sfa_provision(self): pass
1792 def sfa_provision_empty(self): pass
1794 def sfa_describe(self): pass
1796 def sfa_check_slice_plc(self): pass
1798 def sfa_check_slice_plc_empty(self): pass
1800 def sfa_update_slice(self): pass
1802 def sfa_remove_user_from_slice(self): pass
1804 def sfa_insert_user_in_slice(self): pass
1806 def sfi_list(self): pass
1808 def sfi_show_site(self): pass
1810 def sfi_show_slice(self): pass
1812 def sfi_show_slice_researchers(self): pass
1814 def ssh_slice_sfa(self): pass
1816 def sfa_delete_user(self): pass
1818 def sfa_delete_slice(self): pass
# sfa_stop step - NOTE(review): its 'def sfa_stop(self):' line (original
# 1820) is not visible in this listing.  Stops aggregate first, then the
# registry.
1821 "stop sfa through systemctl"
1822 return (self.start_stop_systemd('sfa-aggregate', 'stop') and
1823 self.start_stop_systemd('sfa-registry', 'stop'))
# populate step - NOTE(review): its def line (original 1825) and the
# initial 'command = ...' assignment (original 1831) are not visible in
# this listing.
1826 "creates random entries in the PLCAPI"
1827 # install the stress-test in the plc image
1828 location = "/usr/share/plc_api/plcsh_stress_test.py"
1829 remote = "{}/{}".format(self.vm_root_in_host(), location)
1830 self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1832 command += " -- --preserve --short-names"
1833 local = (self.run_in_guest(command) == 0);
1834 # second run with --foreign
1835 command += ' --foreign'
# note: 'remote' is rebound here from the path above to the 2nd run's status
1836 remote = (self.run_in_guest(command) == 0);
1837 return local and remote
1840 ####################
# bonding steps; the @bonding_redirector decorators sit on the missing
# original lines - see bonding_redirector in the file header.
1842 def bonding_init_partial(self): pass
1845 def bonding_add_yum(self): pass
1848 def bonding_install_rpms(self): pass
1850 ####################
# Collect every log source in turn; each sub-step is delegated to a
# dedicated gather_* helper.  NOTE(review): the trailing return
# (original 1882+) is missing from this listing.
1852 def gather_logs(self):
1853 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1854 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1855 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1856 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1857 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1858 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1859 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1861 print("-------------------- TestPlc.gather_logs : PLC's /var/log")
1862 self.gather_var_logs()
1864 print("-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/")
1865 self.gather_pgsql_logs()
1867 print("-------------------- TestPlc.gather_logs : PLC's /root/sfi/")
1868 self.gather_root_sfi()
1870 print("-------------------- TestPlc.gather_logs : nodes's QEMU logs")
1871 for site_spec in self.plc_spec['sites']:
1872 test_site = TestSite(self,site_spec)
1873 for node_spec in site_spec['nodes']:
1874 test_node = TestNode(self, test_site, node_spec)
1875 test_node.gather_qemu_logs()
1877 print("-------------------- TestPlc.gather_logs : nodes's /var/log")
1878 self.gather_nodes_var_logs()
1880 print("-------------------- TestPlc.gather_logs : sample sliver's /var/log")
1881 self.gather_slivers_var_logs()
# Untar each sliver's /var/log into logs/sliver.var-log.<sliver>/.
# NOTE(review): the trailing return (original 1890) is missing here.
1884 def gather_slivers_var_logs(self):
1885 for test_sliver in self.all_sliver_objs():
1886 remote = test_sliver.tar_var_logs()
1887 utils.system("mkdir -p logs/sliver.var-log.{}".format(test_sliver.name()))
1888 command = remote + " | tar -C logs/sliver.var-log.{} -xf -".format(test_sliver.name())
1889 utils.system(command)
1892 def gather_var_logs(self):
1893 utils.system("mkdir -p logs/myplc.var-log.{}".format(self.name()))
1894 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1895 command = to_plc + "| tar -C logs/myplc.var-log.{} -xf -".format(self.name())
1896 utils.system(command)
1897 command = "chmod a+r,a+x logs/myplc.var-log.{}/httpd".format(self.name())
1898 utils.system(command)
1900 def gather_pgsql_logs(self):
1901 utils.system("mkdir -p logs/myplc.pgsql-log.{}".format(self.name()))
1902 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1903 command = to_plc + "| tar -C logs/myplc.pgsql-log.{} -xf -".format(self.name())
1904 utils.system(command)
1906 def gather_root_sfi(self):
1907 utils.system("mkdir -p logs/sfi.{}".format(self.name()))
1908 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1909 command = to_plc + "| tar -C logs/sfi.{} -xf -".format(self.name())
1910 utils.system(command)
1912 def gather_nodes_var_logs(self):
1913 for site_spec in self.plc_spec['sites']:
1914 test_site = TestSite(self, site_spec)
1915 for node_spec in site_spec['nodes']:
1916 test_node = TestNode(self, test_site, node_spec)
1917 test_ssh = TestSsh(test_node.name(), key="keys/key_admin.rsa")
1918 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1919 command = command + "| tar -C logs/node.var-log.{} -xf -".format(test_node.name())
1920 utils.system("mkdir -p logs/node.var-log.{}".format(test_node.name()))
1921 utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile(self, database):
        # uses options.dbname if it is found
        # NOTE(review): this extract looks truncated -- the original presumably
        # wraps the next two lines in a try/except with a fallback `name` (the
        # dangling `if` below has no visible body); confirm against the full
        # file before touching this method.
        name = self.options.dbname
        if not isinstance(name, str):
        return "/root/{}-{}.sql".format(database, name)
    def plc_db_dump(self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        # NOTE(review): "planetab5" (sic) is only the dump-file name stem; the
        # database dumped is really planetlab5. plc_db_restore uses the same
        # misspelled stem, so the pair stays consistent -- fix both together
        # or not at all.
        dump=self.dbfile("planetab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
        utils.header('Dumped planetlab5 database in {}'.format(dump))
        # NOTE(review): step methods return a boolean but no return statement
        # is visible here -- possibly truncated in this extract.
    def plc_db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        # NOTE(review): "planetab5" (sic) matches the stem used by plc_db_dump,
        # so a dump/restore round-trip finds the same file -- fix both together
        # or not at all.
        dump = self.dbfile("planetab5")
        # stop the web server while the database is being replaced
        self.run_in_guest('systemctl stop httpd')
        # xxx - need another wrapper
        # drop, recreate, then reload the database from the dump file
        self.run_in_guest_piped('echo drop database planetlab5', 'psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump)
        ##starting httpd service
        self.run_in_guest('systemctl start httpd')

        utils.header('Database restored from ' + dump)
    # NOTE(review): no decorator and no self/cls parameter are visible, so this
    # is presumably a @staticmethod; this extract also looks truncated -- the
    # guard testing for '@' before split(), and the body (likely `continue`) of
    # the SEP/SEPSFA test, are missing. Confirm against the full file.
    def create_ignore_steps():
        # for every known step, synthesize a <step>_ignore variant whose
        # failure does not abort the run (see ignore_result)
        for step in TestPlc.default_steps + TestPlc.other_steps:
            # default step can have a plc qualifier
            step, qualifier = step.split('@')
            # or be defined as forced or ignored by default
            for keyword in ['_ignore', '_force']:
                if step.endswith(keyword):
                    step=step.replace(keyword,'')
            # SEP / SEPSFA are separator markers, not real steps
            if step == SEP or step == SEPSFA :
            method = getattr(TestPlc,step)
            name = step + '_ignore'
            wrapped = ignore_result(method)
            # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
            setattr(TestPlc, name, wrapped)
    # def ssh_slice_again_ignore (self): pass
    # def check_initscripts_ignore (self): pass

    def standby_1_through_20(self):
        """convenience function to wait for a specified number of minutes"""
        # NOTE(review): the body (presumably just `pass`) is not visible in
        # this extract; this def only documents the standby_<n> family below.
    # NOTE(review): each standby_<n> stub presumably carries a @standby_generic
    # decorator (stripped from this extract) that turns the empty body into a
    # real n-minute wait via standby() -- confirm against the full file.
    def standby_1(): pass
    def standby_2(): pass
    def standby_3(): pass
    def standby_4(): pass
    def standby_5(): pass
    def standby_6(): pass
    def standby_7(): pass
    def standby_8(): pass
    def standby_9(): pass
    def standby_10(): pass
    def standby_11(): pass
    def standby_12(): pass
    def standby_13(): pass
    def standby_14(): pass
    def standby_15(): pass
    def standby_16(): pass
    def standby_17(): pass
    def standby_18(): pass
    def standby_19(): pass
    def standby_20(): pass
2024 # convenience for debugging the test logic
2025 def yes(self): return True
2026 def no(self): return False
2027 def fail(self): return False