1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
12 from Completer import Completer, CompleterTask
13 from TestSite import TestSite
14 from TestNode import TestNode, CompleterTaskNodeSsh
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
25 from TestBonding import TestBonding
27 has_sfa_cache_filename = "sfa-cache"
29 # step methods must take (self) and return a boolean (options is a member of the class)
31 def standby(minutes, dry_run):
32 utils.header('Entering StandBy for {:d} minutes'.format(minutes))
36 time.sleep(60*minutes)
39 def standby_generic(func):
41 minutes = int(func.__name__.split("_")[1])
42 return standby(minutes, self.options.dry_run)
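# a minimal sketch of how standby_generic is meant to be used (the step name
# below is hypothetical, shown only for illustration): wrapping a method named
# e.g. standby_5_whatever makes the wrapper parse the "5" out of its own name
# and call standby(5, self.options.dry_run), i.e. pause the run for 5 minutes;
# this is presumably what backs the standby_1 .. standby_20 steps referred to
# by 'standby_1_through_20' in the step lists below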
45 def node_mapper(method):
46 def map_on_nodes(self, *args, **kwds):
48 node_method = TestNode.__dict__[method.__name__]
49 for test_node in self.all_nodes():
50 if not node_method(test_node, *args, **kwds):
53 # maintain __name__ for ignore_result
54 map_on_nodes.__name__ = method.__name__
55 # restore the doc text
56 map_on_nodes.__doc__ = TestNode.__dict__[method.__name__].__doc__
59 def slice_mapper(method):
60 def map_on_slices(self):
62 slice_method = TestSlice.__dict__[method.__name__]
63 for slice_spec in self.plc_spec['slices']:
64 site_spec = self.locate_site (slice_spec['sitename'])
65 test_site = TestSite(self,site_spec)
66 test_slice = TestSlice(self,test_site,slice_spec)
67 if not slice_method(test_slice, self.options):
70 # maintain __name__ for ignore_result
71 map_on_slices.__name__ = method.__name__
72 # restore the doc text
73 map_on_slices.__doc__ = TestSlice.__dict__[method.__name__].__doc__
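# a minimal sketch of the mapper pattern (node_example_step is hypothetical):
# assuming TestNode defines a method of the same name, writing
#     @node_mapper
#     def node_example_step(self): pass
# in TestPlc dispatches TestNode.node_example_step to every node of this plc;
# slice_mapper does the same for TestSlice methods, once per entry in
# plc_spec['slices']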
76 def bonding_redirector(method):
77 bonding_name = method.__name__.replace('bonding_', '')
79 bonding_method = TestBonding.__dict__[bonding_name]
80 return bonding_method(self.test_bonding)
81 # maintain __name__ for ignore_result
82 redirect.__name__ = method.__name__
83 # restore the doc text
84 redirect.__doc__ = TestBonding.__dict__[bonding_name].__doc__
87 # run a step but return True so that we can go on
88 def ignore_result(method):
90 # ssh_slice_ignore->ssh_slice
91 ref_name = method.__name__.replace('_ignore', '').replace('force_', '')
92 ref_method = TestPlc.__dict__[ref_name]
93 result = ref_method(self)
94 print("Actual (but ignored) result for {ref_name} is {result}".format(**locals()))
95 return Ignored(result)
96 name = method.__name__.replace('_ignore', '').replace('force_', '')
97 ignoring.__name__ = name
98 ignoring.__doc__ = "ignored version of " + name
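# a minimal usage sketch (the wrapper name is hypothetical): decorating
#     @ignore_result
#     def ssh_slice_ignore(self): pass
# makes the wrapper strip '_ignore' from its own name, run TestPlc.ssh_slice,
# print the actual outcome, and return it wrapped in Ignored() so that,
# as the comment above says, the run can go on regardless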
101 # a variant that expects the TestSlice method to return a list of CompleterTasks that
102 # are then merged into a single Completer run, to avoid waiting for all the slices
103 # especially useful when a test fails, of course
104 # because we need to pass arguments, we use a class instead..
105 class slice_mapper__tasks(object):
106 # could not get this to work with named arguments
107 def __init__(self, timeout_minutes, silent_minutes, period_seconds):
108 self.timeout = timedelta(minutes = timeout_minutes)
109 self.silent = timedelta(minutes = silent_minutes)
110 self.period = timedelta(seconds = period_seconds)
111 def __call__(self, method):
113 # compute augmented method name
114 method_name = method.__name__ + "__tasks"
115 # locate in TestSlice
116 slice_method = TestSlice.__dict__[ method_name ]
119 for slice_spec in self.plc_spec['slices']:
120 site_spec = self.locate_site (slice_spec['sitename'])
121 test_site = TestSite(self, site_spec)
122 test_slice = TestSlice(self, test_site, slice_spec)
123 tasks += slice_method (test_slice, self.options)
124 return Completer (tasks, message=method.__name__).\
125 run(decorator_self.timeout, decorator_self.silent, decorator_self.period)
126 # restore the doc text from the TestSlice method even if a bit odd
127 wrappee.__name__ = method.__name__
128 wrappee.__doc__ = slice_method.__doc__
131 def auth_sfa_mapper(method):
134 auth_method = TestAuthSfa.__dict__[method.__name__]
135 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
136 test_auth = TestAuthSfa(self, auth_spec)
137 if not auth_method(test_auth, self.options):
140 # restore the doc text
141 actual.__doc__ = TestAuthSfa.__dict__[method.__name__].__doc__
145 def __init__(self, result):
155 'plcvm_delete', 'plcvm_timestamp', 'plcvm_create', SEP,
156 'django_install', 'plc_install', 'plc_configure', 'plc_start', SEP,
157 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
158 'plcapi_urls', 'speed_up_slices', SEP,
159 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
160 # ss # slices created under plcsh interactively seem to be fine but these ones don't have the tags
161 # ss # keep this out of the way for now
162 # ss 'check_vsys_defaults_ignore', SEP,
163 # ss # run this first off so it's easier to re-run on another qemu box
164 # ss 'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init',
165 # ss 'bootcd', 'qemu_local_config', SEP,
166 # ss 'qemu_clean_mine', 'qemu_export', 'qemu_cleanlog', SEP,
167 # ss 'qemu_start', 'qemu_timestamp', 'qemu_nodefamily', SEP,
168 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure',
169 'sfa_start', 'sfa_import', SEPSFA,
170 'sfi_configure@1', 'sfa_register_site@1', 'sfa_register_pi@1', SEPSFA,
171 'sfa_register_user@1', 'sfa_update_user@1',
172 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
173 'sfa_remove_user_from_slice@1', 'sfi_show_slice_researchers@1',
174 'sfa_insert_user_in_slice@1', 'sfi_show_slice_researchers@1', SEPSFA,
175 'sfa_discover@1', 'sfa_rspec@1', SEPSFA,
176 'sfa_allocate@1', 'sfa_provision@1', 'sfa_describe@1', SEPSFA,
177 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
178 'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
179 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
180 # but as the stress test might take a while, we sometimes missed the debug mode..
181 # ss 'probe_kvm_iptables',
182 # ss 'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
183 # ss 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', SEP,
184 # ss 'ssh_slice_sfa@1', SEPSFA,
185 'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1',
186 'sfa_check_slice_plc_empty@1', SEPSFA,
187 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
188 # ss 'check_system_slice', SEP,
189 # for inspecting the slice while it runs the first time
191 # check slices are turned off properly
192 # ss 'debug_nodemanager',
193 # ss 'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
194 # ss # check they are properly re-created with the same name
195 # ss 'fill_slices', 'ssh_slice_again', SEP,
196 'gather_logs_force', SEP,
199 'export', 'show_boxes', 'super_speed_up_slices', SEP,
200 'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
201 'delete_initscripts', 'delete_nodegroups', 'delete_all_sites', SEP,
202 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
203 'delete_leases', 'list_leases', SEP,
205 'nodestate_show', 'nodestate_safeboot', 'nodestate_boot', 'nodestate_upgrade', SEP,
206 'nodedistro_show', 'nodedistro_f14', 'nodedistro_f18', SEP,
207 'nodedistro_f20', 'nodedistro_f21', 'nodedistro_f22', SEP,
208 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
209 'sfa_install_core', 'sfa_install_sfatables',
210 'sfa_install_plc', 'sfa_install_client', SEPSFA,
211 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop', 'sfa_uninstall', 'sfi_clean', SEPSFA,
212 'sfa_get_expires', SEPSFA,
213 'plc_db_dump', 'plc_db_restore', SEP,
214 'check_netflow', 'check_drl', SEP,
215 # used to be part of default steps but won't work since f27
217 'slice_fs_present', 'check_initscripts', SEP,
218 'standby_1_through_20', 'yes', 'no', SEP,
219 'install_syslinux6', 'bonding_builds', 'bonding_nodes', SEP,
221 default_bonding_steps = [
222 'bonding_init_partial',
224 'bonding_install_rpms', SEP,
228 def printable_steps(steps):
229 single_line = " ".join(steps) + " "
230 return single_line.replace(" "+SEP+" ", " \\\n").replace(" "+SEPSFA+" ", " \\\n")
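# a small illustration, assuming SEP is just a separator token:
# printable_steps(['plc_install', 'plc_configure', SEP, 'plc_start'])
# returns roughly "plc_install plc_configure \" followed by "plc_start " on
# the next line, i.e. the step names laid out on shell-continuation-friendly
# lines with the SEP / SEPSFA markers dropped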
232 def valid_step(step):
233 return step != SEP and step != SEPSFA
235 # turn off the sfa-related steps when build has skipped SFA
236 # this was originally for centos5 but is still valid
237 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
239 def _has_sfa_cached(rpms_url):
240 if os.path.isfile(has_sfa_cache_filename):
241 with open(has_sfa_cache_filename) as cache:
242 cached = cache.read() == "yes"
243 utils.header("build provides SFA (cached):{}".format(cached))
245 # warning, we're now building 'sface' so let's be a bit more picky
246 # full builds are expected to return with 0 here
247 utils.header("Checking if build provides SFA package...")
248 retcod = utils.system("curl --silent {}/ | grep -q sfa-4".format(rpms_url)) == 0
249 encoded = 'yes' if retcod else 'no'
250 with open(has_sfa_cache_filename,'w') as cache:
255 def check_whether_build_has_sfa(rpms_url):
256 has_sfa = TestPlc._has_sfa_cached(rpms_url)
258 utils.header("build does provide SFA")
260 # move all steps containing 'sfa' from default_steps to other_steps
261 utils.header("SFA package not found - removing steps with sfa or sfi")
262 sfa_steps = [ step for step in TestPlc.default_steps
263 if step.find('sfa') >= 0 or step.find("sfi") >= 0 ]
264 TestPlc.other_steps += sfa_steps
265 for step in sfa_steps:
266 TestPlc.default_steps.remove(step)
268 def __init__(self, plc_spec, options):
269 self.plc_spec = plc_spec
270 self.options = options
271 self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
272 self.vserverip = plc_spec['vserverip']
273 self.vservername = plc_spec['vservername']
274 self.vplchostname = self.vservername.split('-')[-1]
275 self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
276 self.apiserver = TestApiserver(self.url, options.dry_run)
277 (self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
278 (self.ssh_node_debug_timeout, self.ssh_node_debug_silent) = plc_spec['ssh_node_debug_timers']
280 def has_addresses_api(self):
281 return self.apiserver.has_method('AddIpAddress')
284 name = self.plc_spec['name']
285 return "{}.{}".format(name,self.vservername)
288 return self.plc_spec['host_box']
291 return self.test_ssh.is_local()
293 # define the API methods on this object through xmlrpc
294 # would help, but not strictly necessary
298 def actual_command_in_guest(self,command, backslash=False):
299 raw1 = self.host_to_guest(command)
300 raw2 = self.test_ssh.actual_command(raw1, dry_run=self.options.dry_run, backslash=backslash)
303 def start_guest(self):
304 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),
305 dry_run=self.options.dry_run))
307 def stop_guest(self):
308 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),
309 dry_run=self.options.dry_run))
311 def run_in_guest(self, command, backslash=False):
312 raw = self.actual_command_in_guest(command, backslash)
313 return utils.system(raw)
315 def run_in_host(self,command):
316 return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
318 # backslashing turned out so awful at some point that I've turned off auto-backslashing
319 # see e.g. plc_start esp. the version for f14
320 # command gets run in the plc's vm
321 def host_to_guest(self, command):
322 ssh_leg = TestSsh(self.vplchostname)
323 return ssh_leg.actual_command(command, keep_stdin=True)
325 # this /vservers thing is legacy...
326 def vm_root_in_host(self):
327 return "/vservers/{}/".format(self.vservername)
329 def vm_timestamp_path(self):
330 return "/vservers/{}/{}.timestamp".format(self.vservername, self.vservername)
332 # start/stop the vserver
333 def start_guest_in_host(self):
334 return "virsh -c lxc:/// start {}".format(self.vservername)
336 def stop_guest_in_host(self):
337 return "virsh -c lxc:/// destroy {}".format(self.vservername)
340 def run_in_guest_piped(self,local,remote):
341 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),
344 def dnf_check_installed(self, rpms):
345 if isinstance(rpms, list):
347 return self.run_in_guest("rpm -q {}".format(rpms)) == 0
349 # does a dnf install in the vm, ignores the dnf retcod, checks with rpm
350 def dnf_install(self, rpms):
351 if isinstance(rpms, list):
353 yum_mode = self.run_in_guest("dnf -y install {}".format(rpms))
355 self.run_in_guest("dnf -y install --allowerasing {}".format(rpms))
356 # yum-complete-transaction comes with yum-utils, which is in vtest.pkgs
357 # nothing similar with dnf, forget about this for now
358 # self.run_in_guest("yum-complete-transaction -y")
359 return self.dnf_check_installed(rpms)
361 def pip3_install(self, package):
362 return self.run_in_guest(f"pip3 install {package} || pip install {package}") == 0
365 return {'Username' : self.plc_spec['settings']['PLC_ROOT_USER'],
366 'AuthMethod' : 'password',
367 'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
368 'Role' : self.plc_spec['role'],
371 def locate_site(self,sitename):
372 for site in self.plc_spec['sites']:
373 if site['site_fields']['name'] == sitename:
375 if site['site_fields']['login_base'] == sitename:
377 raise Exception("Cannot locate site {}".format(sitename))
379 def locate_node(self, nodename):
380 for site in self.plc_spec['sites']:
381 for node in site['nodes']:
382 if node['name'] == nodename:
384 raise Exception("Cannot locate node {}".format(nodename))
386 def locate_hostname(self, hostname):
387 for site in self.plc_spec['sites']:
388 for node in site['nodes']:
389 if node['node_fields']['hostname'] == hostname:
391 raise Exception("Cannot locate hostname {}".format(hostname))
393 def locate_key(self, key_name):
394 for key in self.plc_spec['keys']:
395 if key['key_name'] == key_name:
397 raise Exception("Cannot locate key {}".format(key_name))
399 def locate_private_key_from_key_names(self, key_names):
400 # locate the first avail. key
402 for key_name in key_names:
403 key_spec = self.locate_key(key_name)
404 test_key = TestKey(self,key_spec)
405 publickey = test_key.publicpath()
406 privatekey = test_key.privatepath()
407 if os.path.isfile(publickey) and os.path.isfile(privatekey):
414 def locate_slice(self, slicename):
415 for slice in self.plc_spec['slices']:
416 if slice['slice_fields']['name'] == slicename:
418 raise Exception("Cannot locate slice {}".format(slicename))
420 def all_sliver_objs(self):
422 for slice_spec in self.plc_spec['slices']:
423 slicename = slice_spec['slice_fields']['name']
424 for nodename in slice_spec['nodenames']:
425 result.append(self.locate_sliver_obj(nodename, slicename))
428 def locate_sliver_obj(self, nodename, slicename):
429 site,node = self.locate_node(nodename)
430 slice = self.locate_slice(slicename)
432 test_site = TestSite(self, site)
433 test_node = TestNode(self, test_site, node)
434 # xxx the slice site is assumed to be the node site - mhh - probably harmless
435 test_slice = TestSlice(self, test_site, slice)
436 return TestSliver(self, test_node, test_slice)
438 def locate_first_node(self):
439 nodename = self.plc_spec['slices'][0]['nodenames'][0]
440 site,node = self.locate_node(nodename)
441 test_site = TestSite(self, site)
442 test_node = TestNode(self, test_site, node)
445 def locate_first_sliver(self):
446 slice_spec = self.plc_spec['slices'][0]
447 slicename = slice_spec['slice_fields']['name']
448 nodename = slice_spec['nodenames'][0]
449 return self.locate_sliver_obj(nodename,slicename)
451 # all different hostboxes used in this plc
452 def get_BoxNodes(self):
453 # maps on sites and nodes, return [ (host_box,test_node) ]
455 for site_spec in self.plc_spec['sites']:
456 test_site = TestSite(self,site_spec)
457 for node_spec in site_spec['nodes']:
458 test_node = TestNode(self, test_site, node_spec)
459 if not test_node.is_real():
460 tuples.append( (test_node.host_box(),test_node) )
461 # transform into a dict { 'host_box' -> [ test_node .. ] }
463 for (box,node) in tuples:
464 if box not in result:
467 result[box].append(node)
470 # a step for checking this stuff
471 def show_boxes(self):
472 'print summary of nodes location'
473 for box,nodes in self.get_BoxNodes().items():
474 print(box,":"," + ".join( [ node.name() for node in nodes ] ))
477 # make this a valid step
478 def qemu_kill_all(self):
479 'kill all qemu instances on the qemu boxes involved by this setup'
480 # this is the brute force version, kill all qemus on that host box
481 for (box,nodes) in self.get_BoxNodes().items():
482 # pass the first nodename, as we don't push template-qemu on testboxes
483 nodedir = nodes[0].nodedir()
484 TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
487 # make this a valid step
488 def qemu_list_all(self):
489 'list all qemu instances on the qemu boxes involved by this setup'
490 for box,nodes in self.get_BoxNodes().items():
491 # the brute force version, list all qemus on that host box
492 TestBoxQemu(box, self.options.buildname).qemu_list_all()
495 # list only the qemus related to this test
496 def qemu_list_mine(self):
497 'list qemu instances for our nodes'
498 for (box,nodes) in self.get_BoxNodes().items():
499 # the fine-grain version
504 # clean up only the qemus related to this test
505 def qemu_clean_mine(self):
506 'cleanup (rm -rf) qemu instances for our nodes'
507 for box,nodes in self.get_BoxNodes().items():
508 # the fine-grain version
513 # kill only the right qemus
514 def qemu_kill_mine(self):
515 'kill the qemu instances for our nodes'
516 for box,nodes in self.get_BoxNodes().items():
517 # the fine-grain version
522 #################### display config
524 "show test configuration after localization"
529 # ugly hack to make sure 'run export' only reports about the 1st plc
530 # to avoid confusion - also we use 'inri_slice1' in various aliases..
533 "print cut'n paste-able stuff to export env variables to your shell"
534 # guess local domain from hostname
535 if TestPlc.exported_id > 1:
536 print("export GUESTHOSTNAME{:d}={}".format(TestPlc.exported_id, self.plc_spec['vservername']))
538 TestPlc.exported_id += 1
539 domain = socket.gethostname().split('.',1)[1]
540 fqdn = "{}.{}".format(self.plc_spec['host_box'], domain)
541 print("export BUILD={}".format(self.options.buildname))
542 print("export PLCHOSTLXC={}".format(fqdn))
543 print("export GUESTNAME={}".format(self.vservername))
544 print("export GUESTHOSTNAME={}.{}".format(self.vplchostname, domain))
545 # find hostname of first node
546 hostname, qemubox = self.all_node_infos()[0]
547 print("export KVMHOST={}.{}".format(qemubox, domain))
548 print("export NODE={}".format(hostname))
552 always_display_keys=['PLC_WWW_HOST', 'nodes', 'sites']
553 def show_pass(self, passno):
554 for (key,val) in self.plc_spec.items():
555 if not self.options.verbose and key not in TestPlc.always_display_keys:
560 self.display_site_spec(site)
561 for node in site['nodes']:
562 self.display_node_spec(node)
563 elif key == 'initscripts':
564 for initscript in val:
565 self.display_initscript_spec(initscript)
566 elif key == 'slices':
568 self.display_slice_spec(slice)
571 self.display_key_spec(key)
573 if key not in ['sites', 'initscripts', 'slices', 'keys']:
574 print('+ ', key, ':', val)
576 def display_site_spec(self, site):
577 print('+ ======== site', site['site_fields']['name'])
578 for k,v in site.items():
579 if not self.options.verbose and k not in TestPlc.always_display_keys:
583 print('+ ', 'nodes : ', end=' ')
585 print(node['node_fields']['hostname'],'', end=' ')
589 print('+ users : ', end=' ')
591 print(user['name'],'', end=' ')
593 elif k == 'site_fields':
594 print('+ login_base', ':', v['login_base'])
595 elif k == 'address_fields':
601 def display_initscript_spec(self, initscript):
602 print('+ ======== initscript', initscript['initscript_fields']['name'])
604 def display_key_spec(self, key):
605 print('+ ======== key', key['key_name'])
607 def display_slice_spec(self, slice):
608 print('+ ======== slice', slice['slice_fields']['name'])
609 for k,v in slice.items():
612 print('+ nodes : ', end=' ')
614 print(nodename,'', end=' ')
616 elif k == 'usernames':
618 print('+ users : ', end=' ')
620 print(username,'', end=' ')
622 elif k == 'slice_fields':
623 print('+ fields', ':', end=' ')
624 print('max_nodes=',v['max_nodes'], end=' ')
629 def display_node_spec(self, node):
630 print("+ node={} host_box={}".format(node['name'], node['host_box']), end=' ')
631 print("hostname=", node['node_fields']['hostname'], end=' ')
632 print("ip=", node['interface_fields']['ip'])
633 if self.options.verbose:
634 utils.pprint("node details", node, depth=3)
636 # another entry point for just showing the boxes involved
637 def display_mapping(self):
638 TestPlc.display_mapping_plc(self.plc_spec)
642 def display_mapping_plc(plc_spec):
643 print('+ MyPLC',plc_spec['name'])
644 # WARNING this would not be right for lxc-based PLCs - should be harmless though
645 print('+\tvserver address = root@{}:/vservers/{}'.format(plc_spec['host_box'], plc_spec['vservername']))
646 print('+\tIP = {}/{}'.format(plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip']))
647 for site_spec in plc_spec['sites']:
648 for node_spec in site_spec['nodes']:
649 TestPlc.display_mapping_node(node_spec)
652 def display_mapping_node(node_spec):
653 print('+ NODE {}'.format(node_spec['name']))
654 print('+\tqemu box {}'.format(node_spec['host_box']))
655 print('+\thostname={}'.format(node_spec['node_fields']['hostname']))
657 # write a timestamp in /vservers/<>.timestamp
658 # cannot be inside the vserver, that causes vserver .. build to cough
659 def plcvm_timestamp(self):
660 "Create a timestamp to remember creation date for this plc"
661 now = int(time.time())
662 # TODO-lxc check this one
663 # a first approx. is to store the timestamp close to the VM root like vs does
664 stamp_path = self.vm_timestamp_path()
665 stamp_dir = os.path.dirname(stamp_path)
666 utils.system(self.test_ssh.actual_command("mkdir -p {}".format(stamp_dir)))
667 return utils.system(self.test_ssh.actual_command("echo {:d} > {}".format(now, stamp_path))) == 0
669 # this is called unconditionally at the beginning of the test sequence
670 # just in case this is a rerun, so if the vm is not running it's fine
671 def plcvm_delete(self):
672 "vserver delete the test myplc"
673 stamp_path = self.vm_timestamp_path()
674 self.run_in_host("rm -f {}".format(stamp_path))
675 self.run_in_host("virsh -c lxc:/// destroy {}".format(self.vservername))
676 self.run_in_host("virsh -c lxc:/// undefine {}".format(self.vservername))
677 self.run_in_host("rm -fr /vservers/{}".format(self.vservername))
681 # historically the build was being fetched by the tests
682 # now the build pushes itself as a subdir of the tests workdir
683 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
684 def plcvm_create(self):
685 "vserver creation (no install done)"
686 # push the local build/ dir to the testplc box
688 # a full path for the local calls
689 build_dir = os.path.dirname(sys.argv[0])
690 # sometimes this is empty - set to "." in such a case
693 build_dir += "/build"
695 # use a standard name - will be relative to remote buildname
697 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
698 self.test_ssh.rmdir(build_dir)
699 self.test_ssh.copy(build_dir, recursive=True)
700 # the repo url is taken from arch-rpms-url
701 # with the last step (i386) removed
702 repo_url = self.options.arch_rpms_url
703 for level in [ 'arch' ]:
704 repo_url = os.path.dirname(repo_url)
707 # on the virsh containers, DNS resolution using gethostbyaddr
708 # does not work for the hosts under .pl.sophia.inria.fr
709 # although these IPs can be reversed from virtually everywhere else
711 # this has started with something around fedora35 so I am suspecting python-3.10
713 # in any case, here's a quick and dirty workaround, after banging my head
714 # against this for two good hours without finding a single clue
715 # about how to deal with it properly
719 def workaround_gethostaddr(ip):
720 command = f"host {ip} 8.8.8.8"
721 completed = subprocess.run(command, shell=True, capture_output=True)
722 pieces = completed.stdout.decode().split("domain name pointer ")
724 return pieces[1].replace(".\n", "")
728 # invoke initvm (drop support for vs)
729 script = "lbuild-initvm.sh"
731 # pass the vbuild-nightly options to [lv]test-initvm
732 script_options += " -p {}".format(self.options.personality)
733 script_options += " -d {}".format(self.options.pldistro)
734 script_options += " -f {}".format(self.options.fcdistro)
735 script_options += " -r {}".format(repo_url)
736 vserver_name = self.vservername
738 vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
740 # read more above about this workaround
741 vserver_hostname = workaround_gethostaddr(self.vserverip)
742 if not vserver_hostname:
743 print("Cannot reverse lookup {}".format(self.vserverip))
744 print("This is considered fatal, as this might pollute the test results")
746 script_options += " -n {}".format(vserver_hostname)
747 create_vserver="{build_dir}/{script} {script_options} {vserver_name}".format(**locals())
748 return self.run_in_host(create_vserver) == 0
750 ### install django through pip
751 def django_install(self):
752 # plcapi requires Django, which is no longer provided by fedora as an rpm
753 # so we use pip instead
757 return self.pip3_install('Django')
760 def plc_install(self):
762 yum install myplc, noderepo
766 if self.options.personality == "linux32":
768 elif self.options.personality == "linux64":
771 raise Exception("Unsupported personality {}".format(self.options.personality))
772 nodefamily = "{}-{}-{}".format(self.options.pldistro, self.options.fcdistro, arch)
774 # check it's possible to install just 'myplc-core' first
775 if not self.dnf_install("myplc-core"):
779 pkgs_list.append("myplc")
780 # pkgs_list.append("slicerepo-{}".format(nodefamily))
781 # pkgs_list.append("noderepo-{}".format(nodefamily))
782 pkgs_string=" ".join(pkgs_list)
783 return self.dnf_install(pkgs_list)
785 def install_syslinux6(self):
787 install syslinux6 from the fedora21 release
789 key = 'http://mirror.onelab.eu/keys/RPM-GPG-KEY-fedora-21-primary'
792 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-6.03-1.fc21.x86_64.rpm',
793 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-nonlinux-6.03-1.fc21.noarch.rpm',
794 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-perl-6.03-1.fc21.x86_64.rpm',
796 # this can be done several times
797 self.run_in_guest("rpm --import {key}".format(**locals()))
798 return self.run_in_guest("yum -y localinstall {}".format(" ".join(rpms))) == 0
800 def bonding_builds(self):
802 list /etc/yum.repos.d on the myplc side
804 self.run_in_guest("ls /etc/yum.repos.d/*partial.repo")
807 def bonding_nodes(self):
809 List nodes known to the myplc together with their nodefamily
811 print("---------------------------------------- nodes")
812 for node in self.apiserver.GetNodes(self.auth_root()):
813 print("{} -> {}".format(node['hostname'],
814 self.apiserver.GetNodeFlavour(self.auth_root(),node['hostname'])['nodefamily']))
815 print("---------------------------------------- nodes")
819 def mod_python(self):
820 """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
821 return self.dnf_install( ['mod_python'] )
824 def plc_configure(self):
826 tmpname = '{}.plc-config-tty'.format(self.name())
827 with open(tmpname,'w') as fileconf:
828 for var, value in self.plc_spec['settings'].items():
829 fileconf.write('e {}\n{}\n'.format(var, value))
830 fileconf.write('w\n')
831 fileconf.write('q\n')
832 utils.system('cat {}'.format(tmpname))
833 self.run_in_guest_piped('cat {}'.format(tmpname), 'plc-config-tty')
834 utils.system('rm {}'.format(tmpname))
837 # care only about f>=27
838 def start_stop_systemd(self, service, start_or_stop):
839 "utility to start/stop a systemd-defined service (sfa)"
840 return self.run_in_guest("systemctl {} {}".format(start_or_stop, service)) == 0
843 "start plc through systemclt"
844 return self.start_stop_systemd('plc', 'start')
847 "stop plc through systemctl"
848 return self.start_stop_systemd('plc', 'stop')
850 def plcvm_start(self):
851 "start the PLC vserver"
855 def plcvm_stop(self):
856 "stop the PLC vserver"
860 # stores the keys from the config for further use
861 def keys_store(self):
862 "stores test users ssh keys in keys/"
863 for key_spec in self.plc_spec['keys']:
864 TestKey(self,key_spec).store_key()
867 def keys_clean(self):
868 "removes keys cached in keys/"
869 utils.system("rm -rf ./keys")
872 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
873 # for later direct access to the nodes
874 def keys_fetch(self):
875 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
877 if not os.path.isdir(dir):
879 vservername = self.vservername
880 vm_root = self.vm_root_in_host()
882 prefix = 'debug_ssh_key'
883 for ext in ['pub', 'rsa'] :
884 src = "{vm_root}/etc/planetlab/{prefix}.{ext}".format(**locals())
885 dst = "keys/{vservername}-debug.{ext}".format(**locals())
886 if self.test_ssh.fetch(src, dst) != 0:
891 "create sites with PLCAPI"
892 return self.do_sites()
894 def delete_sites(self):
895 "delete sites with PLCAPI"
896 return self.do_sites(action="delete")
898 def do_sites(self, action="add"):
899 for site_spec in self.plc_spec['sites']:
900 test_site = TestSite(self,site_spec)
901 if (action != "add"):
902 utils.header("Deleting site {} in {}".format(test_site.name(), self.name()))
903 test_site.delete_site()
904 # deleted with the site
905 #test_site.delete_users()
908 utils.header("Creating site {} & users in {}".format(test_site.name(), self.name()))
909 test_site.create_site()
910 test_site.create_users()
913 def delete_all_sites(self):
914 "Delete all sites in PLC, and related objects"
915 print('auth_root', self.auth_root())
916 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id', 'login_base'])
918 # keep the automatic site - otherwise we shoot ourselves in the foot, as root_auth would no longer be valid
919 if site['login_base'] == self.plc_spec['settings']['PLC_SLICE_PREFIX']:
921 site_id = site['site_id']
922 print('Deleting site_id', site_id)
923 self.apiserver.DeleteSite(self.auth_root(), site_id)
927 "create nodes with PLCAPI"
928 return self.do_nodes()
929 def delete_nodes(self):
930 "delete nodes with PLCAPI"
931 return self.do_nodes(action="delete")
933 def do_nodes(self, action="add"):
934 for site_spec in self.plc_spec['sites']:
935 test_site = TestSite(self, site_spec)
937 utils.header("Deleting nodes in site {}".format(test_site.name()))
938 for node_spec in site_spec['nodes']:
939 test_node = TestNode(self, test_site, node_spec)
940 utils.header("Deleting {}".format(test_node.name()))
941 test_node.delete_node()
943 utils.header("Creating nodes for site {} in {}".format(test_site.name(), self.name()))
944 for node_spec in site_spec['nodes']:
945 utils.pprint('Creating node {}'.format(node_spec), node_spec)
946 test_node = TestNode(self, test_site, node_spec)
947 test_node.create_node()
950 def nodegroups(self):
951 "create nodegroups with PLCAPI"
952 return self.do_nodegroups("add")
953 def delete_nodegroups(self):
954 "delete nodegroups with PLCAPI"
955 return self.do_nodegroups("delete")
959 def translate_timestamp(start, grain, timestamp):
960 if timestamp < TestPlc.YEAR:
961 return start + timestamp*grain
966 def timestamp_printable(timestamp):
967 return time.strftime('%m-%d %H:%M:%S UTC', time.gmtime(timestamp))
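# a small worked example of these lease-time helpers (values are illustrative):
# with a grain of 1800s and start aligned on that grain, a relative 't_from'
# of 2 translates to start + 2*1800, i.e. one hour after start - any value
# below TestPlc.YEAR (presumably one year in seconds) is treated as relative;
# timestamp_printable(0) renders the epoch as '01-01 00:00:00 UTC'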
970 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
971 now = int(time.time())
972 grain = self.apiserver.GetLeaseGranularity(self.auth_root())
973 print('API answered grain=', grain)
974 start = (now//grain)*grain
976 # find out all nodes that are reservable
977 nodes = self.all_reservable_nodenames()
979 utils.header("No reservable node found - proceeding without leases")
982 # attach them to the leases as specified in plc_specs
983 # this is where the 'leases' field gets interpreted as relative or absolute
984 for lease_spec in self.plc_spec['leases']:
985 # skip the ones that come with a null slice id
986 if not lease_spec['slice']:
988 lease_spec['t_from'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_from'])
989 lease_spec['t_until'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_until'])
990 lease_addition = self.apiserver.AddLeases(self.auth_root(), nodes, lease_spec['slice'],
991 lease_spec['t_from'], lease_spec['t_until'])
992 if lease_addition['errors']:
993 utils.header("Cannot create leases, {}".format(lease_addition['errors']))
996 utils.header('Leases on nodes {} for {} from {:d} ({}) until {:d} ({})'\
997 .format(nodes, lease_spec['slice'],
998 lease_spec['t_from'], TestPlc.timestamp_printable(lease_spec['t_from']),
999 lease_spec['t_until'], TestPlc.timestamp_printable(lease_spec['t_until'])))
1003 def delete_leases(self):
1004 "remove all leases in the myplc side"
1005 lease_ids = [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
1006 utils.header("Cleaning leases {}".format(lease_ids))
1007 self.apiserver.DeleteLeases(self.auth_root(), lease_ids)
1010 def list_leases(self):
1011 "list all leases known to the myplc"
1012 leases = self.apiserver.GetLeases(self.auth_root())
1013 now = int(time.time())
1015 current = l['t_until'] >= now
1016 if self.options.verbose or current:
1017 utils.header("{} {} from {} until {}"\
1018 .format(l['hostname'], l['name'],
1019 TestPlc.timestamp_printable(l['t_from']),
1020 TestPlc.timestamp_printable(l['t_until'])))
1023 # create nodegroups if needed, and populate
1024 def do_nodegroups(self, action="add"):
1025 # 1st pass to scan contents
1027 for site_spec in self.plc_spec['sites']:
1028 test_site = TestSite(self,site_spec)
1029 for node_spec in site_spec['nodes']:
1030 test_node = TestNode(self, test_site, node_spec)
1031 if 'nodegroups' in node_spec:
1032 nodegroupnames = node_spec['nodegroups']
1033 if isinstance(nodegroupnames, str):
1034 nodegroupnames = [ nodegroupnames ]
1035 for nodegroupname in nodegroupnames:
1036 if nodegroupname not in groups_dict:
1037 groups_dict[nodegroupname] = []
1038 groups_dict[nodegroupname].append(test_node.name())
1039 auth = self.auth_root()
1041 for (nodegroupname,group_nodes) in groups_dict.items():
1043 print('nodegroups:', 'dealing with nodegroup',\
1044 nodegroupname, 'on nodes', group_nodes)
1045 # first, check if the nodetagtype is here
1046 tag_types = self.apiserver.GetTagTypes(auth, {'tagname':nodegroupname})
1048 tag_type_id = tag_types[0]['tag_type_id']
1050 tag_type_id = self.apiserver.AddTagType(auth,
1051 {'tagname' : nodegroupname,
1052 'description' : 'for nodegroup {}'.format(nodegroupname),
1053 'category' : 'test'})
1054 print('located tag (type)', nodegroupname, 'as', tag_type_id)
1056 nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname' : nodegroupname})
1058 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
1059 print('created nodegroup', nodegroupname, \
1060 'from tagname', nodegroupname, 'and value', 'yes')
1061 # set node tag on all nodes, value='yes'
1062 for nodename in group_nodes:
1064 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
1066 traceback.print_exc()
1067 print('node', nodename, 'seems to already have tag', nodegroupname)
1070 expect_yes = self.apiserver.GetNodeTags(auth,
1071 {'hostname' : nodename,
1072 'tagname' : nodegroupname},
1073 ['value'])[0]['value']
1074 if expect_yes != "yes":
1075 print('Mismatch node tag on node',nodename,'got',expect_yes)
1078 if not self.options.dry_run:
1079 print('Cannot find tag', nodegroupname, 'on node', nodename)
1083 print('cleaning nodegroup', nodegroupname)
1084 self.apiserver.DeleteNodeGroup(auth, nodegroupname)
1086 traceback.print_exc()
1090 # a list of TestNode objs
1091 def all_nodes(self):
1093 for site_spec in self.plc_spec['sites']:
1094 test_site = TestSite(self,site_spec)
1095 for node_spec in site_spec['nodes']:
1096 nodes.append(TestNode(self, test_site, node_spec))
1099 # return a list of tuples (nodename,qemuname)
1100 def all_node_infos(self) :
1102 for site_spec in self.plc_spec['sites']:
1103 node_infos += [ (node_spec['node_fields']['hostname'], node_spec['host_box']) \
1104 for node_spec in site_spec['nodes'] ]
1107 def all_nodenames(self):
1108 return [ x[0] for x in self.all_node_infos() ]
1109 def all_reservable_nodenames(self):
1111 for site_spec in self.plc_spec['sites']:
1112 for node_spec in site_spec['nodes']:
1113 node_fields = node_spec['node_fields']
1114 if 'node_type' in node_fields and node_fields['node_type'] == 'reservable':
1115 res.append(node_fields['hostname'])
1118 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
1119 def nodes_check_boot_state(self, target_boot_state, timeout_minutes,
1120 silent_minutes, period_seconds = 15):
1121 if self.options.dry_run:
1125 class CompleterTaskBootState(CompleterTask):
1126 def __init__(self, test_plc, hostname):
1127 self.test_plc = test_plc
1128 self.hostname = hostname
1129 self.last_boot_state = 'undef'
1130 def actual_run(self):
1132 node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(),
1135 self.last_boot_state = node['boot_state']
1136 return self.last_boot_state == target_boot_state
1140 return "CompleterTaskBootState with node {}".format(self.hostname)
1141 def failure_epilogue(self):
1142 print("node {} in state {} - expected {}"\
1143 .format(self.hostname, self.last_boot_state, target_boot_state))
1145 timeout = timedelta(minutes=timeout_minutes)
1146 graceout = timedelta(minutes=silent_minutes)
1147 period = timedelta(seconds=period_seconds)
1148 # the nodes that haven't checked yet - start with a full list and shrink over time
1149 utils.header("checking nodes boot state (expected {})".format(target_boot_state))
1150 tasks = [ CompleterTaskBootState(self,hostname) \
1151 for (hostname,_) in self.all_node_infos() ]
1152 message = 'check_boot_state={}'.format(target_boot_state)
1153 return Completer(tasks, message=message).run(timeout, graceout, period)
1155 def nodes_booted(self):
1156 return self.nodes_check_boot_state('boot', timeout_minutes=30, silent_minutes=28)
1158 def probe_kvm_iptables(self):
1159 (_,kvmbox) = self.all_node_infos()[0]
1160 TestSsh(kvmbox).run("iptables-save")
1164 def check_nodes_ping(self, timeout_seconds=60, period_seconds=10):
1165 class CompleterTaskPingNode(CompleterTask):
1166 def __init__(self, hostname):
1167 self.hostname = hostname
1168 def run(self, silent):
1169 command="ping -c 1 -w 1 {} >& /dev/null".format(self.hostname)
1170 return utils.system(command, silent=silent) == 0
1171 def failure_epilogue(self):
1172 print("Cannot ping node with name {}".format(self.hostname))
1173 timeout = timedelta(seconds = timeout_seconds)
1175 period = timedelta(seconds = period_seconds)
1176 node_infos = self.all_node_infos()
1177 tasks = [ CompleterTaskPingNode(h) for (h,_) in node_infos ]
1178 return Completer(tasks, message='ping_node').run(timeout, graceout, period)
1180 # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1181 def ping_node(self):
1183 return self.check_nodes_ping()
1185 def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period_seconds=15):
1187 timeout = timedelta(minutes=timeout_minutes)
1188 graceout = timedelta(minutes=silent_minutes)
1189 period = timedelta(seconds=period_seconds)
1190 vservername = self.vservername
1193 completer_message = 'ssh_node_debug'
1194 local_key = "keys/{vservername}-debug.rsa".format(**locals())
1197 completer_message = 'ssh_node_boot'
1198 local_key = "keys/key_admin.rsa"
1199 utils.header("checking ssh access to nodes (expected in {} mode)".format(message))
1200 node_infos = self.all_node_infos()
1201 tasks = [ CompleterTaskNodeSsh(nodename, qemuname, local_key,
1202 boot_state=message, dry_run=self.options.dry_run) \
1203 for (nodename, qemuname) in node_infos ]
1204 return Completer(tasks, message=completer_message).run(timeout, graceout, period)
1206 def ssh_node_debug(self):
1207 "Tries to ssh into nodes in debug mode with the debug ssh key"
1208 return self.check_nodes_ssh(debug = True,
1209 timeout_minutes = self.ssh_node_debug_timeout,
1210 silent_minutes = self.ssh_node_debug_silent)
1212 def ssh_node_boot(self):
1213 "Tries to ssh into nodes in production mode with the root ssh key"
1214 return self.check_nodes_ssh(debug = False,
1215 timeout_minutes = self.ssh_node_boot_timeout,
1216 silent_minutes = self.ssh_node_boot_silent)
1218 def node_bmlogs(self):
1219 "Checks that there's a non-empty dir. /var/log/bm/raw"
1220 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw")) == 0
1223 def qemu_local_init(self): pass
1225 def bootcd(self): pass
1227 def qemu_local_config(self): pass
1229 def qemu_export(self): pass
1231 def qemu_cleanlog(self): pass
1233 def nodestate_reinstall(self): pass
1235 def nodestate_upgrade(self): pass
1237 def nodestate_safeboot(self): pass
1239 def nodestate_boot(self): pass
1241 def nodestate_show(self): pass
1243 def nodedistro_f14(self): pass
1245 def nodedistro_f18(self): pass
1247 def nodedistro_f20(self): pass
1249 def nodedistro_f21(self): pass
1251 def nodedistro_f22(self): pass
1253 def nodedistro_show(self): pass
1255 ### check hooks : invoke scripts from hooks/{node,slice}
1256 def check_hooks_node(self):
1257 return self.locate_first_node().check_hooks()
1258 def check_hooks_sliver(self) :
1259 return self.locate_first_sliver().check_hooks()
1261 def check_hooks(self):
1262 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1263 return self.check_hooks_node() and self.check_hooks_sliver()
1266 def do_check_initscripts(self):
1267 class CompleterTaskInitscript(CompleterTask):
1268 def __init__(self, test_sliver, stamp):
1269 self.test_sliver = test_sliver
1271 def actual_run(self):
1272 return self.test_sliver.check_initscript_stamp(self.stamp)
1274 return "initscript checker for {}".format(self.test_sliver.name())
1275 def failure_epilogue(self):
1276 print("initscript stamp {} not found in sliver {}"\
1277 .format(self.stamp, self.test_sliver.name()))
1280 for slice_spec in self.plc_spec['slices']:
1281 if 'initscriptstamp' not in slice_spec:
1283 stamp = slice_spec['initscriptstamp']
1284 slicename = slice_spec['slice_fields']['name']
1285 for nodename in slice_spec['nodenames']:
1286 print('nodename', nodename, 'slicename', slicename, 'stamp', stamp)
1287 site,node = self.locate_node(nodename)
1288 # xxx - passing the wrong site - probably harmless
1289 test_site = TestSite(self, site)
1290 test_slice = TestSlice(self, test_site, slice_spec)
1291 test_node = TestNode(self, test_site, node)
1292 test_sliver = TestSliver(self, test_node, test_slice)
1293 tasks.append(CompleterTaskInitscript(test_sliver, stamp))
1294 return Completer(tasks, message='check_initscripts').\
1295 run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1297 def check_initscripts(self):
1298 "check that the initscripts have triggered"
1299 return self.do_check_initscripts()
1301 def initscripts(self):
1302 "create initscripts with PLCAPI"
1303 for initscript in self.plc_spec['initscripts']:
1304 utils.pprint('Adding Initscript in plc {}'.format(self.plc_spec['name']), initscript)
1305 self.apiserver.AddInitScript(self.auth_root(), initscript['initscript_fields'])
1308 def delete_initscripts(self):
1309 "delete initscripts with PLCAPI"
1310 for initscript in self.plc_spec['initscripts']:
1311 initscript_name = initscript['initscript_fields']['name']
1312 print('Attempting to delete {} in plc {}'.format(initscript_name, self.plc_spec['name']))
1314 self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
1315 print(initscript_name, 'deleted')
1317 print('deletion went wrong - probably did not exist')
1322 "create slices with PLCAPI"
1323 return self.do_slices(action="add")
1325 def delete_slices(self):
1326 "delete slices with PLCAPI"
1327 return self.do_slices(action="delete")
1329 def fill_slices(self):
1330 "add nodes in slices with PLCAPI"
1331 return self.do_slices(action="fill")
1333 def empty_slices(self):
1334 "remove nodes from slices with PLCAPI"
1335 return self.do_slices(action="empty")
1337 def do_slices(self, action="add"):
1338 for slice in self.plc_spec['slices']:
1339 site_spec = self.locate_site(slice['sitename'])
1340 test_site = TestSite(self,site_spec)
1341 test_slice=TestSlice(self,test_site,slice)
1342 if action == "delete":
1343 test_slice.delete_slice()
1344 elif action == "fill":
1345 test_slice.add_nodes()
1346 elif action == "empty":
1347 test_slice.delete_nodes()
1349 test_slice.create_slice()
1352 @slice_mapper__tasks(20, 10, 15)
1353 def ssh_slice(self): pass
1354 @slice_mapper__tasks(20, 19, 15)
1355 def ssh_slice_off(self): pass
1356 @slice_mapper__tasks(1, 1, 15)
1357 def slice_fs_present(self): pass
1358 @slice_mapper__tasks(1, 1, 15)
1359 def slice_fs_deleted(self): pass
1361 # use another name so we can exclude/ignore it from the tests on the nightly command line
1362 def ssh_slice_again(self): return self.ssh_slice()
1363 # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1364 # but for some reason the ignore-wrapping thing would not
1367 def ssh_slice_basics(self): pass
1369 def check_vsys_defaults(self): pass
1372 def keys_clear_known_hosts(self): pass
1374 def plcapi_urls(self):
1376 attempts to reach the PLCAPI with various forms for the URL
1378 return PlcapiUrlScanner(self.auth_root(), ip=self.vserverip).scan()
1380 def speed_up_slices(self):
1381 "tweak nodemanager cycle (wait time) to 30+/-10 s"
1382 return self._speed_up_slices (30, 10)
1383 def super_speed_up_slices(self):
1384 "dev mode: tweak nodemanager cycle (wait time) to 5+/-1 s"
1385 return self._speed_up_slices(5, 1)
1387 def _speed_up_slices(self, p, r):
1388 # create the template on the server-side
1389 template = "{}.nodemanager".format(self.name())
1390 with open(template,"w") as template_file:
1391 template_file.write('OPTIONS="-p {} -r {} -d"\n'.format(p, r))
1392 in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1393 remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1394 self.test_ssh.copy_abs(template, remote)
1396 if not self.apiserver.GetConfFiles(self.auth_root(),
1397 {'dest' : '/etc/sysconfig/nodemanager'}):
1398 self.apiserver.AddConfFile(self.auth_root(),
1399 {'dest' : '/etc/sysconfig/nodemanager',
1400 'source' : 'PlanetLabConf/nodemanager',
1401 'postinstall_cmd' : 'service nm restart',})
1404 def debug_nodemanager(self):
1405 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1406 template = "{}.nodemanager".format(self.name())
1407 with open(template,"w") as template_file:
1408 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1409 in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1410 remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1411 self.test_ssh.copy_abs(template, remote)
1415 def qemu_start(self) : pass
1418 def qemu_timestamp(self) : pass
1421 def qemu_nodefamily(self): pass
1423 # when a spec refers to a node possibly on another plc
1424 def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
1425 for plc in [ self ] + other_plcs:
1427 return plc.locate_sliver_obj(nodename, slicename)
1430 raise Exception("Cannot locate sliver {}@{} among all PLCs".format(nodename, slicename))
1432 # implement this one as a cross step so that we can take advantage of different nodes
1433 # in multi-plcs mode
1434 def cross_check_tcp(self, other_plcs):
1435 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1436 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1437 utils.header("check_tcp: no/empty config found")
1439 specs = self.plc_spec['tcp_specs']
1442 # first wait for the network to be up and ready inside the slices
1443 class CompleterTaskNetworkReadyInSliver(CompleterTask):
1444 def __init__(self, test_sliver):
1445 self.test_sliver = test_sliver
1446 def actual_run(self):
1447 return self.test_sliver.check_tcp_ready(port = 9999)
1449 return "network ready checker for {}".format(self.test_sliver.name())
1450 def failure_epilogue(self):
1451 print("could not bind port from sliver {}".format(self.test_sliver.name()))
1455 managed_sliver_names = set()
1457 # locate the TestSliver instances involved, and cache them in the spec instance
1458 spec['s_sliver'] = self.locate_sliver_obj_cross(spec['server_node'], spec['server_slice'], other_plcs)
1459 spec['c_sliver'] = self.locate_sliver_obj_cross(spec['client_node'], spec['client_slice'], other_plcs)
1460 message = "Will check TCP between s={} and c={}"\
1461 .format(spec['s_sliver'].name(), spec['c_sliver'].name())
1462 if 'client_connect' in spec:
1463 message += " (using {})".format(spec['client_connect'])
1464 utils.header(message)
1465 # we need to check network presence in both slivers, but also
1466 # avoid inserting a sliver several times
1467 for sliver in [ spec['s_sliver'], spec['c_sliver'] ]:
1468 if sliver.name() not in managed_sliver_names:
1469 tasks.append(CompleterTaskNetworkReadyInSliver(sliver))
1470 # add this sliver's name in the set
1471 managed_sliver_names .update( {sliver.name()} )
1473 # wait for the network to be OK on all server sides
1474 if not Completer(tasks, message='check for network readiness in slivers').\
1475 run(timedelta(seconds=30), timedelta(seconds=24), period=timedelta(seconds=5)):
1478 # run server and client
1482 # the issue here is that the server runs in the background
1483 # and so we have no clue whether it took off properly or not
1484 # it looks like in some cases it does not
1485 address = spec['s_sliver'].test_node.name()
1486 if not spec['s_sliver'].run_tcp_server(address, port, timeout=20):
1490 # idem for the client side
1491 # use nodename from located sliver, unless 'client_connect' is set
1492 if 'client_connect' in spec:
1493 destination = spec['client_connect']
1495 destination = spec['s_sliver'].test_node.name()
1496 if not spec['c_sliver'].run_tcp_client(destination, port):
1500 # painfully enough, we need to allow for some time as netflow might show up last
1501 def check_system_slice(self):
1502 "all nodes: check that a system slice is alive"
1503 # netflow currently not working in the lxc distro
1504 # drl not built at all in the wtx distro
1505 # if we find either of them we're happy
1506 return self.check_netflow() or self.check_drl()
1509 def check_netflow(self): return self._check_system_slice('netflow')
1510 def check_drl(self): return self._check_system_slice('drl')
1512 # we have the slices up already here, so it should not take too long
1513 def _check_system_slice(self, slicename, timeout_minutes=5, period_seconds=15):
1514 class CompleterTaskSystemSlice(CompleterTask):
1515 def __init__(self, test_node, dry_run):
1516 self.test_node = test_node
1517 self.dry_run = dry_run
1518 def actual_run(self):
1519 return self.test_node._check_system_slice(slicename, dry_run=self.dry_run)
1521 return "System slice {} @ {}".format(slicename, self.test_node.name())
1522 def failure_epilogue(self):
1523 print("COULD not find system slice {} @ {}".format(slicename, self.test_node.name()))
1524 timeout = timedelta(minutes=timeout_minutes)
1525 silent = timedelta(0)
1526 period = timedelta(seconds=period_seconds)
1527 tasks = [ CompleterTaskSystemSlice(test_node, self.options.dry_run) \
1528 for test_node in self.all_nodes() ]
1529 return Completer(tasks, message='_check_system_slice').run(timeout, silent, period)
1531 def plcsh_stress_test(self):
1532 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1533 # install the stress-test in the plc image
1534 location = "/usr/share/plc_api/plcsh_stress_test.py"
1535 remote = "{}/{}".format(self.vm_root_in_host(), location)
1536 self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1538 command += " -- --check"
1539 if self.options.size == 1:
1540 command += " --tiny"
1541 return self.run_in_guest(command) == 0
1543 # populate runs the same utility with slightly different options
1544 # in particular it runs with --preserve (don't cleanup) and without --check
1545 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1547 def install_pip2(self):
1550 "http://mirror.onelab.eu/third-party/python2-pip-19.1.1-7.fc33.noarch.rpm",
1554 self.run_in_guest("pip2 --version") == 0
1555 or self.run_in_guest("dnf install python2-pip") == 0
1556 or self.run_in_guest("dnf localinstall -y " + " ".join(replacements)) == 0)
1559 def install_m2crypto(self):
1561 # installing m2crypto for python2 is increasingly difficult
1562 # f29 and f31: dnf install python2-m2crypto
1563 # f33: no longer available but the f31 repos below do the job just fine
1564 # note that using pip2 does not look like a viable option because it does
1565 # an install from sources and that's quite awkward
1568 # no longer on our mirror
1569 "https://archives.fedoraproject.org/pub/archive/fedora/linux/releases/31/Everything/x86_64/os/Packages/p/python2-typing-3.6.2-5.fc31.noarch.rpm",
1570 "https://archives.fedoraproject.org/pub/archive/fedora/linux/releases/31/Everything/x86_64/os/Packages/p/python2-m2crypto-0.35.2-2.fc31.x86_64.rpm",
1574 self.run_in_guest('python2 -c "import M2Crypto"', backslash=True) == 0
1575 or self.run_in_guest("pip2 install python2-m2crypto") == 0
1576 or self.run_in_guest("dnf localinstall -y " + " ".join(replacements)) == 0)
1578 # about pip2: the logic goes like this
1579 # check for pip2 command
1580 # if not, try dnf install python2-pip
1581 # if still not, dnf localinstall the above
1584 def sfa_install_all(self):
1585 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1587 # the rpm/dnf packages named python2-* are getting deprecated
1588 # we use pip2 instead
1589 # but that's not good for m2crypto
1591 pip_dependencies = [
1592 'sqlalchemy-migrate',
1601 and self.install_m2crypto()
1602 and all((self.run_in_guest(f"pip2 install {dep}") == 0)
1603 for dep in pip_dependencies)
1604 and self.dnf_install("sfa sfa-plc sfa-sfatables sfa-client")
1605 and self.run_in_guest("systemctl enable sfa-registry")==0
1606 and self.run_in_guest("systemctl enable sfa-aggregate")==0)
1608 def sfa_install_core(self):
1610 return self.dnf_install("sfa")
1612 def sfa_install_plc(self):
1613 "yum install sfa-plc"
1614 return self.dnf_install("sfa-plc")
1616 def sfa_install_sfatables(self):
1617 "yum install sfa-sfatables"
1618 return self.dnf_install("sfa-sfatables")
1620 # for some very odd reason, this sometimes fails with the following symptom
1621 # # yum install sfa-client
1622 # Setting up Install Process
1624 # Downloading Packages:
1625 # Running rpm_check_debug
1626 # Running Transaction Test
1627 # Transaction Test Succeeded
1628 # Running Transaction
1629 # Transaction couldn't start:
1630 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1631 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1632 # even though in the same context I have
1633 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1634 # Filesystem Size Used Avail Use% Mounted on
1635 # /dev/hdv1 806G 264G 501G 35% /
1636 # none 16M 36K 16M 1% /tmp
1638 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1639 def sfa_install_client(self):
1640 "yum install sfa-client"
1641 first_try = self.dnf_install("sfa-client")
1644 utils.header("********** Regular yum failed - special workaround in place, 2nd chance")
1645 code, cached_rpm_path = \
1646 utils.output_of(self.actual_command_in_guest(r'find /var/cache/yum -name sfa-client\*.rpm'))
1647 utils.header("cached_rpm_path=<<{}>>".format(cached_rpm_path))
1649 self.run_in_guest("rpm -i {}".format(cached_rpm_path))
1650 return self.dnf_check_installed("sfa-client")
1652 def sfa_dbclean(self):
1653 "thoroughly wipes off the SFA database"
1654 return self.run_in_guest("sfaadmin reg nuke") == 0 or \
1655 self.run_in_guest("sfa-nuke.py") == 0 or \
1656 self.run_in_guest("sfa-nuke-plc.py") == 0 or \
1657 self.run_in_guest("sfaadmin registry nuke") == 0
1659 def sfa_fsclean(self):
1660 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1661 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1664 def sfa_plcclean(self):
1665 "cleans the PLC entries that were created as a side effect of running the script"
1667 sfa_spec = self.plc_spec['sfa']
1669 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1670 login_base = auth_sfa_spec['login_base']
1672 self.apiserver.DeleteSite(self.auth_root(),login_base)
1674 print("Site {} already absent from PLC db".format(login_base))
1676 for spec_name in ['pi_spec', 'user_spec']:
1677 user_spec = auth_sfa_spec[spec_name]
1678 username = user_spec['email']
1680 self.apiserver.DeletePerson(self.auth_root(),username)
1682 # this in fact is expected as sites delete their members
1683 #print "User {} already absent from PLC db".format(username)
1686 print("REMEMBER TO RUN sfa_import AGAIN")
1689 def sfa_uninstall(self):
1690 "uses rpm to uninstall sfa - ignore result"
1691 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1692 self.run_in_guest("rm -rf /var/lib/sfa")
1693 self.run_in_guest("rm -rf /etc/sfa")
1694 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1696 self.run_in_guest("rpm -e --noscripts sfa-plc")
1699 ### run unit tests for SFA
# NOTE: on f14/i386, yum install sfa-tests sometimes fails for no apparent reason
1701 # Running Transaction
1702 # Transaction couldn't start:
1703 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1704 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1705 # no matter how many Gbs are available on the testplc
1706 # could not figure out what's wrong, so...
1707 # if the yum install phase fails, consider the test is successful
1708 # other combinations will eventually run it hopefully
1709 def sfa_utest(self):
1710 "dnf install sfa-tests and run SFA unittests"
1711 self.run_in_guest("dnf -y install sfa-tests")
1712 # failed to install - forget it
1713 if self.run_in_guest("rpm -q sfa-tests") != 0:
1714 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1716 return self.run_in_guest("/usr/share/sfa/tests/testAll.py") == 0
1720 dirname = "conf.{}".format(self.plc_spec['name'])
1721 if not os.path.isdir(dirname):
1722 utils.system("mkdir -p {}".format(dirname))
1723 if not os.path.isdir(dirname):
1724 raise Exception("Cannot create config dir for plc {}".format(self.name()))
1727 def conffile(self, filename):
1728 return "{}/{}".format(self.confdir(), filename)
1729 def confsubdir(self, dirname, clean, dry_run=False):
1730 subdirname = "{}/{}".format(self.confdir(), dirname)
1732 utils.system("rm -rf {}".format(subdirname))
1733 if not os.path.isdir(subdirname):
1734 utils.system("mkdir -p {}".format(subdirname))
1735 if not dry_run and not os.path.isdir(subdirname):
1736 raise "Cannot create config subdir {} for plc {}".format(dirname, self.name())
1739 def conffile_clean(self, filename):
filename = self.conffile(filename)
return utils.system("rm -rf {}".format(filename)) == 0
1744 def sfa_configure(self):
1745 "run sfa-config-tty"
1746 tmpname = self.conffile("sfa-config-tty")
1747 with open(tmpname,'w') as fileconf:
1748 for var, value in self.plc_spec['sfa']['settings'].items():
1749 fileconf.write('e {}\n{}\n'.format(var, value))
1750 fileconf.write('w\n')
1751 fileconf.write('R\n')
1752 fileconf.write('q\n')
1753 utils.system('cat {}'.format(tmpname))
1754 self.run_in_guest_piped('cat {}'.format(tmpname), 'sfa-config-tty')
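# for illustration (hypothetical setting value): with settings containing
# {'SFA_REGISTRY_ROOT_AUTH': 'plt'} the file piped into sfa-config-tty reads
#   e SFA_REGISTRY_ROOT_AUTH
#   plt
#   w
#   R
#   q
# i.e. one 'e <variable>' / value pair per setting, then the w/R/q commands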
1757 def aggregate_xml_line(self):
1758 port = self.plc_spec['sfa']['neighbours-port']
1759 return '<aggregate addr="{}" hrn="{}" port="{}"/>'\
1760 .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'], port)
1762 def registry_xml_line(self):
1763 return '<registry addr="{}" hrn="{}" port="12345"/>'\
1764 .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
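# example output (hypothetical values): with vserverip '10.0.0.5', a root auth of
# 'plt' and a neighbours-port of 12346, the two helpers above would yield
#   <aggregate addr="10.0.0.5" hrn="plt" port="12346"/>
#   <registry addr="10.0.0.5" hrn="plt" port="12345"/>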
1767 # a cross step that takes all other plcs in argument
1768 def cross_sfa_configure(self, other_plcs):
1769 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1770 # of course with a single plc, other_plcs is an empty list
1773 agg_fname = self.conffile("agg.xml")
1774 with open(agg_fname,"w") as out:
1775 out.write("<aggregates>{}</aggregates>\n"\
1776 .format(" ".join([ plc.aggregate_xml_line() for plc in other_plcs ])))
1777 utils.header("(Over)wrote {}".format(agg_fname))
1778 reg_fname=self.conffile("reg.xml")
1779 with open(reg_fname,"w") as out:
1780 out.write("<registries>{}</registries>\n"\
1781 .format(" ".join([ plc.registry_xml_line() for plc in other_plcs ])))
1782 utils.header("(Over)wrote {}".format(reg_fname))
1783 return self.test_ssh.copy_abs(agg_fname,
1784 '/{}/etc/sfa/aggregates.xml'.format(self.vm_root_in_host())) == 0 \
1785 and self.test_ssh.copy_abs(reg_fname,
1786 '/{}/etc/sfa/registries.xml'.format(self.vm_root_in_host())) == 0
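# for illustration, in a hypothetical two-plc run this plc's agg.xml would contain
# the other plc's aggregate line, e.g.
#   <aggregates><aggregate addr="10.0.0.6" hrn="plt2" port="12346"/></aggregates>
# and reg.xml the matching <registries>...</registries> wrapper, both then pushed
# under /etc/sfa/ in this plc's vm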
1788 def sfa_import(self):
1789 "use sfaadmin to import from plc"
1790 auth = self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']
1791 return self.run_in_guest('sfaadmin reg import_registry') == 0
1793 def sfa_start(self):
1794 "start SFA through systemctl - also install dependencies"
1796 return (self.start_stop_systemd('sfa-registry', 'start')
1797 and self.start_stop_systemd('sfa-aggregate', 'start'))
1800 def sfi_configure(self):
1801 "Create /root/sfi on the plc side for sfi client configuration"
1802 if self.options.dry_run:
1803 utils.header("DRY RUN - skipping step")
1805 sfa_spec = self.plc_spec['sfa']
1806 # cannot use auth_sfa_mapper to pass dir_name
1807 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1808 test_slice = TestAuthSfa(self, slice_spec)
1809 dir_basename = os.path.basename(test_slice.sfi_path())
1810 dir_name = self.confsubdir("dot-sfi/{}".format(dir_basename),
1811 clean=True, dry_run=self.options.dry_run)
1812 test_slice.sfi_configure(dir_name)
1813 # push into the remote /root/sfi area
1814 location = test_slice.sfi_path()
1815 remote = "{}/{}".format(self.vm_root_in_host(), location)
1816 self.test_ssh.mkdir(remote, abs=True)
# need to strip the last level of remote, otherwise we get an extra dir level
1818 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
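# e.g. (hypothetical names) a local dir conf.onelab/dot-sfi/main copied into
# os.path.dirname('/vservers/vplc1/root/sfi/main'), i.e. '/vservers/vplc1/root/sfi',
# ends up as /root/sfi/main inside the guest rather than /root/sfi/main/main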
1822 def sfi_clean(self):
1823 "clean up /root/sfi on the plc side"
1824 self.run_in_guest("rm -rf /root/sfi")
1827 def sfa_rspec_empty(self):
1828 "expose a static empty rspec (ships with the tests module) in the sfi directory"
1829 filename = "empty-rspec.xml"
1831 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1832 test_slice = TestAuthSfa(self, slice_spec)
1833 in_vm = test_slice.sfi_path()
1834 remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
if self.test_ssh.copy_abs(filename, remote) != 0:
1840 def sfa_register_site(self): pass
1842 def sfa_register_pi(self): pass
1844 def sfa_register_user(self): pass
1846 def sfa_update_user(self): pass
1848 def sfa_register_slice(self): pass
1850 def sfa_renew_slice(self): pass
1852 def sfa_get_expires(self): pass
1854 def sfa_discover(self): pass
1856 def sfa_rspec(self): pass
1858 def sfa_allocate(self): pass
1860 def sfa_allocate_empty(self): pass
1862 def sfa_provision(self): pass
1864 def sfa_provision_empty(self): pass
1866 def sfa_describe(self): pass
1868 def sfa_check_slice_plc(self): pass
1870 def sfa_check_slice_plc_empty(self): pass
1872 def sfa_update_slice(self): pass
1874 def sfa_remove_user_from_slice(self): pass
1876 def sfa_insert_user_in_slice(self): pass
1878 def sfi_list(self): pass
1880 def sfi_show_site(self): pass
1882 def sfi_show_slice(self): pass
1884 def sfi_show_slice_researchers(self): pass
1886 def ssh_slice_sfa(self): pass
1888 def sfa_delete_user(self): pass
1890 def sfa_delete_slice(self): pass
1893 "stop sfa through systemclt"
1894 return (self.start_stop_systemd('sfa-aggregate', 'stop') and
1895 self.start_stop_systemd('sfa-registry', 'stop'))
1898 "creates random entries in the PLCAPI"
1899 # install the stress-test in the plc image
1900 location = "/usr/share/plc_api/plcsh_stress_test.py"
1901 remote = "{}/{}".format(self.vm_root_in_host(), location)
1902 self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1904 command += " -- --preserve --short-names"
local = (self.run_in_guest(command) == 0)
# second run with --foreign
command += ' --foreign'
remote = (self.run_in_guest(command) == 0)
1909 return local and remote
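# for illustration, assuming the elided initialization sets command to the script's
# location, the two runs above amount to, inside the guest:
#   /usr/share/plc_api/plcsh_stress_test.py -- --preserve --short-names
#   /usr/share/plc_api/plcsh_stress_test.py -- --preserve --short-names --foreign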
1912 ####################
1914 def bonding_init_partial(self): pass
1917 def bonding_add_yum(self): pass
1920 def bonding_install_rpms(self): pass
1922 ####################
1924 def gather_logs(self):
1925 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1926 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1927 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1928 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
# (2) get all the nodes' qemu logs and store them as logs/node.qemu.<node>.log
# (3) get the nodes' /var/log and store it as logs/node.var-log.<node>/*
# (4) as far as possible, get the slivers' /var/log as logs/sliver.var-log.<sliver>/*
1933 print("-------------------- TestPlc.gather_logs : PLC's /var/log")
1934 self.gather_var_logs()
1936 print("-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/")
1937 self.gather_pgsql_logs()
1939 print("-------------------- TestPlc.gather_logs : PLC's /root/sfi/")
1940 self.gather_root_sfi()
1942 print("-------------------- TestPlc.gather_logs : nodes's QEMU logs")
1943 for site_spec in self.plc_spec['sites']:
1944 test_site = TestSite(self,site_spec)
1945 for node_spec in site_spec['nodes']:
1946 test_node = TestNode(self, test_site, node_spec)
1947 test_node.gather_qemu_logs()
1949 print("-------------------- TestPlc.gather_logs : nodes's /var/log")
1950 self.gather_nodes_var_logs()
1952 print("-------------------- TestPlc.gather_logs : sample sliver's /var/log")
1953 self.gather_slivers_var_logs()
1956 def gather_slivers_var_logs(self):
1957 for test_sliver in self.all_sliver_objs():
1958 remote = test_sliver.tar_var_logs()
1959 utils.system("mkdir -p logs/sliver.var-log.{}".format(test_sliver.name()))
1960 command = remote + " | tar -C logs/sliver.var-log.{} -xf -".format(test_sliver.name())
1961 utils.system(command)
1964 def gather_var_logs(self):
1965 utils.system("mkdir -p logs/myplc.var-log.{}".format(self.name()))
1966 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1967 command = to_plc + "| tar -C logs/myplc.var-log.{} -xf -".format(self.name())
1968 utils.system(command)
1969 command = "chmod a+r,a+x logs/myplc.var-log.{}/httpd".format(self.name())
1970 utils.system(command)
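# all the gather_* helpers rely on the same tar-over-ssh pipe; expanded, the
# command above is roughly (illustrative, with a hypothetical plc name):
#   <ssh into the test box and guest> tar -C /var/log/ -cf - . | tar -C logs/myplc.var-log.vplc1 -xf -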
1972 def gather_pgsql_logs(self):
1973 utils.system("mkdir -p logs/myplc.pgsql-log.{}".format(self.name()))
1974 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1975 command = to_plc + "| tar -C logs/myplc.pgsql-log.{} -xf -".format(self.name())
1976 utils.system(command)
1978 def gather_root_sfi(self):
1979 utils.system("mkdir -p logs/sfi.{}".format(self.name()))
1980 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1981 command = to_plc + "| tar -C logs/sfi.{} -xf -".format(self.name())
1982 utils.system(command)
1984 def gather_nodes_var_logs(self):
1985 for site_spec in self.plc_spec['sites']:
1986 test_site = TestSite(self, site_spec)
1987 for node_spec in site_spec['nodes']:
1988 test_node = TestNode(self, test_site, node_spec)
1989 test_ssh = TestSsh(test_node.name(), key="keys/key_admin.rsa")
1990 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1991 command = command + "| tar -C logs/node.var-log.{} -xf -".format(test_node.name())
1992 utils.system("mkdir -p logs/node.var-log.{}".format(test_node.name()))
1993 utils.system(command)
1996 # returns the filename to use for sql dump/restore, using options.dbname if set
1997 def dbfile(self, database):
1998 # uses options.dbname if it is found
2000 name = self.options.dbname
2001 if not isinstance(name, str):
2007 return "/root/{}-{}.sql".format(database, name)
2009 def plc_db_dump(self):
2010 'dump the planetlab5 DB in /root in the PLC - filename has time'
2011 dump=self.dbfile("planetab5")
2012 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
2013 utils.header('Dumped planetlab5 database in {}'.format(dump))
2016 def plc_db_restore(self):
2017 'restore the planetlab5 DB - looks broken, but run -n might help'
2018 dump = self.dbfile("planetab5")
2019 self.run_in_guest('systemctl stop httpd')
2020 # xxx - need another wrapper
2021 self.run_in_guest_piped('echo drop database planetlab5', 'psql --user=pgsqluser template1')
2022 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
2023 self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump)
# start httpd again
2025 self.run_in_guest('systemctl start httpd')
2027 utils.header('Database restored from ' + dump)
2030 def create_ignore_steps():
2031 for step in TestPlc.default_steps + TestPlc.other_steps:
2032 # default step can have a plc qualifier
2034 step, qualifier = step.split('@')
2035 # or be defined as forced or ignored by default
2036 for keyword in ['_ignore', '_force']:
2037 if step.endswith(keyword):
step = step.replace(keyword, '')
if step == SEP or step == SEPSFA:
2041 method = getattr(TestPlc,step)
2042 name = step + '_ignore'
2043 wrapped = ignore_result(method)
2044 # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
2045 setattr(TestPlc, name, wrapped)
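# for illustration: given a hypothetical step 'ssh_slice_ignore@1', the loop above
# drops the '@1' qualifier and the '_ignore' suffix, looks up TestPlc.ssh_slice,
# wraps it with ignore_result() and attaches the wrapper as TestPlc.ssh_slice_ignore,
# so that step runs but its failure does not abort the whole sequence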
2048 # def ssh_slice_again_ignore (self): pass
2050 # def check_initscripts_ignore (self): pass
2052 def standby_1_through_20(self):
2053 """convenience function to wait for a specified number of minutes"""
2056 def standby_1(): pass
2058 def standby_2(): pass
2060 def standby_3(): pass
2062 def standby_4(): pass
2064 def standby_5(): pass
2066 def standby_6(): pass
2068 def standby_7(): pass
2070 def standby_8(): pass
2072 def standby_9(): pass
2074 def standby_10(): pass
2076 def standby_11(): pass
2078 def standby_12(): pass
2080 def standby_13(): pass
2082 def standby_14(): pass
2084 def standby_15(): pass
2086 def standby_16(): pass
2088 def standby_17(): pass
2090 def standby_18(): pass
2092 def standby_19(): pass
2094 def standby_20(): pass
2096 # convenience for debugging the test logic
2097 def yes(self): return True
2098 def no(self): return False
2099 def fail(self): return False