[tests.git] / system / TestPlc.py
1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
3 #
4 import sys
5 import time
6 import os, os.path
7 import traceback
8 import socket
9 from datetime import datetime, timedelta
10
11 import utils
12 from Completer import Completer, CompleterTask
13 from TestSite import TestSite
14 from TestNode import TestNode, CompleterTaskNodeSsh
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
24
25 from TestBonding import TestBonding
26
27 has_sfa_cache_filename="sfa-cache"
28
29 # step methods must take (self) and return a boolean (options is a member of the class)
30
31 def standby(minutes, dry_run):
32     utils.header('Entering StandBy for {:d} mn'.format(minutes))
33     if dry_run:
34         print('dry_run')
35     else:
36         time.sleep(60*minutes)
37     return True
38
39 def standby_generic(func):
40     def actual(self):
41         minutes = int(func.__name__.split("_")[1])
42         return standby(minutes, self.options.dry_run)
43     return actual
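# illustrative sketch (hypothetical step name, not one of the real ones):
# standby_generic reads the duration from the method name, so a step that
# waits 5 minutes would be declared as
#
#   @standby_generic
#   def standby_5(self): pass
#
# the 'standby_1_through_20' entries in other_steps below are presumably
# generated this way.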
44
45 def node_mapper(method):
46     def map_on_nodes(self, *args, **kwds):
47         overall = True
48         node_method = TestNode.__dict__[method.__name__]
49         for test_node in self.all_nodes():
50             if not node_method(test_node, *args, **kwds):
51                 overall=False
52         return overall
53     # maintain __name__ for ignore_result
54     map_on_nodes.__name__ = method.__name__
55     # restore the doc text
56     map_on_nodes.__doc__ = TestNode.__dict__[method.__name__].__doc__
57     return map_on_nodes
58
59 def slice_mapper(method):
60     def map_on_slices(self):
61         overall = True
62         slice_method = TestSlice.__dict__[method.__name__]
63         for slice_spec in self.plc_spec['slices']:
64             site_spec = self.locate_site (slice_spec['sitename'])
65             test_site = TestSite(self,site_spec)
66             test_slice = TestSlice(self,test_site,slice_spec)
67             if not slice_method(test_slice, self.options):
68                 overall=False
69         return overall
70     # maintain __name__ for ignore_result
71     map_on_slices.__name__ = method.__name__
72     # restore the doc text
73     map_on_slices.__doc__ = TestSlice.__dict__[method.__name__].__doc__
74     return map_on_slices
75
76 def bonding_redirector(method):
77     bonding_name = method.__name__.replace('bonding_', '')
78     def redirect(self):
79         bonding_method = TestBonding.__dict__[bonding_name]
80         return bonding_method(self.test_bonding)
81     # maintain __name__ for ignore_result
82     redirect.__name__ = method.__name__
83     # restore the doc text
84     redirect.__doc__ = TestBonding.__dict__[bonding_name].__doc__
85     return redirect
86
87 # run a step but return True so that we can go on
88 def ignore_result(method):
89     def ignoring(self):
90         # ssh_slice_ignore->ssh_slice
91         ref_name = method.__name__.replace('_ignore', '').replace('force_', '')
92         ref_method = TestPlc.__dict__[ref_name]
93         result = ref_method(self)
94         print("Actual (but ignored) result for {ref_name} is {result}".format(**locals()))
95         return Ignored(result)
96     name = method.__name__.replace('_ignore', '').replace('force_', '')
97     ignoring.__name__ = name
98     ignoring.__doc__ = "ignored version of " + name
99     return ignoring
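# illustrative sketch: this is presumably how the *_ignore steps listed in
# default_steps below (e.g. check_vsys_defaults_ignore) are declared - the
# decorated body is empty, and the actual work is done by the step whose name
# is obtained by stripping the '_ignore' / 'force_' part:
#
#   @ignore_result
#   def check_vsys_defaults_ignore(self): pass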
100
101 # a variant that expects the TestSlice method to return a list of CompleterTasks that
102 # are then merged into a single Completer run, to avoid waiting on each slice in turn
103 # - especially useful when one test fails
104 # because we need to pass arguments, we use a class instead of a plain function
105 class slice_mapper__tasks(object):
106     # could not get this to work with named arguments
107     def __init__(self, timeout_minutes, silent_minutes, period_seconds):
108         self.timeout = timedelta(minutes = timeout_minutes)
109         self.silent = timedelta(minutes = silent_minutes)
110         self.period = timedelta(seconds = period_seconds)
111     def __call__(self, method):
112         decorator_self=self
113         # compute augmented method name
114         method_name = method.__name__ + "__tasks"
115         # locate in TestSlice
116         slice_method = TestSlice.__dict__[ method_name ]
117         def wrappee(self):
118             tasks=[]
119             for slice_spec in self.plc_spec['slices']:
120                 site_spec = self.locate_site (slice_spec['sitename'])
121                 test_site = TestSite(self, site_spec)
122                 test_slice = TestSlice(self, test_site, slice_spec)
123                 tasks += slice_method (test_slice, self.options)
124             return Completer (tasks, message=method.__name__).\
125                 run(decorator_self.timeout, decorator_self.silent, decorator_self.period)
126         # restore the doc text from the TestSlice method even if a bit odd
127         wrappee.__name__ = method.__name__
128         wrappee.__doc__ = slice_method.__doc__
129         return wrappee
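# illustrative sketch (timer values are hypothetical): slice_mapper__tasks is
# used as a parameterized decorator on a TestPlc step whose real work lives in
# a TestSlice method of the same name suffixed with '__tasks', e.g.
#
#   @slice_mapper__tasks(20, 19, 15)
#   def ssh_slice(self): pass
#
# which gathers the CompleterTasks returned for every slice and runs them in
# one Completer with the given timeout / silent / period settings.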
130
131 def auth_sfa_mapper(method):
132     def actual(self):
133         overall = True
134         auth_method = TestAuthSfa.__dict__[method.__name__]
135         for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
136             test_auth = TestAuthSfa(self, auth_spec)
137             if not auth_method(test_auth, self.options):
138                 overall=False
139         return overall
140     # restore the doc text
141     actual.__doc__ = TestAuthSfa.__dict__[method.__name__].__doc__
142     return actual
143
144 class Ignored:
145     def __init__(self, result):
146         self.result = result
147
148 SEP = '<sep>'
149 SEPSFA = '<sep_sfa>'
150
151 class TestPlc:
152
153     default_steps = [
154         'show', SEP,
155         'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
156         'plc_install', 'plc_configure', 'plc_start', SEP,
157         'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
158         'plcapi_urls','speed_up_slices', SEP,
159         'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
160 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
161 # keep this out of the way for now
162         'check_vsys_defaults_ignore', SEP,
163 # run this first off so it's easier to re-run on another qemu box
164         'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
165         'qemu_clean_mine', 'qemu_export', 'qemu_cleanlog', SEP,
166         'qemu_start', 'qemu_timestamp', 'qemu_nodefamily', SEP,
167         'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
168         'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
169         'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
170         'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1',
171         'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
172         'sfa_discover@1', 'sfa_rspec@1', SEPSFA,
173         'sfa_allocate@1', 'sfa_provision@1', 'sfa_describe@1', SEPSFA,
174         'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
175         'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
176         # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
177         # but as the stress test might take a while, we sometimes missed the debug mode..
178         'probe_kvm_iptables',
179         'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
180         'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', SEP,
181         'ssh_slice_sfa@1', SEPSFA,
182         'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1','sfa_check_slice_plc_empty@1', SEPSFA,
183         'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
184         'cross_check_tcp@1', 'check_system_slice', SEP,
185         # for inspecting the slice while it runs the first time
186         #'fail',
187         # check slices are turned off properly
188         'debug_nodemanager',
189         'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
190         # check they are properly re-created with the same name
191         'fill_slices', 'ssh_slice_again', SEP,
192         'gather_logs_force', SEP,
193         ]
194     other_steps = [
195         'export', 'show_boxes', 'super_speed_up_slices', SEP,
196         'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
197         'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
198         'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
199         'delete_leases', 'list_leases', SEP,
200         'populate', SEP,
201         'nodestate_show','nodestate_safeboot','nodestate_boot', 'nodestate_upgrade', SEP,
202         'nodedistro_show','nodedistro_f14','nodedistro_f18', SEP,
203         'nodedistro_f20', 'nodedistro_f21','nodedistro_f22', SEP,
204         'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
205         'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
206         'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
207         'sfa_get_expires', SEPSFA,
208         'plc_db_dump' , 'plc_db_restore', SEP,
209         'check_netflow','check_drl', SEP,
210         'slice_fs_present', 'check_initscripts', SEP,
211         'standby_1_through_20','yes','no',SEP,
212         'install_syslinux6', 'bonding_builds', 'bonding_nodes', SEP,
213         ]
214     default_bonding_steps = [
215         'bonding_init_partial',
216         'bonding_add_yum',
217         'bonding_install_rpms', SEP,
218         ]
219
220     @staticmethod
221     def printable_steps(steps):
222         single_line = " ".join(steps) + " "
223         return single_line.replace(" "+SEP+" ", " \\\n").replace(" "+SEPSFA+" ", " \\\n")
224     @staticmethod
225     def valid_step(step):
226         return step != SEP and step != SEPSFA
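    # illustrative sketch (hypothetical input): printable_steps renders a step
    # list with shell-style line continuations at each separator, e.g.
    #
    #   TestPlc.printable_steps(['show', SEP, 'plc_install', 'plc_start'])
    #
    # returns 'show \\\nplc_install plc_start ', and valid_step() is what lets
    # callers skip the SEP / SEPSFA markers when iterating over the steps.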
227
228     # turn off the sfa-related steps when build has skipped SFA
229     # this was originally for centos5 but is still valid
230     # for up to f12 as recent SFAs with sqlalchemy won't build before f14
231     @staticmethod
232     def _has_sfa_cached(rpms_url):
233         if os.path.isfile(has_sfa_cache_filename):
234             with open(has_sfa_cache_filename) as cache:
235                 cached = cache.read() == "yes"
236             utils.header("build provides SFA (cached):{}".format(cached))
237             return cached
238         # warning, we're now building 'sface' so let's be a bit more picky
239         # full builds are expected to return with 0 here
240         utils.header("Checking if build provides SFA package...")
241         retcod = utils.system("curl --silent {}/ | grep -q sfa-".format(rpms_url)) == 0
242         encoded = 'yes' if retcod else 'no'
243         with open(has_sfa_cache_filename,'w') as cache:
244             cache.write(encoded)
245         return retcod
246
247     @staticmethod
248     def check_whether_build_has_sfa(rpms_url):
249         has_sfa = TestPlc._has_sfa_cached(rpms_url)
250         if has_sfa:
251             utils.header("build does provide SFA")
252         else:
253             # move all steps containing 'sfa' from default_steps to other_steps
254             utils.header("SFA package not found - removing steps with sfa or sfi")
255             sfa_steps = [ step for step in TestPlc.default_steps
256                           if step.find('sfa') >= 0 or step.find("sfi") >= 0 ]
257             TestPlc.other_steps += sfa_steps
258             for step in sfa_steps:
259                 TestPlc.default_steps.remove(step)
260
261     def __init__(self, plc_spec, options):
262         self.plc_spec = plc_spec
263         self.options = options
264         self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
265         self.vserverip = plc_spec['vserverip']
266         self.vservername = plc_spec['vservername']
267         self.vplchostname = self.vservername.split('-')[-1]
268         self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
269         self.apiserver = TestApiserver(self.url, options.dry_run)
270         (self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
271         (self.ssh_node_debug_timeout, self.ssh_node_debug_silent) = plc_spec['ssh_node_debug_timers']
272
273     def has_addresses_api(self):
274         return self.apiserver.has_method('AddIpAddress')
275
276     def name(self):
277         name = self.plc_spec['name']
278         return "{}.{}".format(name,self.vservername)
279
280     def hostname(self):
281         return self.plc_spec['host_box']
282
283     def is_local(self):
284         return self.test_ssh.is_local()
285
286     # define the API methods on this object through xmlrpc
287     # would help, but not strictly necessary
288     def connect(self):
289         pass
290
291     def actual_command_in_guest(self,command, backslash=False):
292         raw1 = self.host_to_guest(command)
293         raw2 = self.test_ssh.actual_command(raw1, dry_run=self.options.dry_run, backslash=backslash)
294         return raw2
295
296     def start_guest(self):
297       return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),
298                                                        dry_run=self.options.dry_run))
299
300     def stop_guest(self):
301       return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),
302                                                        dry_run=self.options.dry_run))
303
304     def run_in_guest(self, command, backslash=False):
305         raw = self.actual_command_in_guest(command, backslash)
306         return utils.system(raw)
307
308     def run_in_host(self,command):
309         return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
310
311     # backslashing turned out so awful at some point that I've turned off auto-backslashing
312     # see e.g. plc_start esp. the version for f14
313     # command gets run in the plc's vm
314     def host_to_guest(self, command):
315         ssh_leg = TestSsh(self.vplchostname)
316         return ssh_leg.actual_command(command, keep_stdin=True)
317
318     # this /vservers thing is legacy...
319     def vm_root_in_host(self):
320         return "/vservers/{}/".format(self.vservername)
321
322     def vm_timestamp_path(self):
323         return "/vservers/{}/{}.timestamp".format(self.vservername, self.vservername)
324
325     #start/stop the vserver
326     def start_guest_in_host(self):
327         return "virsh -c lxc:/// start {}".format(self.vservername)
328
329     def stop_guest_in_host(self):
330         return "virsh -c lxc:/// destroy {}".format(self.vservername)
331
332     # xxx quick n dirty
333     def run_in_guest_piped(self,local,remote):
334         return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),
335                                                                      keep_stdin = True))
336
337     def yum_check_installed(self, rpms):
338         if isinstance(rpms, list):
339             rpms=" ".join(rpms)
340         return self.run_in_guest("rpm -q {}".format(rpms)) == 0
341
342     # does a yum install in the vs, ignore yum retcod, check with rpm
343     def yum_install(self, rpms):
344         if isinstance(rpms, list):
345             rpms=" ".join(rpms)
346         yum_mode = self.run_in_guest("yum -y install {}".format(rpms))
347         if yum_mode != 0:
348             self.run_in_guest("dnf -y install --allowerasing {}".format(rpms))
349         # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
350         self.run_in_guest("yum-complete-transaction -y")
351         return self.yum_check_installed(rpms)
352
353     def auth_root(self):
354         return {'Username'   : self.plc_spec['settings']['PLC_ROOT_USER'],
355                 'AuthMethod' : 'password',
356                 'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
357                 'Role'       : self.plc_spec['role'],
358                 }
359
360     def locate_site(self,sitename):
361         for site in self.plc_spec['sites']:
362             if site['site_fields']['name'] == sitename:
363                 return site
364             if site['site_fields']['login_base'] == sitename:
365                 return site
366         raise Exception("Cannot locate site {}".format(sitename))
367
368     def locate_node(self, nodename):
369         for site in self.plc_spec['sites']:
370             for node in site['nodes']:
371                 if node['name'] == nodename:
372                     return site, node
373         raise Exception("Cannot locate node {}".format(nodename))
374
375     def locate_hostname(self, hostname):
376         for site in self.plc_spec['sites']:
377             for node in site['nodes']:
378                 if node['node_fields']['hostname'] == hostname:
379                     return(site, node)
380         raise Exception("Cannot locate hostname {}".format(hostname))
381
382     def locate_key(self, key_name):
383         for key in self.plc_spec['keys']:
384             if key['key_name'] == key_name:
385                 return key
386         raise Exception("Cannot locate key {}".format(key_name))
387
388     def locate_private_key_from_key_names(self, key_names):
389         # locate the first avail. key
390         found = False
391         for key_name in key_names:
392             key_spec = self.locate_key(key_name)
393             test_key = TestKey(self,key_spec)
394             publickey = test_key.publicpath()
395             privatekey = test_key.privatepath()
396             if os.path.isfile(publickey) and os.path.isfile(privatekey):
397                 found = True
                break
398         if found:
399             return privatekey
400         else:
401             return None
402
403     def locate_slice(self, slicename):
404         for slice in self.plc_spec['slices']:
405             if slice['slice_fields']['name'] == slicename:
406                 return slice
407         raise Exception("Cannot locate slice {}".format(slicename))
408
409     def all_sliver_objs(self):
410         result = []
411         for slice_spec in self.plc_spec['slices']:
412             slicename = slice_spec['slice_fields']['name']
413             for nodename in slice_spec['nodenames']:
414                 result.append(self.locate_sliver_obj(nodename, slicename))
415         return result
416
417     def locate_sliver_obj(self, nodename, slicename):
418         site,node = self.locate_node(nodename)
419         slice = self.locate_slice(slicename)
420         # build objects
421         test_site = TestSite(self, site)
422         test_node = TestNode(self, test_site, node)
423         # xxx the slice site is assumed to be the node site - mhh - probably harmless
424         test_slice = TestSlice(self, test_site, slice)
425         return TestSliver(self, test_node, test_slice)
426
427     def locate_first_node(self):
428         nodename = self.plc_spec['slices'][0]['nodenames'][0]
429         site,node = self.locate_node(nodename)
430         test_site = TestSite(self, site)
431         test_node = TestNode(self, test_site, node)
432         return test_node
433
434     def locate_first_sliver(self):
435         slice_spec = self.plc_spec['slices'][0]
436         slicename = slice_spec['slice_fields']['name']
437         nodename = slice_spec['nodenames'][0]
438         return self.locate_sliver_obj(nodename,slicename)
439
440     # all different hostboxes used in this plc
441     def get_BoxNodes(self):
442         # maps on sites and nodes, return [ (host_box,test_node) ]
443         tuples = []
444         for site_spec in self.plc_spec['sites']:
445             test_site = TestSite(self,site_spec)
446             for node_spec in site_spec['nodes']:
447                 test_node = TestNode(self, test_site, node_spec)
448                 if not test_node.is_real():
449                     tuples.append( (test_node.host_box(),test_node) )
450         # transform into a dict { 'host_box' -> [ test_node .. ] }
451         result = {}
452         for (box,node) in tuples:
453             if box not in result:
454                 result[box] = [node]
455             else:
456                 result[box].append(node)
457         return result
458
459     # a step for checking this stuff
460     def show_boxes(self):
461         'print summary of nodes location'
462         for box,nodes in self.get_BoxNodes().items():
463             print(box,":"," + ".join( [ node.name() for node in nodes ] ))
464         return True
465
466     # make this a valid step
467     def qemu_kill_all(self):
468         'kill all qemu instances on the qemu boxes involved by this setup'
469         # this is the brute force version, kill all qemus on that host box
470         for (box,nodes) in self.get_BoxNodes().items():
471             # pass the first nodename, as we don't push template-qemu on testboxes
472             nodedir = nodes[0].nodedir()
473             TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
474         return True
475
476     # make this a valid step
477     def qemu_list_all(self):
478         'list all qemu instances on the qemu boxes involved by this setup'
479         for box,nodes in self.get_BoxNodes().items():
480             # this is the brute force version, list all qemus on that host box
481             TestBoxQemu(box, self.options.buildname).qemu_list_all()
482         return True
483
484     # list only the qemus related to this test
485     def qemu_list_mine(self):
486         'list qemu instances for our nodes'
487         for (box,nodes) in self.get_BoxNodes().items():
488             # the fine-grain version
489             for node in nodes:
490                 node.list_qemu()
491         return True
492
493     # clean up only the qemus related to this test
494     def qemu_clean_mine(self):
495         'cleanup (rm -rf) qemu instances for our nodes'
496         for box,nodes in self.get_BoxNodes().items():
497             # the fine-grain version
498             for node in nodes:
499                 node.qemu_clean()
500         return True
501
502     # kill only the right qemus
503     def qemu_kill_mine(self):
504         'kill the qemu instances for our nodes'
505         for box,nodes in self.get_BoxNodes().items():
506             # the fine-grain version
507             for node in nodes:
508                 node.kill_qemu()
509         return True
510
511     #################### display config
512     def show(self):
513         "show test configuration after localization"
514         self.show_pass(1)
515         self.show_pass(2)
516         return True
517
518     # ugly hack to make sure 'run export' only reports about the 1st plc
519     # to avoid confusion - also we use 'inri_slice1' in various aliases..
520     exported_id = 1
521     def export(self):
522         "print cut'n paste-able stuff to export env variables to your shell"
523         # guess local domain from hostname
524         if TestPlc.exported_id > 1:
525             print("export GUESTHOSTNAME{:d}={}".format(TestPlc.exported_id, self.plc_spec['vservername']))
526             return True
527         TestPlc.exported_id += 1
528         domain = socket.gethostname().split('.',1)[1]
529         fqdn   = "{}.{}".format(self.plc_spec['host_box'], domain)
530         print("export BUILD={}".format(self.options.buildname))
531         print("export PLCHOSTLXC={}".format(fqdn))
532         print("export GUESTNAME={}".format(self.vservername))
533         print("export GUESTHOSTNAME={}.{}".format(self.vplchostname, domain))
534         # find hostname of first node
535         hostname, qemubox = self.all_node_infos()[0]
536         print("export KVMHOST={}.{}".format(qemubox, domain))
537         print("export NODE={}".format(hostname))
538         return True
539
540     # entry point
541     always_display_keys=['PLC_WWW_HOST', 'nodes', 'sites']
542     def show_pass(self, passno):
543         for (key,val) in self.plc_spec.items():
544             if not self.options.verbose and key not in TestPlc.always_display_keys:
545                 continue
546             if passno == 2:
547                 if key == 'sites':
548                     for site in val:
549                         self.display_site_spec(site)
550                         for node in site['nodes']:
551                             self.display_node_spec(node)
552                 elif key == 'initscripts':
553                     for initscript in val:
554                         self.display_initscript_spec(initscript)
555                 elif key == 'slices':
556                     for slice in val:
557                         self.display_slice_spec(slice)
558                 elif key == 'keys':
559                     for key in val:
560                         self.display_key_spec(key)
561             elif passno == 1:
562                 if key not in ['sites', 'initscripts', 'slices', 'keys']:
563                     print('+   ', key, ':', val)
564
565     def display_site_spec(self, site):
566         print('+ ======== site', site['site_fields']['name'])
567         for k,v in site.items():
568             if not self.options.verbose and k not in TestPlc.always_display_keys:
569                 continue
570             if k == 'nodes':
571                 if v:
572                     print('+       ','nodes : ', end=' ')
573                     for node in v:
574                         print(node['node_fields']['hostname'],'', end=' ')
575                     print('')
576             elif k == 'users':
577                 if v:
578                     print('+       users : ', end=' ')
579                     for user in v:
580                         print(user['name'],'', end=' ')
581                     print('')
582             elif k == 'site_fields':
583                 print('+       login_base', ':', v['login_base'])
584             elif k == 'address_fields':
585                 pass
586             else:
587                 print('+       ', end=' ')
588                 utils.pprint(k, v)
589
590     def display_initscript_spec(self, initscript):
591         print('+ ======== initscript', initscript['initscript_fields']['name'])
592
593     def display_key_spec(self, key):
594         print('+ ======== key', key['key_name'])
595
596     def display_slice_spec(self, slice):
597         print('+ ======== slice', slice['slice_fields']['name'])
598         for k,v in slice.items():
599             if k == 'nodenames':
600                 if v:
601                     print('+       nodes : ', end=' ')
602                     for nodename in v:
603                         print(nodename,'', end=' ')
604                     print('')
605             elif k == 'usernames':
606                 if v:
607                     print('+       users : ', end=' ')
608                     for username in v:
609                         print(username,'', end=' ')
610                     print('')
611             elif k == 'slice_fields':
612                 print('+       fields',':', end=' ')
613                 print('max_nodes=',v['max_nodes'], end=' ')
614                 print('')
615             else:
616                 print('+       ',k,v)
617
618     def display_node_spec(self, node):
619         print("+           node={} host_box={}".format(node['name'], node['host_box']), end=' ')
620         print("hostname=", node['node_fields']['hostname'], end=' ')
621         print("ip=", node['interface_fields']['ip'])
622         if self.options.verbose:
623             utils.pprint("node details", node, depth=3)
624
625     # another entry point for just showing the boxes involved
626     def display_mapping(self):
627         TestPlc.display_mapping_plc(self.plc_spec)
628         return True
629
630     @staticmethod
631     def display_mapping_plc(plc_spec):
632         print('+ MyPLC',plc_spec['name'])
633         # WARNING this would not be right for lxc-based PLC's - should be harmless though
634         print('+\tvserver address = root@{}:/vservers/{}'.format(plc_spec['host_box'], plc_spec['vservername']))
635         print('+\tIP = {}/{}'.format(plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip']))
636         for site_spec in plc_spec['sites']:
637             for node_spec in site_spec['nodes']:
638                 TestPlc.display_mapping_node(node_spec)
639
640     @staticmethod
641     def display_mapping_node(node_spec):
642         print('+   NODE {}'.format(node_spec['name']))
643         print('+\tqemu box {}'.format(node_spec['host_box']))
644         print('+\thostname={}'.format(node_spec['node_fields']['hostname']))
645
646     # write a timestamp in /vservers/<>.timestamp
647     # cannot be inside the vserver, that causes vserver .. build to cough
648     def plcvm_timestamp(self):
649         "Create a timestamp to remember creation date for this plc"
650         now = int(time.time())
651         # TODO-lxc check this one
652         # a first approx. is to store the timestamp close to the VM root like vs does
653         stamp_path = self.vm_timestamp_path()
654         stamp_dir = os.path.dirname(stamp_path)
655         utils.system(self.test_ssh.actual_command("mkdir -p {}".format(stamp_dir)))
656         return utils.system(self.test_ssh.actual_command("echo {:d} > {}".format(now, stamp_path))) == 0
657
658     # this is called unconditionally at the beginning of the test sequence
659     # just in case this is a rerun, so if the vm is not running it's fine
660     def plcvm_delete(self):
661         "vserver delete the test myplc"
662         stamp_path = self.vm_timestamp_path()
663         self.run_in_host("rm -f {}".format(stamp_path))
664         self.run_in_host("virsh -c lxc:// destroy {}".format(self.vservername))
665         self.run_in_host("virsh -c lxc:// undefine {}".format(self.vservername))
666         self.run_in_host("rm -fr /vservers/{}".format(self.vservername))
667         return True
668
669     ### install
670     # historically the build was being fetched by the tests
671     # now the build pushes itself as a subdir of the tests workdir
672     # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
673     def plcvm_create(self):
674         "vserver creation (no install done)"
675         # push the local build/ dir to the testplc box
676         if self.is_local():
677             # a full path for the local calls
678             build_dir = os.path.dirname(sys.argv[0])
679             # sometimes this is empty - set to "." in such a case
680             if not build_dir:
681                 build_dir="."
682             build_dir += "/build"
683         else:
684             # use a standard name - will be relative to remote buildname
685             build_dir = "build"
686             # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
687             self.test_ssh.rmdir(build_dir)
688             self.test_ssh.copy(build_dir, recursive=True)
689         # the repo url is taken from arch-rpms-url
690         # with the last step (i386) removed
691         repo_url = self.options.arch_rpms_url
692         for level in [ 'arch' ]:
693             repo_url = os.path.dirname(repo_url)
694
695         # invoke initvm (drop support for vs)
696         script = "lbuild-initvm.sh"
697         script_options = ""
698         # pass the vbuild-nightly options to [lv]test-initvm
699         script_options += " -p {}".format(self.options.personality)
700         script_options += " -d {}".format(self.options.pldistro)
701         script_options += " -f {}".format(self.options.fcdistro)
702         script_options += " -r {}".format(repo_url)
703         vserver_name = self.vservername
704         try:
705             vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
706             script_options += " -n {}".format(vserver_hostname)
707         except:
708             print("Cannot reverse lookup {}".format(self.vserverip))
709             print("This is considered fatal, as this might pollute the test results")
710             return False
711         create_vserver="{build_dir}/{script} {script_options} {vserver_name}".format(**locals())
712         return self.run_in_host(create_vserver) == 0
713
714     ### install_rpm
715     def plc_install(self):
716         """
717         yum install myplc, noderepo
718         """
719
720         # compute nodefamily
721         if self.options.personality == "linux32":
722             arch = "i386"
723         elif self.options.personality == "linux64":
724             arch = "x86_64"
725         else:
726             raise Exception("Unsupported personality {}".format(self.options.personality))
727         nodefamily = "{}-{}-{}".format(self.options.pldistro, self.options.fcdistro, arch)
728
729         pkgs_list=[]
730         pkgs_list.append("slicerepo-{}".format(nodefamily))
731         pkgs_list.append("myplc")
732         pkgs_list.append("noderepo-{}".format(nodefamily))
734         return self.yum_install(pkgs_list)
735
736     def install_syslinux6(self):
737         """
738         install syslinux6 from the fedora21 release
739         """
740         key = 'http://mirror.onelab.eu/keys/RPM-GPG-KEY-fedora-21-primary'
741
742         rpms = [
743             'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-6.03-1.fc21.x86_64.rpm',
744             'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-nonlinux-6.03-1.fc21.noarch.rpm',
745             'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-perl-6.03-1.fc21.x86_64.rpm',
746         ]
747         # this can be done several times
748         self.run_in_guest("rpm --import {key}".format(**locals()))
749         return self.run_in_guest("yum -y localinstall {}".format(" ".join(rpms))) == 0
750
751     def bonding_builds(self):
752         """
753         list /etc/yum.repos.d on the myplc side
754         """
755         self.run_in_guest("ls /etc/yum.repos.d/*partial.repo")
756         return True
757
758     def bonding_nodes(self):
759         """
760         List nodes known to the myplc together with their nodefamiliy
761         """
762         print("---------------------------------------- nodes")
763         for node in self.apiserver.GetNodes(self.auth_root()):
764             print("{} -> {}".format(node['hostname'],
765                                     self.apiserver.GetNodeFlavour(self.auth_root(),node['hostname'])['nodefamily']))
766         print("---------------------------------------- nodes")
767
768
769     ###
770     def mod_python(self):
771         """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
772         return self.yum_install( ['mod_python'] )
773
774     ###
775     def plc_configure(self):
776         "run plc-config-tty"
777         tmpname = '{}.plc-config-tty'.format(self.name())
778         with open(tmpname,'w') as fileconf:
779             for var, value in self.plc_spec['settings'].items():
780                 fileconf.write('e {}\n{}\n'.format(var, value))
781             fileconf.write('w\n')
782             fileconf.write('q\n')
783         utils.system('cat {}'.format(tmpname))
784         self.run_in_guest_piped('cat {}'.format(tmpname), 'plc-config-tty')
785         utils.system('rm {}'.format(tmpname))
786         return True
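    # illustrative sketch of the temporary plc-config-tty script generated
    # above (variable names and values here are hypothetical):
    #
    #   e PLC_NAME
    #   TestLab
    #   e PLC_ROOT_USER
    #   root@test.example.com
    #   w
    #   q
    #
    # i.e. one 'e <var>' / '<value>' pair per setting, then write and quit.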
787
788     # care only about f>=25
789     def start_stop_service(self, service, start_or_stop):
790         "utility to start/stop an old-fashioned service (plc)"
791         return self.run_in_guest("service {} {}".format(service, start_or_stop)) == 0
792
793     def start_stop_systemd(self, service, start_or_stop):
794         "utility to start/stop a systemd-defined service (sfa)"
795         return self.run_in_guest("systemctl {} {}".format(start_or_stop, service)) == 0
796
797     def plc_start(self):
798         "service plc start"
799         return self.start_stop_service('plc', 'start')
800
801     def plc_stop(self):
802         "service plc stop"
803         return self.start_stop_service('plc', 'stop')
804
805     def plcvm_start(self):
806         "start the PLC vserver"
807         self.start_guest()
808         return True
809
810     def plcvm_stop(self):
811         "stop the PLC vserver"
812         self.stop_guest()
813         return True
814
815     # stores the keys from the config for further use
816     def keys_store(self):
817         "stores test users ssh keys in keys/"
818         for key_spec in self.plc_spec['keys']:
819             TestKey(self,key_spec).store_key()
820         return True
821
822     def keys_clean(self):
823         "removes keys cached in keys/"
824         utils.system("rm -rf ./keys")
825         return True
826
827     # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
828     # for later direct access to the nodes
829     def keys_fetch(self):
830         "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
831         dir="./keys"
832         if not os.path.isdir(dir):
833             os.mkdir(dir)
834         vservername = self.vservername
835         vm_root = self.vm_root_in_host()
836         overall = True
837         prefix = 'debug_ssh_key'
838         for ext in ['pub', 'rsa'] :
839             src = "{vm_root}/etc/planetlab/{prefix}.{ext}".format(**locals())
840             dst = "keys/{vservername}-debug.{ext}".format(**locals())
841             if self.test_ssh.fetch(src, dst) != 0:
842                 overall=False
843         return overall
844
845     def sites(self):
846         "create sites with PLCAPI"
847         return self.do_sites()
848
849     def delete_sites(self):
850         "delete sites with PLCAPI"
851         return self.do_sites(action="delete")
852
853     def do_sites(self, action="add"):
854         for site_spec in self.plc_spec['sites']:
855             test_site = TestSite(self,site_spec)
856             if (action != "add"):
857                 utils.header("Deleting site {} in {}".format(test_site.name(), self.name()))
858                 test_site.delete_site()
859                 # deleted with the site
860                 #test_site.delete_users()
861                 continue
862             else:
863                 utils.header("Creating site {} & users in {}".format(test_site.name(), self.name()))
864                 test_site.create_site()
865                 test_site.create_users()
866         return True
867
868     def delete_all_sites(self):
869         "Delete all sites in PLC, and related objects"
870         print('auth_root', self.auth_root())
871         sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
872         for site in sites:
873             # keep the automatic site - otherwise we shoot ourselves in the foot, as root_auth is no longer valid
874             if site['login_base'] == self.plc_spec['settings']['PLC_SLICE_PREFIX']:
875                 continue
876             site_id = site['site_id']
877             print('Deleting site_id', site_id)
878             self.apiserver.DeleteSite(self.auth_root(), site_id)
879         return True
880
881     def nodes(self):
882         "create nodes with PLCAPI"
883         return self.do_nodes()
884     def delete_nodes(self):
885         "delete nodes with PLCAPI"
886         return self.do_nodes(action="delete")
887
888     def do_nodes(self, action="add"):
889         for site_spec in self.plc_spec['sites']:
890             test_site = TestSite(self, site_spec)
891             if action != "add":
892                 utils.header("Deleting nodes in site {}".format(test_site.name()))
893                 for node_spec in site_spec['nodes']:
894                     test_node = TestNode(self, test_site, node_spec)
895                     utils.header("Deleting {}".format(test_node.name()))
896                     test_node.delete_node()
897             else:
898                 utils.header("Creating nodes for site {} in {}".format(test_site.name(), self.name()))
899                 for node_spec in site_spec['nodes']:
900                     utils.pprint('Creating node {}'.format(node_spec), node_spec)
901                     test_node = TestNode(self, test_site, node_spec)
902                     test_node.create_node()
903         return True
904
905     def nodegroups(self):
906         "create nodegroups with PLCAPI"
907         return self.do_nodegroups("add")
908     def delete_nodegroups(self):
909         "delete nodegroups with PLCAPI"
910         return self.do_nodegroups("delete")
911
912     YEAR = 365*24*3600
913     @staticmethod
914     def translate_timestamp(start, grain, timestamp):
915         if timestamp < TestPlc.YEAR:
916             return start + timestamp*grain
917         else:
918             return timestamp
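    # worked example (illustrative values): with start=1700000000 and
    # grain=1800, a small value is taken as a number of grains relative to
    # start, while a full epoch timestamp goes through unchanged:
    #
    #   translate_timestamp(1700000000, 1800, 2)          -> 1700003600
    #   translate_timestamp(1700000000, 1800, 1700007200) -> 1700007200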
919
920     @staticmethod
921     def timestamp_printable(timestamp):
922         return time.strftime('%m-%d %H:%M:%S UTC', time.gmtime(timestamp))
923
924     def leases(self):
925         "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
926         now = int(time.time())
927         grain = self.apiserver.GetLeaseGranularity(self.auth_root())
928         print('API answered grain=', grain)
929         start = (now//grain)*grain
930         start += grain
931         # find out all nodes that are reservable
932         nodes = self.all_reservable_nodenames()
933         if not nodes:
934             utils.header("No reservable node found - proceeding without leases")
935             return True
936         ok = True
937         # attach them to the leases as specified in plc_specs
938         # this is where the 'leases' field gets interpreted as relative or absolute
939         for lease_spec in self.plc_spec['leases']:
940             # skip the ones that come with a null slice id
941             if not lease_spec['slice']:
942                 continue
943             lease_spec['t_from']  = TestPlc.translate_timestamp(start, grain, lease_spec['t_from'])
944             lease_spec['t_until'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_until'])
945             lease_addition = self.apiserver.AddLeases(self.auth_root(), nodes, lease_spec['slice'],
946                                                       lease_spec['t_from'], lease_spec['t_until'])
947             if lease_addition['errors']:
948                 utils.header("Cannot create leases, {}".format(lease_addition['errors']))
949                 ok = False
950             else:
951                 utils.header('Leases on nodes {} for {} from {:d} ({}) until {:d} ({})'\
952                              .format(nodes, lease_spec['slice'],
953                                      lease_spec['t_from'],  TestPlc.timestamp_printable(lease_spec['t_from']),
954                                      lease_spec['t_until'], TestPlc.timestamp_printable(lease_spec['t_until'])))
955
956         return ok
957
958     def delete_leases(self):
959         "remove all leases in the myplc side"
960         lease_ids = [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
961         utils.header("Cleaning leases {}".format(lease_ids))
962         self.apiserver.DeleteLeases(self.auth_root(), lease_ids)
963         return True
964
965     def list_leases(self):
966         "list all leases known to the myplc"
967         leases = self.apiserver.GetLeases(self.auth_root())
968         now = int(time.time())
969         for l in leases:
970             current = l['t_until'] >= now
971             if self.options.verbose or current:
972                 utils.header("{} {} from {} until {}"\
973                              .format(l['hostname'], l['name'],
974                                      TestPlc.timestamp_printable(l['t_from']),
975                                      TestPlc.timestamp_printable(l['t_until'])))
976         return True
977
978     # create nodegroups if needed, and populate
979     def do_nodegroups(self, action="add"):
980         # 1st pass to scan contents
981         groups_dict = {}
982         for site_spec in self.plc_spec['sites']:
983             test_site = TestSite(self,site_spec)
984             for node_spec in site_spec['nodes']:
985                 test_node = TestNode(self, test_site, node_spec)
986                 if 'nodegroups' in node_spec:
987                     nodegroupnames = node_spec['nodegroups']
988                     if isinstance(nodegroupnames, str):
989                         nodegroupnames = [ nodegroupnames ]
990                     for nodegroupname in nodegroupnames:
991                         if nodegroupname not in groups_dict:
992                             groups_dict[nodegroupname] = []
993                         groups_dict[nodegroupname].append(test_node.name())
994         auth = self.auth_root()
995         overall = True
996         for (nodegroupname,group_nodes) in groups_dict.items():
997             if action == "add":
998                 print('nodegroups:', 'dealing with nodegroup',\
999                     nodegroupname, 'on nodes', group_nodes)
1000                 # first, check if the nodetagtype is here
1001                 tag_types = self.apiserver.GetTagTypes(auth, {'tagname':nodegroupname})
1002                 if tag_types:
1003                     tag_type_id = tag_types[0]['tag_type_id']
1004                 else:
1005                     tag_type_id = self.apiserver.AddTagType(auth,
1006                                                             {'tagname' : nodegroupname,
1007                                                              'description' : 'for nodegroup {}'.format(nodegroupname),
1008                                                              'category' : 'test'})
1009                 print('located tag (type)', nodegroupname, 'as', tag_type_id)
1010                 # create nodegroup
1011                 nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname' : nodegroupname})
1012                 if not nodegroups:
1013                     self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
1014                     print('created nodegroup', nodegroupname, \
1015                         'from tagname', nodegroupname, 'and value', 'yes')
1016                 # set node tag on all nodes, value='yes'
1017                 for nodename in group_nodes:
1018                     try:
1019                         self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
1020                     except:
1021                         traceback.print_exc()
1022                         print('node', nodename, 'seems to already have tag', nodegroupname)
1023                     # check anyway
1024                     try:
1025                         expect_yes = self.apiserver.GetNodeTags(auth,
1026                                                                 {'hostname' : nodename,
1027                                                                  'tagname'  : nodegroupname},
1028                                                                 ['value'])[0]['value']
1029                         if expect_yes != "yes":
1030                             print('Mismatch node tag on node',nodename,'got',expect_yes)
1031                             overall = False
1032                     except:
1033                         if not self.options.dry_run:
1034                             print('Cannot find tag', nodegroupname, 'on node', nodename)
1035                             overall = False
1036             else:
1037                 try:
1038                     print('cleaning nodegroup', nodegroupname)
1039                     self.apiserver.DeleteNodeGroup(auth, nodegroupname)
1040                 except:
1041                     traceback.print_exc()
1042                     overall = False
1043         return overall
1044
1045     # a list of TestNode objs
1046     def all_nodes(self):
1047         nodes=[]
1048         for site_spec in self.plc_spec['sites']:
1049             test_site = TestSite(self,site_spec)
1050             for node_spec in site_spec['nodes']:
1051                 nodes.append(TestNode(self, test_site, node_spec))
1052         return nodes
1053
1054     # return a list of tuples (nodename,qemuname)
1055     def all_node_infos(self) :
1056         node_infos = []
1057         for site_spec in self.plc_spec['sites']:
1058             node_infos += [ (node_spec['node_fields']['hostname'], node_spec['host_box']) \
1059                                 for node_spec in site_spec['nodes'] ]
1060         return node_infos
1061
1062     def all_nodenames(self):
1063         return [ x[0] for x in self.all_node_infos() ]
1064     def all_reservable_nodenames(self):
1065         res = []
1066         for site_spec in self.plc_spec['sites']:
1067             for node_spec in site_spec['nodes']:
1068                 node_fields = node_spec['node_fields']
1069                 if 'node_type' in node_fields and node_fields['node_type'] == 'reservable':
1070                     res.append(node_fields['hostname'])
1071         return res
1072
1073     # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
1074     def nodes_check_boot_state(self, target_boot_state, timeout_minutes,
1075                                silent_minutes, period_seconds = 15):
1076         if self.options.dry_run:
1077             print('dry_run')
1078             return True
1079
1080         class CompleterTaskBootState(CompleterTask):
1081             def __init__(self, test_plc, hostname):
1082                 self.test_plc = test_plc
1083                 self.hostname = hostname
1084                 self.last_boot_state = 'undef'
1085             def actual_run(self):
1086                 try:
1087                     node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(),
1088                                                             [ self.hostname ],
1089                                                             ['boot_state'])[0]
1090                     self.last_boot_state = node['boot_state']
1091                     return self.last_boot_state == target_boot_state
1092                 except:
1093                     return False
1094             def message(self):
1095                 return "CompleterTaskBootState with node {}".format(self.hostname)
1096             def failure_epilogue(self):
1097                 print("node {} in state {} - expected {}"\
1098                     .format(self.hostname, self.last_boot_state, target_boot_state))
1099
1100         timeout = timedelta(minutes=timeout_minutes)
1101         graceout = timedelta(minutes=silent_minutes)
1102         period   = timedelta(seconds=period_seconds)
1103         # the nodes that haven't checked yet - start with a full list and shrink over time
1104         utils.header("checking nodes boot state (expected {})".format(target_boot_state))
1105         tasks = [ CompleterTaskBootState(self,hostname) \
1106                       for (hostname,_) in self.all_node_infos() ]
1107         message = 'check_boot_state={}'.format(target_boot_state)
1108         return Completer(tasks, message=message).run(timeout, graceout, period)
1109
1110     def nodes_booted(self):
1111         return self.nodes_check_boot_state('boot', timeout_minutes=30, silent_minutes=28)
1112
1113     def probe_kvm_iptables(self):
1114         (_,kvmbox) = self.all_node_infos()[0]
1115         TestSsh(kvmbox).run("iptables-save")
1116         return True
1117
1118     # probing nodes
1119     def check_nodes_ping(self, timeout_seconds=60, period_seconds=10):
1120         class CompleterTaskPingNode(CompleterTask):
1121             def __init__(self, hostname):
1122                 self.hostname = hostname
1123             def run(self, silent):
1124                 command="ping -c 1 -w 1 {} >& /dev/null".format(self.hostname)
1125                 return utils.system(command, silent=silent) == 0
1126             def failure_epilogue(self):
1127                 print("Cannot ping node with name {}".format(self.hostname))
1128         timeout = timedelta(seconds = timeout_seconds)
1129         graceout = timeout
1130         period = timedelta(seconds = period_seconds)
1131         node_infos = self.all_node_infos()
1132         tasks = [ CompleterTaskPingNode(h) for (h,_) in node_infos ]
1133         return Completer(tasks, message='ping_node').run(timeout, graceout, period)
1134
1135     # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1136     def ping_node(self):
1137         "Ping nodes"
1138         return self.check_nodes_ping()
1139
1140     def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period_seconds=15):
1141         # various delays
1142         timeout  = timedelta(minutes=timeout_minutes)
1143         graceout = timedelta(minutes=silent_minutes)
1144         period   = timedelta(seconds=period_seconds)
1145         vservername = self.vservername
1146         if debug:
1147             message = "debug"
1148             completer_message = 'ssh_node_debug'
1149             local_key = "keys/{vservername}-debug.rsa".format(**locals())
1150         else:
1151             message = "boot"
1152             completer_message = 'ssh_node_boot'
1153             local_key = "keys/key_admin.rsa"
1154         utils.header("checking ssh access to nodes (expected in {} mode)".format(message))
1155         node_infos = self.all_node_infos()
1156         tasks = [ CompleterTaskNodeSsh(nodename, qemuname, local_key,
1157                                         boot_state=message, dry_run=self.options.dry_run) \
1158                       for (nodename, qemuname) in node_infos ]
1159         return Completer(tasks, message=completer_message).run(timeout, graceout, period)
1160
1161     def ssh_node_debug(self):
1162         "Tries to ssh into nodes in debug mode with the debug ssh key"
1163         return self.check_nodes_ssh(debug = True,
1164                                     timeout_minutes = self.ssh_node_debug_timeout,
1165                                     silent_minutes = self.ssh_node_debug_silent)
1166
1167     def ssh_node_boot(self):
1168         "Tries to ssh into nodes in production mode with the root ssh key"
1169         return self.check_nodes_ssh(debug = False,
1170                                     timeout_minutes = self.ssh_node_boot_timeout,
1171                                     silent_minutes = self.ssh_node_boot_silent)
1172
1173     def node_bmlogs(self):
1174         "Checks that there's a non-empty dir. /var/log/bm/raw"
1175         return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw")) == 0
1176
1177     @node_mapper
1178     def qemu_local_init(self): pass
1179     @node_mapper
1180     def bootcd(self): pass
1181     @node_mapper
1182     def qemu_local_config(self): pass
1183     @node_mapper
1184     def qemu_export(self): pass
1185     @node_mapper
1186     def qemu_cleanlog(self): pass
1187     @node_mapper
1188     def nodestate_reinstall(self): pass
1189     @node_mapper
1190     def nodestate_upgrade(self): pass
1191     @node_mapper
1192     def nodestate_safeboot(self): pass
1193     @node_mapper
1194     def nodestate_boot(self): pass
1195     @node_mapper
1196     def nodestate_show(self): pass
1197     @node_mapper
1198     def nodedistro_f14(self): pass
1199     @node_mapper
1200     def nodedistro_f18(self): pass
1201     @node_mapper
1202     def nodedistro_f20(self): pass
1203     @node_mapper
1204     def nodedistro_f21(self): pass
1205     @node_mapper
1206     def nodedistro_f22(self): pass
1207     @node_mapper
1208     def nodedistro_show(self): pass
1209
1210     ### check hooks : invoke scripts from hooks/{node,slice}
1211     def check_hooks_node(self):
1212         return self.locate_first_node().check_hooks()
1213     def check_hooks_sliver(self) :
1214         return self.locate_first_sliver().check_hooks()
1215
1216     def check_hooks(self):
1217         "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1218         return self.check_hooks_node() and self.check_hooks_sliver()
1219
1220     ### initscripts
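    # do_check_initscripts below relies on the CompleterTask/Completer pattern used
    # throughout this class: each task implements actual_run() (a single probe returning
    # a bool), message() and failure_epilogue(); Completer.run(timeout, graceout, period)
    # then re-runs the pending tasks every <period> until <timeout>, and - judging from
    # the *silent_minutes* settings used elsewhere - stays quiet about failures during
    # the initial <graceout> delay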
1221     def do_check_initscripts(self):
1222         class CompleterTaskInitscript(CompleterTask):
1223             def __init__(self, test_sliver, stamp):
1224                 self.test_sliver = test_sliver
1225                 self.stamp = stamp
1226             def actual_run(self):
1227                 return self.test_sliver.check_initscript_stamp(self.stamp)
1228             def message(self):
1229                 return "initscript checker for {}".format(self.test_sliver.name())
1230             def failure_epilogue(self):
1231                 print("initscript stamp {} not found in sliver {}"\
1232                     .format(self.stamp, self.test_sliver.name()))
1233
1234         tasks = []
1235         for slice_spec in self.plc_spec['slices']:
1236             if 'initscriptstamp' not in slice_spec:
1237                 continue
1238             stamp = slice_spec['initscriptstamp']
1239             slicename = slice_spec['slice_fields']['name']
1240             for nodename in slice_spec['nodenames']:
1241                 print('nodename', nodename, 'slicename', slicename, 'stamp', stamp)
1242                 site,node = self.locate_node(nodename)
1243                 # xxx - passing the wrong site - probably harmless
1244                 test_site = TestSite(self, site)
1245                 test_slice = TestSlice(self, test_site, slice_spec)
1246                 test_node = TestNode(self, test_site, node)
1247                 test_sliver = TestSliver(self, test_node, test_slice)
1248                 tasks.append(CompleterTaskInitscript(test_sliver, stamp))
1249         return Completer(tasks, message='check_initscripts').\
1250             run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1251
1252     def check_initscripts(self):
1253         "check that the initscripts have triggered"
1254         return self.do_check_initscripts()
1255
1256     def initscripts(self):
1257         "create initscripts with PLCAPI"
1258         for initscript in self.plc_spec['initscripts']:
1259             utils.pprint('Adding Initscript in plc {}'.format(self.plc_spec['name']), initscript)
1260             self.apiserver.AddInitScript(self.auth_root(), initscript['initscript_fields'])
1261         return True
1262
1263     def delete_initscripts(self):
1264         "delete initscripts with PLCAPI"
1265         for initscript in self.plc_spec['initscripts']:
1266             initscript_name = initscript['initscript_fields']['name']
1267             print('Attempting to delete {} in plc {}'.format(initscript_name, self.plc_spec['name']))
1268             try:
1269                 self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
1270                 print(initscript_name, 'deleted')
1271             except:
1272                 print('deletion went wrong - probably did not exist')
1273         return True
1274
1275     ### manage slices
1276     def slices(self):
1277         "create slices with PLCAPI"
1278         return self.do_slices(action="add")
1279
1280     def delete_slices(self):
1281         "delete slices with PLCAPI"
1282         return self.do_slices(action="delete")
1283
1284     def fill_slices(self):
1285         "add nodes in slices with PLCAPI"
1286         return self.do_slices(action="fill")
1287
1288     def empty_slices(self):
1289         "remove nodes from slices with PLCAPI"
1290         return self.do_slices(action="empty")
1291
1292     def do_slices(self,  action="add"):
1293         for slice in self.plc_spec['slices']:
1294             site_spec = self.locate_site(slice['sitename'])
1295             test_site = TestSite(self,site_spec)
1296             test_slice=TestSlice(self,test_site,slice)
1297             if action == "delete":
1298                 test_slice.delete_slice()
1299             elif action == "fill":
1300                 test_slice.add_nodes()
1301             elif action == "empty":
1302                 test_slice.delete_nodes()
1303             else:
1304                 test_slice.create_slice()
1305         return True
1306
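    # slice_mapper__tasks presumably follows the same conventions as the Completer-based
    # steps above: (timeout in minutes, silent/grace delay in minutes, polling period in seconds)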
1307     @slice_mapper__tasks(20, 10, 15)
1308     def ssh_slice(self): pass
1309     @slice_mapper__tasks(20, 19, 15)
1310     def ssh_slice_off(self): pass
1311     @slice_mapper__tasks(1, 1, 15)
1312     def slice_fs_present(self): pass
1313     @slice_mapper__tasks(1, 1, 15)
1314     def slice_fs_deleted(self): pass
1315
1316     # use another name so we can exclude/ignore it from the tests on the nightly command line
1317     def ssh_slice_again(self): return self.ssh_slice()
1318     # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1319     # but for some reason the ignore-wrapping thing would not
1320
1321     @slice_mapper
1322     def ssh_slice_basics(self): pass
1323     @slice_mapper
1324     def check_vsys_defaults(self): pass
1325
1326     @node_mapper
1327     def keys_clear_known_hosts(self): pass
1328
1329     def plcapi_urls(self):
1330         """
1331         attempts to reach the PLCAPI with various forms for the URL
1332         """
1333         return PlcapiUrlScanner(self.auth_root(), ip=self.vserverip).scan()
1334
1335     def speed_up_slices(self):
1336         "tweak nodemanager cycle (wait time) to 30+/-10 s"
1337         return self._speed_up_slices (30, 10)
1338     def super_speed_up_slices(self):
1339         "dev mode: tweak nodemanager cycle (wait time) to 5+/-1 s"
1340         return self._speed_up_slices(5, 1)
1341
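    # p is the nodemanager period and r the +/- random jitter, both in seconds (cf the
    # "30+/-10 s" and "5+/-1 s" docstrings above); the step drops a nodemanager sysconfig
    # template under PlanetLabConf in the guest and registers it as a conf file so that
    # nodes pick it up on their next update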
1342     def _speed_up_slices(self, p, r):
1343         # create the template on the server-side
1344         template = "{}.nodemanager".format(self.name())
1345         with open(template,"w") as template_file:
1346             template_file.write('OPTIONS="-p {} -r {} -d"\n'.format(p, r))
1347         in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1348         remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1349         self.test_ssh.copy_abs(template, remote)
1350         # Add a conf file
1351         if not self.apiserver.GetConfFiles(self.auth_root(),
1352                                            {'dest' : '/etc/sysconfig/nodemanager'}):
1353             self.apiserver.AddConfFile(self.auth_root(),
1354                                         {'dest' : '/etc/sysconfig/nodemanager',
1355                                          'source' : 'PlanetLabConf/nodemanager',
1356                                          'postinstall_cmd' : 'service nm restart',})
1357         return True
1358
1359     def debug_nodemanager(self):
1360         "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1361         template = "{}.nodemanager".format(self.name())
1362         with open(template,"w") as template_file:
1363             template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1364         in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1365         remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1366         self.test_ssh.copy_abs(template, remote)
1367         return True
1368
1369     @node_mapper
1370     def qemu_start(self) : pass
1371
1372     @node_mapper
1373     def qemu_timestamp(self) : pass
1374
1375     @node_mapper
1376     def qemu_nodefamily(self): pass
1377
1378     # when a spec refers to a node possibly on another plc
1379     def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
1380         for plc in [ self ] + other_plcs:
1381             try:
1382                 return plc.locate_sliver_obj(nodename, slicename)
1383             except:
1384                 pass
1385         raise Exception("Cannot locate sliver {}@{} among all PLCs".format(nodename, slicename))
1386
1387     # implement this one as a cross step so that we can take advantage of different nodes
1388     # in multi-plcs mode
1389     def cross_check_tcp(self, other_plcs):
1390         "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1391         if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1392             utils.header("check_tcp: no/empty config found")
1393             return True
1394         specs = self.plc_spec['tcp_specs']
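        # each tcp_spec is expected to be a dict along these lines (hypothetical values):
        #   { 'server_node' : 'node1.example.org', 'server_slice' : 'example_slice',
        #     'client_node' : 'node2.example.org', 'client_slice' : 'example_slice',
        #     'port' : 10000 }
        # with an optional 'client_connect' entry to override the address the client uses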
1395         overall = True
1396
1397         # first wait for the network to be up and ready from the slices
1398         class CompleterTaskNetworkReadyInSliver(CompleterTask):
1399             def __init__(self, test_sliver):
1400                 self.test_sliver = test_sliver
1401             def actual_run(self):
1402                 return self.test_sliver.check_tcp_ready(port = 9999)
1403             def message(self):
1404                 return "network ready checker for {}".format(self.test_sliver.name())
1405             def failure_epilogue(self):
1406                 print("could not bind port from sliver {}".format(self.test_sliver.name()))
1407
1408         sliver_specs = {}
1409         tasks = []
1410         managed_sliver_names = set()
1411         for spec in specs:
1412             # locate the TestSliver instances involved, and cache them in the spec instance
1413             spec['s_sliver'] = self.locate_sliver_obj_cross(spec['server_node'], spec['server_slice'], other_plcs)
1414             spec['c_sliver'] = self.locate_sliver_obj_cross(spec['client_node'], spec['client_slice'], other_plcs)
1415             message = "Will check TCP between s={} and c={}"\
1416                       .format(spec['s_sliver'].name(), spec['c_sliver'].name())
1417             if 'client_connect' in spec:
1418                 message += " (using {})".format(spec['client_connect'])
1419             utils.header(message)
1420             # we need to check network presence in both slivers, but also
1421             # avoid inserting the same sliver several times
1422             for sliver in [ spec['s_sliver'], spec['c_sliver'] ]:
1423                 if sliver.name() not in managed_sliver_names:
1424                     tasks.append(CompleterTaskNetworkReadyInSliver(sliver))
1425                     # add this sliver's name to the set
1426                     managed_sliver_names.update({sliver.name()})
1427
1428         # wait for the network to be OK on all server sides
1429         if not Completer(tasks, message='check for network readiness in slivers').\
1430            run(timedelta(seconds=30), timedelta(seconds=24), period=timedelta(seconds=5)):
1431             return False
1432
1433         # run server and client
1434         for spec in specs:
1435             port = spec['port']
1436             # server side
1437             # the issue here is that we have the server run in background
1438             # and so we have no clue if it took off properly or not
1439             # looks like in some cases it does not
1440             address = spec['s_sliver'].test_node.name()
1441             if not spec['s_sliver'].run_tcp_server(address, port, timeout=20):
1442                 overall = False
1443                 break
1444
1445             # idem for the client side
1446             # use nodename from located sliver, unless 'client_connect' is set
1447             if 'client_connect' in spec:
1448                 destination = spec['client_connect']
1449             else:
1450                 destination = spec['s_sliver'].test_node.name()
1451             if not spec['c_sliver'].run_tcp_client(destination, port):
1452                 overall = False
1453         return overall
1454
1455     # painfully enough, we need to allow for some time as netflow might show up last
1456     def check_system_slice(self):
1457         "all nodes: check that a system slice is alive"
1458         # netflow currently not working in the lxc distro
1459         # drl not built at all in the wtx distro
1460         # if we find either of them we're happy
1461         return self.check_netflow() or self.check_drl()
1462
1463     # expose these
1464     def check_netflow(self): return self._check_system_slice('netflow')
1465     def check_drl(self): return self._check_system_slice('drl')
1466
1467     # we have the slices up already here, so it should not take too long
1468     def _check_system_slice(self, slicename, timeout_minutes=5, period_seconds=15):
1469         class CompleterTaskSystemSlice(CompleterTask):
1470             def __init__(self, test_node, dry_run):
1471                 self.test_node = test_node
1472                 self.dry_run = dry_run
1473             def actual_run(self):
1474                 return self.test_node._check_system_slice(slicename, dry_run=self.dry_run)
1475             def message(self):
1476                 return "System slice {} @ {}".format(slicename, self.test_node.name())
1477             def failure_epilogue(self):
1478                 print("COULD not find system slice {} @ {}".format(slicename, self.test_node.name()))
1479         timeout = timedelta(minutes=timeout_minutes)
1480         silent  = timedelta(0)
1481         period  = timedelta(seconds=period_seconds)
1482         tasks = [ CompleterTaskSystemSlice(test_node, self.options.dry_run) \
1483                       for test_node in self.all_nodes() ]
1484         return Completer(tasks, message='_check_system_slice').run(timeout, silent, period)
1485
1486     def plcsh_stress_test(self):
1487         "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1488         # install the stress-test in the plc image
1489         location = "/usr/share/plc_api/plcsh_stress_test.py"
1490         remote = "{}/{}".format(self.vm_root_in_host(), location)
1491         self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1492         command = location
1493         command += " -- --check"
1494         if self.options.size == 1:
1495             command +=  " --tiny"
1496         return self.run_in_guest(command) == 0
1497
1498     # populate runs the same utility with slightly different options
1499     # in particular it runs with --preserve (don't cleanup) and without --check
1500     # also it gets run twice, once with the --foreign option, to create fake foreign entries
1501
1502     def sfa_install_all(self):
1503         "yum install sfa sfa-plc sfa-sfatables sfa-client"
1504         return (self.yum_install("sfa sfa-plc sfa-sfatables sfa-client") and
1505                 self.run_in_guest("systemctl enable sfa-registry")==0 and
1506                 self.run_in_guest("systemctl enable sfa-aggregate")==0)
1507
1508     def sfa_install_core(self):
1509         "yum install sfa"
1510         return self.yum_install("sfa")
1511
1512     def sfa_install_plc(self):
1513         "yum install sfa-plc"
1514         return self.yum_install("sfa-plc")
1515
1516     def sfa_install_sfatables(self):
1517         "yum install sfa-sfatables"
1518         return self.yum_install("sfa-sfatables")
1519
1520     # for some very odd reason, this sometimes fails with the following symptom
1521     # # yum install sfa-client
1522     # Setting up Install Process
1523     # ...
1524     # Downloading Packages:
1525     # Running rpm_check_debug
1526     # Running Transaction Test
1527     # Transaction Test Succeeded
1528     # Running Transaction
1529     # Transaction couldn't start:
1530     # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1531     # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1532     # even though in the same context I have
1533     # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1534     # Filesystem            Size  Used Avail Use% Mounted on
1535     # /dev/hdv1             806G  264G  501G  35% /
1536     # none                   16M   36K   16M   1% /tmp
1537     #
1538     # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1539     def sfa_install_client(self):
1540         "yum install sfa-client"
1541         first_try = self.yum_install("sfa-client")
1542         if first_try:
1543             return True
1544         utils.header("********** Regular yum failed - special workaround in place, 2nd chance")
1545         code, cached_rpm_path = \
1546                 utils.output_of(self.actual_command_in_guest(r'find /var/cache/yum -name sfa-client\*.rpm'))
1547         utils.header("cached_rpm_path=<<{}>>".format(cached_rpm_path))
1548         # just for checking
1549         self.run_in_guest("rpm -i {}".format(cached_rpm_path))
1550         return self.yum_check_installed("sfa-client")
1551
1552     def sfa_dbclean(self):
1553         "thoroughly wipes off the SFA database"
1554         return self.run_in_guest("sfaadmin reg nuke") == 0 or \
1555             self.run_in_guest("sfa-nuke.py") == 0 or \
1556             self.run_in_guest("sfa-nuke-plc.py") == 0 or \
1557             self.run_in_guest("sfaadmin registry nuke") == 0
1558
1559     def sfa_fsclean(self):
1560         "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1561         self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1562         return True
1563
1564     def sfa_plcclean(self):
1565         "cleans the PLC entries that were created as a side effect of running the script"
1566         # ignore result
1567         sfa_spec = self.plc_spec['sfa']
1568
1569         for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1570             login_base = auth_sfa_spec['login_base']
1571             try:
1572                 self.apiserver.DeleteSite(self.auth_root(),login_base)
1573             except:
1574                 print("Site {} already absent from PLC db".format(login_base))
1575
1576             for spec_name in ['pi_spec','user_spec']:
1577                 user_spec = auth_sfa_spec[spec_name]
1578                 username = user_spec['email']
1579                 try:
1580                     self.apiserver.DeletePerson(self.auth_root(),username)
1581                 except:
1582                     # this in fact is expected as sites delete their members
1583                     #print "User {} already absent from PLC db".format(username)
1584                     pass
1585
1586         print("REMEMBER TO RUN sfa_import AGAIN")
1587         return True
1588
1589     def sfa_uninstall(self):
1590         "uses rpm to uninstall sfa - ignore result"
1591         self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1592         self.run_in_guest("rm -rf /var/lib/sfa")
1593         self.run_in_guest("rm -rf /etc/sfa")
1594         self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1595         # xxx tmp
1596         self.run_in_guest("rpm -e --noscripts sfa-plc")
1597         return True
1598
1599     ### run unit tests for SFA
1600     # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1601     # Running Transaction
1602     # Transaction couldn't start:
1603     # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1604     # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1605     # no matter how many Gbs are available on the testplc
1606     # could not figure out what's wrong, so...
1607     # if the yum install phase fails, consider the test is successful
1608     # other combinations will eventually run it hopefully
1609     def sfa_utest(self):
1610         "yum install sfa-tests and run SFA unittests"
1611         self.run_in_guest("yum -y install sfa-tests")
1612         # failed to install - forget it
1613         if self.run_in_guest("rpm -q sfa-tests") != 0:
1614             utils.header("WARNING: SFA unit tests failed to install, ignoring")
1615             return True
1616         return self.run_in_guest("/usr/share/sfa/tests/testAll.py") == 0
1617
1618     ###
1619     def confdir(self):
1620         dirname = "conf.{}".format(self.plc_spec['name'])
1621         if not os.path.isdir(dirname):
1622             utils.system("mkdir -p {}".format(dirname))
1623         if not os.path.isdir(dirname):
1624             raise Exception("Cannot create config dir for plc {}".format(self.name()))
1625         return dirname
1626
1627     def conffile(self, filename):
1628         return "{}/{}".format(self.confdir(), filename)
1629     def confsubdir(self, dirname, clean, dry_run=False):
1630         subdirname = "{}/{}".format(self.confdir(), dirname)
1631         if clean:
1632             utils.system("rm -rf {}".format(subdirname))
1633         if not os.path.isdir(subdirname):
1634             utils.system("mkdir -p {}".format(subdirname))
1635         if not dry_run and not os.path.isdir(subdirname):
1636             raise Exception("Cannot create config subdir {} for plc {}".format(dirname, self.name()))
1637         return subdirname
1638
1639     def conffile_clean(self, filename):
1640         filename=self.conffile(filename)
1641         return utils.system("rm -rf {}".format(filename))==0
1642
1643     ###
1644     def sfa_configure(self):
1645         "run sfa-config-tty"
1646         tmpname = self.conffile("sfa-config-tty")
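        # build a command script for sfa-config-tty: one 'e <var>' line followed by the
        # value for each setting, then 'w' to write the config, 'R' and 'q' (presumably
        # restart and quit); the script is echoed locally and piped into sfa-config-tty
        # inside the guest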
1647         with open(tmpname,'w') as fileconf:
1648             for var, value in self.plc_spec['sfa']['settings'].items():
1649                 fileconf.write('e {}\n{}\n'.format(var, value))
1650             fileconf.write('w\n')
1651             fileconf.write('R\n')
1652             fileconf.write('q\n')
1653         utils.system('cat {}'.format(tmpname))
1654         self.run_in_guest_piped('cat {}'.format(tmpname), 'sfa-config-tty')
1655         return True
1656
1657     def aggregate_xml_line(self):
1658         port = self.plc_spec['sfa']['neighbours-port']
1659         return '<aggregate addr="{}" hrn="{}" port="{}"/>'\
1660             .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'], port)
1661
1662     def registry_xml_line(self):
1663         return '<registry addr="{}" hrn="{}" port="12345"/>'\
1664             .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
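    # for illustration only, with hypothetical values, the generated lines look like:
    #   <aggregate addr="10.0.0.5" hrn="onelab" port="12346"/>
    #   <registry addr="10.0.0.5" hrn="onelab" port="12345"/>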
1665
1666
1667     # a cross step that takes all other plcs in argument
1668     def cross_sfa_configure(self, other_plcs):
1669         "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1670         # of course with a single plc, other_plcs is an empty list
1671         if not other_plcs:
1672             return True
1673         agg_fname = self.conffile("agg.xml")
1674         with open(agg_fname,"w") as out:
1675             out.write("<aggregates>{}</aggregates>\n"\
1676                       .format(" ".join([ plc.aggregate_xml_line() for plc in other_plcs ])))
1677         utils.header("(Over)wrote {}".format(agg_fname))
1678         reg_fname=self.conffile("reg.xml")
1679         with open(reg_fname,"w") as out:
1680             out.write("<registries>{}</registries>\n"\
1681                       .format(" ".join([ plc.registry_xml_line() for plc in other_plcs ])))
1682         utils.header("(Over)wrote {}".format(reg_fname))
1683         return self.test_ssh.copy_abs(agg_fname,
1684                                       '/{}/etc/sfa/aggregates.xml'.format(self.vm_root_in_host())) == 0 \
1685            and self.test_ssh.copy_abs(reg_fname,
1686                                       '/{}/etc/sfa/registries.xml'.format(self.vm_root_in_host())) == 0
1687
1688     def sfa_import(self):
1689         "use sfaadmin to import from plc"
1690         auth = self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']
1691         return self.run_in_guest('sfaadmin reg import_registry') == 0
1692
1693     def sfa_start(self):
1694         "service sfa start"
1695         return (self.start_stop_systemd('sfa-registry', 'start') and
1696                 self.start_stop_systemd('sfa-aggregate', 'start'))
1697
1698
1699     def sfi_configure(self):
1700         "Create /root/sfi on the plc side for sfi client configuration"
1701         if self.options.dry_run:
1702             utils.header("DRY RUN - skipping step")
1703             return True
1704         sfa_spec = self.plc_spec['sfa']
1705         # cannot use auth_sfa_mapper to pass dir_name
1706         for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1707             test_slice = TestAuthSfa(self, slice_spec)
1708             dir_basename = os.path.basename(test_slice.sfi_path())
1709             dir_name = self.confsubdir("dot-sfi/{}".format(dir_basename),
1710                                        clean=True, dry_run=self.options.dry_run)
1711             test_slice.sfi_configure(dir_name)
1712             # push into the remote /root/sfi area
1713             location = test_slice.sfi_path()
1714             remote = "{}/{}".format(self.vm_root_in_host(), location)
1715             self.test_ssh.mkdir(remote, abs=True)
1716             # need to strip the last level of remote, otherwise we get an extra dir level
1717             self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1718
1719         return True
1720
1721     def sfi_clean(self):
1722         "clean up /root/sfi on the plc side"
1723         self.run_in_guest("rm -rf /root/sfi")
1724         return True
1725
1726     def sfa_rspec_empty(self):
1727         "expose a static empty rspec (ships with the tests module) in the sfi directory"
1728         filename = "empty-rspec.xml"
1729         overall = True
1730         for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1731             test_slice = TestAuthSfa(self, slice_spec)
1732             in_vm = test_slice.sfi_path()
1733             remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1734             if self.test_ssh.copy_abs(filename, remote) !=0:
1735                 overall = False
1736         return overall
1737
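    # in the same spirit as node_mapper and slice_mapper, auth_sfa_mapper presumably
    # forwards each of these empty steps to the same-named method on TestAuthSfa,
    # once per entry in the 'auth_sfa_specs' part of the spec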
1738     @auth_sfa_mapper
1739     def sfa_register_site(self): pass
1740     @auth_sfa_mapper
1741     def sfa_register_pi(self): pass
1742     @auth_sfa_mapper
1743     def sfa_register_user(self): pass
1744     @auth_sfa_mapper
1745     def sfa_update_user(self): pass
1746     @auth_sfa_mapper
1747     def sfa_register_slice(self): pass
1748     @auth_sfa_mapper
1749     def sfa_renew_slice(self): pass
1750     @auth_sfa_mapper
1751     def sfa_get_expires(self): pass
1752     @auth_sfa_mapper
1753     def sfa_discover(self): pass
1754     @auth_sfa_mapper
1755     def sfa_rspec(self): pass
1756     @auth_sfa_mapper
1757     def sfa_allocate(self): pass
1758     @auth_sfa_mapper
1759     def sfa_allocate_empty(self): pass
1760     @auth_sfa_mapper
1761     def sfa_provision(self): pass
1762     @auth_sfa_mapper
1763     def sfa_provision_empty(self): pass
1764     @auth_sfa_mapper
1765     def sfa_describe(self): pass
1766     @auth_sfa_mapper
1767     def sfa_check_slice_plc(self): pass
1768     @auth_sfa_mapper
1769     def sfa_check_slice_plc_empty(self): pass
1770     @auth_sfa_mapper
1771     def sfa_update_slice(self): pass
1772     @auth_sfa_mapper
1773     def sfa_remove_user_from_slice(self): pass
1774     @auth_sfa_mapper
1775     def sfa_insert_user_in_slice(self): pass
1776     @auth_sfa_mapper
1777     def sfi_list(self): pass
1778     @auth_sfa_mapper
1779     def sfi_show_site(self): pass
1780     @auth_sfa_mapper
1781     def sfi_show_slice(self): pass
1782     @auth_sfa_mapper
1783     def sfi_show_slice_researchers(self): pass
1784     @auth_sfa_mapper
1785     def ssh_slice_sfa(self): pass
1786     @auth_sfa_mapper
1787     def sfa_delete_user(self): pass
1788     @auth_sfa_mapper
1789     def sfa_delete_slice(self): pass
1790
1791     def sfa_stop(self):
1792         "service sfa stop"
1793         return (self.start_stop_systemd('sfa-aggregate', 'stop') and
1794                 self.start_stop_systemd('sfa-registry', 'stop'))
1795
1796     def populate(self):
1797         "creates random entries in the PLCAPI"
1798         # install the stress-test in the plc image
1799         location = "/usr/share/plc_api/plcsh_stress_test.py"
1800         remote = "{}/{}".format(self.vm_root_in_host(), location)
1801         self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1802         command = location
1803         command += " -- --preserve --short-names"
1804         local = (self.run_in_guest(command) == 0)
1805         # second run with --foreign
1806         command += ' --foreign'
1807         remote = (self.run_in_guest(command) == 0)
1808         return local and remote
1809
1810
1811     ####################
1812     @bonding_redirector
1813     def bonding_init_partial(self): pass
1814
1815     @bonding_redirector
1816     def bonding_add_yum(self): pass
1817
1818     @bonding_redirector
1819     def bonding_install_rpms(self): pass
1820
1821     ####################
1822
1823     def gather_logs(self):
1824         "gets all possible logs from plc's/qemu node's/slice's for future reference"
1825         # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1826         # (1.b) get the plc's  /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1827         # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1828         # (2) get all the nodes' qemu logs and store them as logs/node.qemu.<node>.log
1829         # (3) get the nodes' /var/log and store it as logs/node.var-log.<node>/*
1830         # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1831         # (1.a)
1832         print("-------------------- TestPlc.gather_logs : PLC's /var/log")
1833         self.gather_var_logs()
1834         # (1.b)
1835         print("-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/")
1836         self.gather_pgsql_logs()
1837         # (1.c)
1838         print("-------------------- TestPlc.gather_logs : PLC's /root/sfi/")
1839         self.gather_root_sfi()
1840         # (2)
1841         print("-------------------- TestPlc.gather_logs : nodes' QEMU logs")
1842         for site_spec in self.plc_spec['sites']:
1843             test_site = TestSite(self,site_spec)
1844             for node_spec in site_spec['nodes']:
1845                 test_node = TestNode(self, test_site, node_spec)
1846                 test_node.gather_qemu_logs()
1847         # (3)
1848         print("-------------------- TestPlc.gather_logs : nodes' /var/log")
1849         self.gather_nodes_var_logs()
1850         # (4)
1851         print("-------------------- TestPlc.gather_logs : sample sliver's /var/log")
1852         self.gather_slivers_var_logs()
1853         return True
1854
1855     def gather_slivers_var_logs(self):
1856         for test_sliver in self.all_sliver_objs():
1857             remote = test_sliver.tar_var_logs()
1858             utils.system("mkdir -p logs/sliver.var-log.{}".format(test_sliver.name()))
1859             command = remote + " | tar -C logs/sliver.var-log.{} -xf -".format(test_sliver.name())
1860             utils.system(command)
1861         return True
1862
1863     def gather_var_logs(self):
1864         utils.system("mkdir -p logs/myplc.var-log.{}".format(self.name()))
1865         to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1866         command = to_plc + "| tar -C logs/myplc.var-log.{} -xf -".format(self.name())
1867         utils.system(command)
1868         command = "chmod a+r,a+x logs/myplc.var-log.{}/httpd".format(self.name())
1869         utils.system(command)
1870
1871     def gather_pgsql_logs(self):
1872         utils.system("mkdir -p logs/myplc.pgsql-log.{}".format(self.name()))
1873         to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1874         command = to_plc + "| tar -C logs/myplc.pgsql-log.{} -xf -".format(self.name())
1875         utils.system(command)
1876
1877     def gather_root_sfi(self):
1878         utils.system("mkdir -p logs/sfi.{}".format(self.name()))
1879         to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1880         command = to_plc + "| tar -C logs/sfi.{} -xf -".format(self.name())
1881         utils.system(command)
1882
1883     def gather_nodes_var_logs(self):
1884         for site_spec in self.plc_spec['sites']:
1885             test_site = TestSite(self, site_spec)
1886             for node_spec in site_spec['nodes']:
1887                 test_node = TestNode(self, test_site, node_spec)
1888                 test_ssh = TestSsh(test_node.name(), key="keys/key_admin.rsa")
1889                 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1890                 command = command + "| tar -C logs/node.var-log.{} -xf -".format(test_node.name())
1891                 utils.system("mkdir -p logs/node.var-log.{}".format(test_node.name()))
1892                 utils.system(command)
1893
1894
1895     # returns the filename to use for sql dump/restore, using options.dbname if set
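    # e.g. dbfile("planetlab5") -> /root/planetlab5-<options.dbname>.sql if dbname is set,
    # or /root/planetlab5-YYYY-MM-DD.sql otherwise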
1896     def dbfile(self, database):
1897         # uses options.dbname if it is found
1898         try:
1899             name = self.options.dbname
1900             if not isinstance(name, str):
1901                 raise Exception
1902         except:
1903             t = datetime.now()
1904             d = t.date()
1905             name = str(d)
1906         return "/root/{}-{}.sql".format(database, name)
1907
1908     def plc_db_dump(self):
1909         'dump the planetlab5 DB in /root in the PLC - filename contains the date'
1910         dump = self.dbfile("planetlab5")
1911         self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1912         utils.header('Dumped planetlab5 database in {}'.format(dump))
1913         return True
1914
1915     def plc_db_restore(self):
1916         'restore the planetlab5 DB - looks broken, but run -n might help'
1917         dump = self.dbfile("planetlab5")
1918         ##stop httpd service
1919         self.run_in_guest('service httpd stop')
1920         # xxx - need another wrapper
1921         self.run_in_guest_piped('echo drop database planetlab5', 'psql --user=pgsqluser template1')
1922         self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1923         self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump)
1924         ##starting httpd service
1925         self.run_in_guest('service httpd start')
1926
1927         utils.header('Database restored from ' + dump)
        return True
1928
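    # create_ignore_steps generates, for every known step, a companion <step>_ignore
    # method that runs the step but always reports success (see ignore_result)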
1929     @staticmethod
1930     def create_ignore_steps():
1931         for step in TestPlc.default_steps + TestPlc.other_steps:
1932             # default step can have a plc qualifier
1933             if '@' in step:
1934                 step, qualifier = step.split('@')
1935             # or be defined as forced or ignored by default
1936             for keyword in ['_ignore','_force']:
1937                 if step.endswith(keyword):
1938                     step=step.replace(keyword,'')
1939             if step == SEP or step == SEPSFA :
1940                 continue
1941             method = getattr(TestPlc,step)
1942             name = step + '_ignore'
1943             wrapped = ignore_result(method)
1944 #            wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
1945             setattr(TestPlc, name, wrapped)
1946
1947 #    @ignore_result
1948 #    def ssh_slice_again_ignore (self): pass
1949 #    @ignore_result
1950 #    def check_initscripts_ignore (self): pass
1951
1952     def standby_1_through_20(self):
1953         """convenience function to wait for a specified number of minutes"""
1954         pass
1955     @standby_generic
1956     def standby_1(): pass
1957     @standby_generic
1958     def standby_2(): pass
1959     @standby_generic
1960     def standby_3(): pass
1961     @standby_generic
1962     def standby_4(): pass
1963     @standby_generic
1964     def standby_5(): pass
1965     @standby_generic
1966     def standby_6(): pass
1967     @standby_generic
1968     def standby_7(): pass
1969     @standby_generic
1970     def standby_8(): pass
1971     @standby_generic
1972     def standby_9(): pass
1973     @standby_generic
1974     def standby_10(): pass
1975     @standby_generic
1976     def standby_11(): pass
1977     @standby_generic
1978     def standby_12(): pass
1979     @standby_generic
1980     def standby_13(): pass
1981     @standby_generic
1982     def standby_14(): pass
1983     @standby_generic
1984     def standby_15(): pass
1985     @standby_generic
1986     def standby_16(): pass
1987     @standby_generic
1988     def standby_17(): pass
1989     @standby_generic
1990     def standby_18(): pass
1991     @standby_generic
1992     def standby_19(): pass
1993     @standby_generic
1994     def standby_20(): pass
1995
1996     # convenience for debugging the test logic
1997     def yes(self): return True
1998     def no(self): return False
1999     def fail(self): return False