1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
3 #
4 import sys
5 import time
6 import os, os.path
7 import traceback
8 import socket
9 from datetime import datetime, timedelta
10
11 import utils
12 from Completer import Completer, CompleterTask
13 from TestSite import TestSite
14 from TestNode import TestNode, CompleterTaskNodeSsh
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
24
25 from TestBonding import TestBonding
26
27 has_sfa_cache_filename="sfa-cache"
28
29 # step methods must take (self) and return a boolean (options is a member of the class)
30
31 def standby(minutes, dry_run):
32     utils.header('Entering StandBy for {:d} mn'.format(minutes))
33     if dry_run:
34         print('dry_run')
35     else:
36         time.sleep(60*minutes)
37     return True
38
39 def standby_generic(func):
40     def actual(self):
41         minutes = int(func.__name__.split("_")[1])
42         return standby(minutes, self.options.dry_run)
43     return actual
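# illustrative sketch, not in the original code: standby_generic derives the duration
# from the decorated method's name, so a hypothetical step defined as
#     @standby_generic
#     def standby_5(self): pass
# would sleep for int('standby_5'.split('_')[1]) == 5 minutes (or just print in dry_run);
# the 'standby_1_through_20' entry in other_steps below refers to steps built this way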
44
45 def node_mapper(method):
46     def map_on_nodes(self, *args, **kwds):
47         overall = True
48         node_method = TestNode.__dict__[method.__name__]
49         for test_node in self.all_nodes():
50             if not node_method(test_node, *args, **kwds):
51                 overall = False
52         return overall
53     # maintain __name__ for ignore_result
54     map_on_nodes.__name__ = method.__name__
55     # restore the doc text
56     map_on_nodes.__doc__ = TestNode.__dict__[method.__name__].__doc__
57     return map_on_nodes
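# usage sketch (mirrors the @node_mapper steps defined further down, e.g. qemu_local_init):
#     @node_mapper
#     def qemu_local_init(self): pass
# the empty body is discarded; TestNode.qemu_local_init(test_node) runs on every node
# returned by all_nodes() and the results are and-ed into a single boolean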
58
59 def slice_mapper(method):
60     def map_on_slices(self):
61         overall = True
62         slice_method = TestSlice.__dict__[method.__name__]
63         for slice_spec in self.plc_spec['slices']:
64             site_spec = self.locate_site (slice_spec['sitename'])
65             test_site = TestSite(self,site_spec)
66             test_slice = TestSlice(self,test_site,slice_spec)
67             if not slice_method(test_slice, self.options):
68                 overall=False
69         return overall
70     # maintain __name__ for ignore_result
71     map_on_slices.__name__ = method.__name__
72     # restore the doc text
73     map_on_slices.__doc__ = TestSlice.__dict__[method.__name__].__doc__
74     return map_on_slices
75
76 def bonding_redirector(method):
77     bonding_name = method.__name__.replace('bonding_', '')
78     def redirect(self):
79         bonding_method = TestBonding.__dict__[bonding_name]
80         return bonding_method(self.test_bonding)
81     # maintain __name__ for ignore_result
82     redirect.__name__ = method.__name__
83     # restore the doc text
84     redirect.__doc__ = TestBonding.__dict__[bonding_name].__doc__
85     return redirect
86
87 # run a step but return True so that we can go on
88 def ignore_result(method):
89     def ignoring(self):
90         # ssh_slice_ignore->ssh_slice
91         ref_name = method.__name__.replace('_ignore', '').replace('force_', '')
92         ref_method = TestPlc.__dict__[ref_name]
93         result = ref_method(self)
94         print("Actual (but ignored) result for {ref_name} is {result}".format(**locals()))
95         return Ignored(result)
96     name = method.__name__.replace('_ignore', '').replace('force_', '')
97     ignoring.__name__ = name
98     ignoring.__doc__ = "ignored version of " + name
99     return ignoring
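# naming convention, spelled out for clarity: a step declared as 'ssh_slice_ignore'
# resolves to TestPlc.ssh_slice, runs it, prints the real outcome, and wraps it in
# Ignored(result) so that the overall test run does not fail because of that step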
100
101 # a variant that expects the TestSlice method to return a list of CompleterTasks that
102 # are then merged into a single Completer run to avoid waiting for all the slices
103 # especially useful when a test fails, of course
104 # because we need to pass arguments, we use a class instead..
105 class slice_mapper__tasks(object):
106     # could not get this to work with named arguments
107     def __init__(self, timeout_minutes, silent_minutes, period_seconds):
108         self.timeout = timedelta(minutes = timeout_minutes)
109         self.silent = timedelta(minutes = silent_minutes)
110         self.period = timedelta(seconds = period_seconds)
111     def __call__(self, method):
112         decorator_self=self
113         # compute augmented method name
114         method_name = method.__name__ + "__tasks"
115         # locate in TestSlice
116         slice_method = TestSlice.__dict__[ method_name ]
117         def wrappee(self):
118             tasks=[]
119             for slice_spec in self.plc_spec['slices']:
120                 site_spec = self.locate_site (slice_spec['sitename'])
121                 test_site = TestSite(self, site_spec)
122                 test_slice = TestSlice(self, test_site, slice_spec)
123                 tasks += slice_method (test_slice, self.options)
124             return Completer (tasks, message=method.__name__).\
125                 run(decorator_self.timeout, decorator_self.silent, decorator_self.period)
126         # restore the doc text from the TestSlice method even if a bit odd
127         wrappee.__name__ = method.__name__
128         wrappee.__doc__ = slice_method.__doc__
129         return wrappee
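# usage sketch with made-up numbers, assuming a TestPlc step named ssh_slice and a
# matching TestSlice.ssh_slice__tasks method:
#     @slice_mapper__tasks(20, 19, 15)
#     def ssh_slice(self): pass
# would gather TestSlice.ssh_slice__tasks(...) for every slice spec and run them all
# through a single Completer with a 20 mn timeout, 19 mn of silence and a 15 s period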
130
131 def auth_sfa_mapper(method):
132     def actual(self):
133         overall = True
134         auth_method = TestAuthSfa.__dict__[method.__name__]
135         for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
136             test_auth = TestAuthSfa(self, auth_spec)
137             if not auth_method(test_auth, self.options):
138                 overall=False
139         return overall
140     # restore the doc text
141     actual.__doc__ = TestAuthSfa.__dict__[method.__name__].__doc__
142     return actual
143
144 class Ignored:
145     def __init__(self, result):
146         self.result = result
147
148 SEP = '<sep>'
149 SEPSFA = '<sep_sfa>'
150
151 class TestPlc:
152
153     default_steps = [
154         'show', SEP,
155         'plcvm_delete', 'plcvm_timestamp', 'plcvm_create', SEP,
156         'django_install', 'plc_install', 'plc_configure', 'plc_start', SEP,
157         'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
158         'plcapi_urls', 'speed_up_slices', SEP,
159         'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
160 # ss # slices created under plcsh interactively seem to be fine but these ones don't have the tags
161 # ss # keep this out of the way for now
162 # ss         'check_vsys_defaults_ignore', SEP,
163 # ss # run this first off so it's easier to re-run on another qemu box
164 # ss         'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init',
165 # ss         'bootcd', 'qemu_local_config', SEP,
166 # ss         'qemu_clean_mine', 'qemu_export', 'qemu_cleanlog', SEP,
167 # ss         'qemu_start', 'qemu_timestamp', 'qemu_nodefamily', SEP,
168         'sfa_install_all', 'sfa_configure', 'cross_sfa_configure',
169         'sfa_start', 'sfa_import', SEPSFA,
170         'sfi_configure@1', 'sfa_register_site@1', 'sfa_register_pi@1', SEPSFA,
171         'sfa_register_user@1', 'sfa_update_user@1',
172         'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
173         'sfa_remove_user_from_slice@1', 'sfi_show_slice_researchers@1',
174         'sfa_insert_user_in_slice@1', 'sfi_show_slice_researchers@1', SEPSFA,
175         'sfa_discover@1', 'sfa_rspec@1', SEPSFA,
176         'sfa_allocate@1', 'sfa_provision@1', 'sfa_describe@1', SEPSFA,
177         'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
178         'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
179         # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
180         # but as the stress test might take a while, we sometimes missed the debug mode..
181 # ss        'probe_kvm_iptables',
182 # ss        'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
183 # ss        'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', SEP,
184 # ss        'ssh_slice_sfa@1', SEPSFA,
185         'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1',
186         'sfa_check_slice_plc_empty@1', SEPSFA,
187         'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
188 # ss        'check_system_slice', SEP,
189         # for inspecting the slice while it runs the first time
190         #'fail',
191         # check slices are turned off properly
192 # ss        'debug_nodemanager',
193 # ss        'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
194 # ss        # check they are properly re-created with the same name
195 # ss        'fill_slices', 'ssh_slice_again', SEP,
196         'gather_logs_force', SEP,
197         ]
198     other_steps = [
199         'export', 'show_boxes', 'super_speed_up_slices', SEP,
200         'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
201         'delete_initscripts', 'delete_nodegroups', 'delete_all_sites', SEP,
202         'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
203         'delete_leases', 'list_leases', SEP,
204         'populate', SEP,
205         'nodestate_show', 'nodestate_safeboot', 'nodestate_boot', 'nodestate_upgrade', SEP,
206         'nodedistro_show', 'nodedistro_f14', 'nodedistro_f18', SEP,
207         'nodedistro_f20', 'nodedistro_f21', 'nodedistro_f22', SEP,
208         'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
209         'sfa_install_core', 'sfa_install_sfatables',
210         'sfa_install_plc', 'sfa_install_client', SEPSFA,
211         'sfa_plcclean', 'sfa_dbclean', 'sfa_stop', 'sfa_uninstall', 'sfi_clean', SEPSFA,
212         'sfa_get_expires', SEPSFA,
213         'plc_db_dump', 'plc_db_restore', SEP,
214         'check_netflow', 'check_drl', SEP,
215         # used to be part of default steps but won't work since f27
216         'cross_check_tcp@1',
217         'slice_fs_present', 'check_initscripts', SEP,
218         'standby_1_through_20', 'yes', 'no', SEP,
219         'install_syslinux6', 'bonding_builds', 'bonding_nodes', SEP,
220         ]
221     default_bonding_steps = [
222         'bonding_init_partial',
223         'bonding_add_yum',
224         'bonding_install_rpms', SEP,
225         ]
226
227     @staticmethod
228     def printable_steps(list):
229         single_line = " ".join(list) + " "
230         return single_line.replace(" "+SEP+" ", " \\\n").replace(" "+SEPSFA+" ", " \\\n")
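    # worked example, for illustration: printable_steps(['a', SEP, 'b']) replaces the
    # ' <sep> ' occurrence with ' \' + newline, so the result prints as
    #     a \
    #     b
    # i.e. one group of steps per line in the displayed step list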
231     @staticmethod
232     def valid_step(step):
233         return step != SEP and step != SEPSFA
234
235     # turn off the sfa-related steps when build has skipped SFA
236     # this was originally for centos5 but is still valid
237     # for up to f12 as recent SFAs with sqlalchemy won't build before f14
238     @staticmethod
239     def _has_sfa_cached(rpms_url):
240         if os.path.isfile(has_sfa_cache_filename):
241             with open(has_sfa_cache_filename) as cache:
242                 cached = cache.read() == "yes"
243             utils.header("build provides SFA (cached):{}".format(cached))
244             return cached
245         # warning, we're now building 'sface' so let's be a bit more picky
246         # full builds are expected to return with 0 here
247         utils.header("Checking if build provides SFA package...")
248         retcod = utils.system("curl --silent {}/ | grep -q sfa-".format(rpms_url)) == 0
249         encoded = 'yes' if retcod else 'no'
250         with open(has_sfa_cache_filename,'w') as cache:
251             cache.write(encoded)
252         return retcod
253
254     @staticmethod
255     def check_whether_build_has_sfa(rpms_url):
256         has_sfa = TestPlc._has_sfa_cached(rpms_url)
257         if has_sfa:
258             utils.header("build does provide SFA")
259         else:
260             # move all steps containing 'sfa' from default_steps to other_steps
261             utils.header("SFA package not found - removing steps with sfa or sfi")
262             sfa_steps = [ step for step in TestPlc.default_steps
263                           if step.find('sfa') >= 0 or step.find("sfi") >= 0 ]
264             TestPlc.other_steps += sfa_steps
265             for step in sfa_steps:
266                 TestPlc.default_steps.remove(step)
267
268     def __init__(self, plc_spec, options):
269         self.plc_spec = plc_spec
270         self.options = options
271         self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
272         self.vserverip = plc_spec['vserverip']
273         self.vservername = plc_spec['vservername']
274         self.vplchostname = self.vservername.split('-')[-1]
275         self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
276         self.apiserver = TestApiserver(self.url, options.dry_run)
277         (self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
278         (self.ssh_node_debug_timeout, self.ssh_node_debug_silent) = plc_spec['ssh_node_debug_timers']
279
280     def has_addresses_api(self):
281         return self.apiserver.has_method('AddIpAddress')
282
283     def name(self):
284         name = self.plc_spec['name']
285         return "{}.{}".format(name,self.vservername)
286
287     def hostname(self):
288         return self.plc_spec['host_box']
289
290     def is_local(self):
291         return self.test_ssh.is_local()
292
293     # define the API methods on this object through xmlrpc
294     # would help, but not strictly necessary
295     def connect(self):
296         pass
297
298     def actual_command_in_guest(self,command, backslash=False):
299         raw1 = self.host_to_guest(command)
300         raw2 = self.test_ssh.actual_command(raw1, dry_run=self.options.dry_run, backslash=backslash)
301         return raw2
302
303     def start_guest(self):
304       return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),
305                                                        dry_run=self.options.dry_run))
306
307     def stop_guest(self):
308       return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),
309                                                        dry_run=self.options.dry_run))
310
311     def run_in_guest(self, command, backslash=False):
312         raw = self.actual_command_in_guest(command, backslash)
313         return utils.system(raw)
314
315     def run_in_host(self,command):
316         return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
317
318     # backslashing turned out so awful at some point that I've turned off auto-backslashing
319     # see e.g. plc_start esp. the version for f14
320 # command gets run in the plc's vm
321     def host_to_guest(self, command):
322         ssh_leg = TestSsh(self.vplchostname)
323         return ssh_leg.actual_command(command, keep_stdin=True)
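    # schematic illustration (not the verbatim command): run_in_guest('uname -r') gets
    # wrapped twice - by host_to_guest to ssh into the plc VM, then by test_ssh to reach
    # the host box - so the effective command is roughly
    #     ssh <host_box> ssh <vplchostname> uname -r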
324
325     # this /vservers thing is legacy...
326     def vm_root_in_host(self):
327         return "/vservers/{}/".format(self.vservername)
328
329     def vm_timestamp_path(self):
330         return "/vservers/{}/{}.timestamp".format(self.vservername, self.vservername)
331
332     # start/stop the vserver
333     def start_guest_in_host(self):
334         return "virsh -c lxc:/// start {}".format(self.vservername)
335
336     def stop_guest_in_host(self):
337         return "virsh -c lxc:/// destroy {}".format(self.vservername)
338
339     # xxx quick n dirty
340     def run_in_guest_piped(self,local,remote):
341         return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),
342                                                                      keep_stdin = True))
343
344     def dnf_check_installed(self, rpms):
345         if isinstance(rpms, list):
346             rpms=" ".join(rpms)
347         return self.run_in_guest("rpm -q {}".format(rpms)) == 0
348
349     # does a dnf install in the vm, ignore the dnf retcod, check with rpm
350     def dnf_install(self, rpms):
351         if isinstance(rpms, list):
352             rpms=" ".join(rpms)
353         yum_mode = self.run_in_guest("dnf -y install {}".format(rpms))
354         if yum_mode != 0:
355             self.run_in_guest("dnf -y install --allowerasing {}".format(rpms))
356         # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
357         # nothing similar with dnf, forget about this for now
358         # self.run_in_guest("yum-complete-transaction -y")
359         return self.dnf_check_installed(rpms)
360
361     def pip3_install(self, package):
362         return self.run_in_guest("pip3 install {}".format(package)) == 0
363
364     def auth_root(self):
365         return {'Username'   : self.plc_spec['settings']['PLC_ROOT_USER'],
366                 'AuthMethod' : 'password',
367                 'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
368                 'Role'       : self.plc_spec['role'],
369                 }
370
371     def locate_site(self,sitename):
372         for site in self.plc_spec['sites']:
373             if site['site_fields']['name'] == sitename:
374                 return site
375             if site['site_fields']['login_base'] == sitename:
376                 return site
377         raise Exception("Cannot locate site {}".format(sitename))
378
379     def locate_node(self, nodename):
380         for site in self.plc_spec['sites']:
381             for node in site['nodes']:
382                 if node['name'] == nodename:
383                     return site, node
384         raise Exception("Cannot locate node {}".format(nodename))
385
386     def locate_hostname(self, hostname):
387         for site in self.plc_spec['sites']:
388             for node in site['nodes']:
389                 if node['node_fields']['hostname'] == hostname:
390                     return(site, node)
391         raise Exception("Cannot locate hostname {}".format(hostname))
392
393     def locate_key(self, key_name):
394         for key in self.plc_spec['keys']:
395             if key['key_name'] == key_name:
396                 return key
397         raise Exception("Cannot locate key {}".format(key_name))
398
399     def locate_private_key_from_key_names(self, key_names):
400         # locate the first avail. key
401         found = False
402         for key_name in key_names:
403             key_spec = self.locate_key(key_name)
404             test_key = TestKey(self,key_spec)
405             publickey = test_key.publicpath()
406             privatekey = test_key.privatepath()
407             if os.path.isfile(publickey) and os.path.isfile(privatekey):
408                 found = True
409         if found:
410             return privatekey
411         else:
412             return None
413
414     def locate_slice(self, slicename):
415         for slice in self.plc_spec['slices']:
416             if slice['slice_fields']['name'] == slicename:
417                 return slice
418         raise Exception("Cannot locate slice {}".format(slicename))
419
420     def all_sliver_objs(self):
421         result = []
422         for slice_spec in self.plc_spec['slices']:
423             slicename = slice_spec['slice_fields']['name']
424             for nodename in slice_spec['nodenames']:
425                 result.append(self.locate_sliver_obj(nodename, slicename))
426         return result
427
428     def locate_sliver_obj(self, nodename, slicename):
429         site,node = self.locate_node(nodename)
430         slice = self.locate_slice(slicename)
431         # build objects
432         test_site = TestSite(self, site)
433         test_node = TestNode(self, test_site, node)
434         # xxx the slice site is assumed to be the node site - mhh - probably harmless
435         test_slice = TestSlice(self, test_site, slice)
436         return TestSliver(self, test_node, test_slice)
437
438     def locate_first_node(self):
439         nodename = self.plc_spec['slices'][0]['nodenames'][0]
440         site,node = self.locate_node(nodename)
441         test_site = TestSite(self, site)
442         test_node = TestNode(self, test_site, node)
443         return test_node
444
445     def locate_first_sliver(self):
446         slice_spec = self.plc_spec['slices'][0]
447         slicename = slice_spec['slice_fields']['name']
448         nodename = slice_spec['nodenames'][0]
449         return self.locate_sliver_obj(nodename,slicename)
450
451     # all different hostboxes used in this plc
452     def get_BoxNodes(self):
453         # maps on sites and nodes, return [ (host_box,test_node) ]
454         tuples = []
455         for site_spec in self.plc_spec['sites']:
456             test_site = TestSite(self,site_spec)
457             for node_spec in site_spec['nodes']:
458                 test_node = TestNode(self, test_site, node_spec)
459                 if not test_node.is_real():
460                     tuples.append( (test_node.host_box(),test_node) )
461         # transform into a dict { 'host_box' -> [ test_node .. ] }
462         result = {}
463         for (box,node) in tuples:
464             if box not in result:
465                 result[box] = [node]
466             else:
467                 result[box].append(node)
468         return result
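    # shape sketch, hostnames hypothetical: get_BoxNodes() returns something like
    #     {'qemubox1.example.org': [<TestNode node1>, <TestNode node2>], ...}
    # i.e. each qemu host box mapped to the (non-real) TestNode objects it hosts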
469
470     # a step for checking this stuff
471     def show_boxes(self):
472         'print summary of nodes location'
473         for box,nodes in self.get_BoxNodes().items():
474             print(box,":"," + ".join( [ node.name() for node in nodes ] ))
475         return True
476
477     # make this a valid step
478     def qemu_kill_all(self):
479         'kill all qemu instances on the qemu boxes involved in this setup'
480         # this is the brute force version, kill all qemus on that host box
481         for (box,nodes) in self.get_BoxNodes().items():
482             # pass the first nodename, as we don't push template-qemu on testboxes
483             nodedir = nodes[0].nodedir()
484             TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
485         return True
486
487     # make this a valid step
488     def qemu_list_all(self):
489         'list all qemu instances on the qemu boxes involved in this setup'
490         for box,nodes in self.get_BoxNodes().items():
491             # this is the brute force version, list all qemus on that host box
492             TestBoxQemu(box, self.options.buildname).qemu_list_all()
493         return True
494
495     # list only the qemus related to this test
496     def qemu_list_mine(self):
497         'list qemu instances for our nodes'
498         for (box,nodes) in self.get_BoxNodes().items():
499             # the fine-grain version
500             for node in nodes:
501                 node.list_qemu()
502         return True
503
504     # clean up only the qemus related to this test
505     def qemu_clean_mine(self):
506         'cleanup (rm -rf) qemu instances for our nodes'
507         for box,nodes in self.get_BoxNodes().items():
508             # the fine-grain version
509             for node in nodes:
510                 node.qemu_clean()
511         return True
512
513     # kill only the right qemus
514     def qemu_kill_mine(self):
515         'kill the qemu instances for our nodes'
516         for box,nodes in self.get_BoxNodes().items():
517             # the fine-grain version
518             for node in nodes:
519                 node.kill_qemu()
520         return True
521
522     #################### display config
523     def show(self):
524         "show test configuration after localization"
525         self.show_pass(1)
526         self.show_pass(2)
527         return True
528
529     # ugly hack to make sure 'run export' only reports about the 1st plc
530     # to avoid confusion - also we use 'inri_slice1' in various aliases..
531     exported_id = 1
532     def export(self):
533         "print cut'n paste-able stuff to export env variables to your shell"
534         # guess local domain from hostname
535         if TestPlc.exported_id > 1:
536             print("export GUESTHOSTNAME{:d}={}".format(TestPlc.exported_id, self.plc_spec['vservername']))
537             return True
538         TestPlc.exported_id += 1
539         domain = socket.gethostname().split('.',1)[1]
540         fqdn   = "{}.{}".format(self.plc_spec['host_box'], domain)
541         print("export BUILD={}".format(self.options.buildname))
542         print("export PLCHOSTLXC={}".format(fqdn))
543         print("export GUESTNAME={}".format(self.vservername))
544         print("export GUESTHOSTNAME={}.{}".format(self.vplchostname, domain))
545         # find hostname of first node
546         hostname, qemubox = self.all_node_infos()[0]
547         print("export KVMHOST={}.{}".format(qemubox, domain))
548         print("export NODE={}".format(hostname))
549         return True
550
551     # entry point
552     always_display_keys=['PLC_WWW_HOST', 'nodes', 'sites']
553     def show_pass(self, passno):
554         for (key,val) in self.plc_spec.items():
555             if not self.options.verbose and key not in TestPlc.always_display_keys:
556                 continue
557             if passno == 2:
558                 if key == 'sites':
559                     for site in val:
560                         self.display_site_spec(site)
561                         for node in site['nodes']:
562                             self.display_node_spec(node)
563                 elif key == 'initscripts':
564                     for initscript in val:
565                         self.display_initscript_spec(initscript)
566                 elif key == 'slices':
567                     for slice in val:
568                         self.display_slice_spec(slice)
569                 elif key == 'keys':
570                     for key in val:
571                         self.display_key_spec(key)
572             elif passno == 1:
573                 if key not in ['sites', 'initscripts', 'slices', 'keys']:
574                     print('+   ', key, ':', val)
575
576     def display_site_spec(self, site):
577         print('+ ======== site', site['site_fields']['name'])
578         for k,v in site.items():
579             if not self.options.verbose and k not in TestPlc.always_display_keys:
580                 continue
581             if k == 'nodes':
582                 if v:
583                     print('+       ', 'nodes : ', end=' ')
584                     for node in v:
585                         print(node['node_fields']['hostname'],'', end=' ')
586                     print('')
587             elif k == 'users':
588                 if v:
589                     print('+       users : ', end=' ')
590                     for user in v:
591                         print(user['name'],'', end=' ')
592                     print('')
593             elif k == 'site_fields':
594                 print('+       login_base', ':', v['login_base'])
595             elif k == 'address_fields':
596                 pass
597             else:
598                 print('+       ', end=' ')
599                 utils.pprint(k, v)
600
601     def display_initscript_spec(self, initscript):
602         print('+ ======== initscript', initscript['initscript_fields']['name'])
603
604     def display_key_spec(self, key):
605         print('+ ======== key', key['key_name'])
606
607     def display_slice_spec(self, slice):
608         print('+ ======== slice', slice['slice_fields']['name'])
609         for k,v in slice.items():
610             if k == 'nodenames':
611                 if v:
612                     print('+       nodes : ', end=' ')
613                     for nodename in v:
614                         print(nodename,'', end=' ')
615                     print('')
616             elif k == 'usernames':
617                 if v:
618                     print('+       users : ', end=' ')
619                     for username in v:
620                         print(username,'', end=' ')
621                     print('')
622             elif k == 'slice_fields':
623                 print('+       fields', ':', end=' ')
624                 print('max_nodes=',v['max_nodes'], end=' ')
625                 print('')
626             else:
627                 print('+       ',k,v)
628
629     def display_node_spec(self, node):
630         print("+           node={} host_box={}".format(node['name'], node['host_box']), end=' ')
631         print("hostname=", node['node_fields']['hostname'], end=' ')
632         print("ip=", node['interface_fields']['ip'])
633         if self.options.verbose:
634             utils.pprint("node details", node, depth=3)
635
636     # another entry point for just showing the boxes involved
637     def display_mapping(self):
638         TestPlc.display_mapping_plc(self.plc_spec)
639         return True
640
641     @staticmethod
642     def display_mapping_plc(plc_spec):
643         print('+ MyPLC',plc_spec['name'])
644         # WARNING this would not be right for lxc-based PLC's - should be harmless though
645         print('+\tvserver address = root@{}:/vservers/{}'.format(plc_spec['host_box'], plc_spec['vservername']))
646         print('+\tIP = {}/{}'.format(plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip']))
647         for site_spec in plc_spec['sites']:
648             for node_spec in site_spec['nodes']:
649                 TestPlc.display_mapping_node(node_spec)
650
651     @staticmethod
652     def display_mapping_node(node_spec):
653         print('+   NODE {}'.format(node_spec['name']))
654         print('+\tqemu box {}'.format(node_spec['host_box']))
655         print('+\thostname={}'.format(node_spec['node_fields']['hostname']))
656
657     # write a timestamp in /vservers/<>.timestamp
658     # cannot be inside the vserver, that causes vserver .. build to cough
659     def plcvm_timestamp(self):
660         "Create a timestamp to remember creation date for this plc"
661         now = int(time.time())
662         # TODO-lxc check this one
663         # a first approx. is to store the timestamp close to the VM root like vs does
664         stamp_path = self.vm_timestamp_path()
665         stamp_dir = os.path.dirname(stamp_path)
666         utils.system(self.test_ssh.actual_command("mkdir -p {}".format(stamp_dir)))
667         return utils.system(self.test_ssh.actual_command("echo {:d} > {}".format(now, stamp_path))) == 0
668
669     # this is called unconditionally at the beginning of the test sequence
670     # just in case this is a rerun, so if the vm is not running it's fine
671     def plcvm_delete(self):
672         "vserver delete the test myplc"
673         stamp_path = self.vm_timestamp_path()
674         self.run_in_host("rm -f {}".format(stamp_path))
675         self.run_in_host("virsh -c lxc:/// destroy {}".format(self.vservername))
676         self.run_in_host("virsh -c lxc:/// undefine {}".format(self.vservername))
677         self.run_in_host("rm -fr /vservers/{}".format(self.vservername))
678         return True
679
680     ### install
681     # historically the build was being fetched by the tests
682     # now the build pushes itself as a subdir of the tests workdir
683     # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
684     def plcvm_create(self):
685         "vserver creation (no install done)"
686         # push the local build/ dir to the testplc box
687         if self.is_local():
688             # a full path for the local calls
689             build_dir = os.path.dirname(sys.argv[0])
690             # sometimes this is empty - set to "." in such a case
691             if not build_dir:
692                 build_dir="."
693             build_dir += "/build"
694         else:
695             # use a standard name - will be relative to remote buildname
696             build_dir = "build"
697             # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
698             self.test_ssh.rmdir(build_dir)
699             self.test_ssh.copy(build_dir, recursive=True)
700         # the repo url is taken from arch-rpms-url
701         # with the last step (i386) removed
702         repo_url = self.options.arch_rpms_url
703         for level in [ 'arch' ]:
704             repo_url = os.path.dirname(repo_url)
705
706         # invoke initvm (drop support for vs)
707         script = "lbuild-initvm.sh"
708         script_options = ""
709         # pass the vbuild-nightly options to [lv]test-initvm
710         script_options += " -p {}".format(self.options.personality)
711         script_options += " -d {}".format(self.options.pldistro)
712         script_options += " -f {}".format(self.options.fcdistro)
713         script_options += " -r {}".format(repo_url)
714         vserver_name = self.vservername
715         try:
716             vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
717             script_options += " -n {}".format(vserver_hostname)
718         except:
719             print("Cannot reverse lookup {}".format(self.vserverip))
720             print("This is considered fatal, as this might pollute the test results")
721             return False
722         create_vserver="{build_dir}/{script} {script_options} {vserver_name}".format(**locals())
723         return self.run_in_host(create_vserver) == 0
724
725     ### install django through pip
726     def django_install(self):
727         # plcapi requires Django, which is no longer provided by Fedora as an rpm
728         # so we use pip instead
729         """
730         pip install Django
731         """
732         return self.pip3_install('Django')
733
734     ### install_rpm
735     def plc_install(self):
736         """
737         yum install myplc, noderepo
738         """
739
740         # compute nodefamily
741         if self.options.personality == "linux32":
742             arch = "i386"
743         elif self.options.personality == "linux64":
744             arch = "x86_64"
745         else:
746             raise Exception("Unsupported personality {}".format(self.options.personality))
747         nodefamily = "{}-{}-{}".format(self.options.pldistro, self.options.fcdistro, arch)
748
749         # check it's possible to install just 'myplc-core' first
750         if not self.dnf_install("myplc-core"):
751             return False
752
753         pkgs_list = []
754         pkgs_list.append("myplc")
755         # pkgs_list.append("slicerepo-{}".format(nodefamily))
756         # pkgs_list.append("noderepo-{}".format(nodefamily))
757         pkgs_string=" ".join(pkgs_list)
758         return self.dnf_install(pkgs_list)
759
760     def install_syslinux6(self):
761         """
762         install syslinux6 from the fedora21 release
763         """
764         key = 'http://mirror.onelab.eu/keys/RPM-GPG-KEY-fedora-21-primary'
765
766         rpms = [
767             'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-6.03-1.fc21.x86_64.rpm',
768             'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-nonlinux-6.03-1.fc21.noarch.rpm',
769             'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-perl-6.03-1.fc21.x86_64.rpm',
770         ]
771         # this can be done several times
772         self.run_in_guest("rpm --import {key}".format(**locals()))
773         return self.run_in_guest("yum -y localinstall {}".format(" ".join(rpms))) == 0
774
775     def bonding_builds(self):
776         """
777         list /etc/yum.repos.d on the myplc side
778         """
779         self.run_in_guest("ls /etc/yum.repos.d/*partial.repo")
780         return True
781
782     def bonding_nodes(self):
783         """
784         List nodes known to the myplc together with their nodefamily
785         """
786         print("---------------------------------------- nodes")
787         for node in self.apiserver.GetNodes(self.auth_root()):
788             print("{} -> {}".format(node['hostname'],
789                                     self.apiserver.GetNodeFlavour(self.auth_root(),node['hostname'])['nodefamily']))
790         print("---------------------------------------- nodes")
791
792
793     ###
794     def mod_python(self):
795         """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
796         return self.dnf_install( ['mod_python'] )
797
798     ###
799     def plc_configure(self):
800         "run plc-config-tty"
801         tmpname = '{}.plc-config-tty'.format(self.name())
802         with open(tmpname,'w') as fileconf:
803             for var, value in self.plc_spec['settings'].items():
804                 fileconf.write('e {}\n{}\n'.format(var, value))
805             fileconf.write('w\n')
806             fileconf.write('q\n')
807         utils.system('cat {}'.format(tmpname))
808         self.run_in_guest_piped('cat {}'.format(tmpname), 'plc-config-tty')
809         utils.system('rm {}'.format(tmpname))
810         return True
811
812     # care only about f>=27
813     def start_stop_systemd(self, service, start_or_stop):
814         "utility to start/stop a systemd-defined service (sfa)"
815         return self.run_in_guest("systemctl {} {}".format(start_or_stop, service)) == 0
816
817     def plc_start(self):
818         "start plc through systemclt"
819         return self.start_stop_systemd('plc', 'start')
820
821     def plc_stop(self):
822         "stop plc through systemctl"
823         return self.start_stop_systemd('plc', 'stop')
824
825     def plcvm_start(self):
826         "start the PLC vserver"
827         self.start_guest()
828         return True
829
830     def plcvm_stop(self):
831         "stop the PLC vserver"
832         self.stop_guest()
833         return True
834
835     # stores the keys from the config for further use
836     def keys_store(self):
837         "stores test users ssh keys in keys/"
838         for key_spec in self.plc_spec['keys']:
839             TestKey(self,key_spec).store_key()
840         return True
841
842     def keys_clean(self):
843         "removes keys cached in keys/"
844         utils.system("rm -rf ./keys")
845         return True
846
847     # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
848     # for later direct access to the nodes
849     def keys_fetch(self):
850         "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
851         dir="./keys"
852         if not os.path.isdir(dir):
853             os.mkdir(dir)
854         vservername = self.vservername
855         vm_root = self.vm_root_in_host()
856         overall = True
857         prefix = 'debug_ssh_key'
858         for ext in ['pub', 'rsa'] :
859             src = "{vm_root}/etc/planetlab/{prefix}.{ext}".format(**locals())
860             dst = "keys/{vservername}-debug.{ext}".format(**locals())
861             if self.test_ssh.fetch(src, dst) != 0:
862                 overall=False
863         return overall
864
865     def sites(self):
866         "create sites with PLCAPI"
867         return self.do_sites()
868
869     def delete_sites(self):
870         "delete sites with PLCAPI"
871         return self.do_sites(action="delete")
872
873     def do_sites(self, action="add"):
874         for site_spec in self.plc_spec['sites']:
875             test_site = TestSite(self,site_spec)
876             if (action != "add"):
877                 utils.header("Deleting site {} in {}".format(test_site.name(), self.name()))
878                 test_site.delete_site()
879                 # deleted with the site
880                 #test_site.delete_users()
881                 continue
882             else:
883                 utils.header("Creating site {} & users in {}".format(test_site.name(), self.name()))
884                 test_site.create_site()
885                 test_site.create_users()
886         return True
887
888     def delete_all_sites(self):
889         "Delete all sites in PLC, and related objects"
890         print('auth_root', self.auth_root())
891         sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id', 'login_base'])
892         for site in sites:
893             # keep the automatic site - otherwise we shoot ourselves in the foot, as root_auth is not valid anymore
894             if site['login_base'] == self.plc_spec['settings']['PLC_SLICE_PREFIX']:
895                 continue
896             site_id = site['site_id']
897             print('Deleting site_id', site_id)
898             self.apiserver.DeleteSite(self.auth_root(), site_id)
899         return True
900
901     def nodes(self):
902         "create nodes with PLCAPI"
903         return self.do_nodes()
904     def delete_nodes(self):
905         "delete nodes with PLCAPI"
906         return self.do_nodes(action="delete")
907
908     def do_nodes(self, action="add"):
909         for site_spec in self.plc_spec['sites']:
910             test_site = TestSite(self, site_spec)
911             if action != "add":
912                 utils.header("Deleting nodes in site {}".format(test_site.name()))
913                 for node_spec in site_spec['nodes']:
914                     test_node = TestNode(self, test_site, node_spec)
915                     utils.header("Deleting {}".format(test_node.name()))
916                     test_node.delete_node()
917             else:
918                 utils.header("Creating nodes for site {} in {}".format(test_site.name(), self.name()))
919                 for node_spec in site_spec['nodes']:
920                     utils.pprint('Creating node {}'.format(node_spec), node_spec)
921                     test_node = TestNode(self, test_site, node_spec)
922                     test_node.create_node()
923         return True
924
925     def nodegroups(self):
926         "create nodegroups with PLCAPI"
927         return self.do_nodegroups("add")
928     def delete_nodegroups(self):
929         "delete nodegroups with PLCAPI"
930         return self.do_nodegroups("delete")
931
932     YEAR = 365*24*3600
933     @staticmethod
934     def translate_timestamp(start, grain, timestamp):
935         if timestamp < TestPlc.YEAR:
936             return start + timestamp*grain
937         else:
938             return timestamp
939
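    # worked example for translate_timestamp above (numbers made up): with
    # start=1700000000 and grain=1800, a 'leases' timestamp of 2 is below YEAR so it
    # is read as relative and becomes 1700000000 + 2*1800 = 1700003600, while anything
    # larger than a year's worth of seconds is kept as an absolute epoch timestamp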
940     @staticmethod
941     def timestamp_printable(timestamp):
942         return time.strftime('%m-%d %H:%M:%S UTC', time.gmtime(timestamp))
943
944     def leases(self):
945         "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
946         now = int(time.time())
947         grain = self.apiserver.GetLeaseGranularity(self.auth_root())
948         print('API answered grain=', grain)
949         start = (now//grain)*grain
950         start += grain
951         # find out all nodes that are reservable
952         nodes = self.all_reservable_nodenames()
953         if not nodes:
954             utils.header("No reservable node found - proceeding without leases")
955             return True
956         ok = True
957         # attach them to the leases as specified in plc_specs
958         # this is where the 'leases' field gets interpreted as relative or absolute
959         for lease_spec in self.plc_spec['leases']:
960             # skip the ones that come with a null slice id
961             if not lease_spec['slice']:
962                 continue
963             lease_spec['t_from']  = TestPlc.translate_timestamp(start, grain, lease_spec['t_from'])
964             lease_spec['t_until'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_until'])
965             lease_addition = self.apiserver.AddLeases(self.auth_root(), nodes, lease_spec['slice'],
966                                                       lease_spec['t_from'], lease_spec['t_until'])
967             if lease_addition['errors']:
968                 utils.header("Cannot create leases, {}".format(lease_addition['errors']))
969                 ok = False
970             else:
971                 utils.header('Leases on nodes {} for {} from {:d} ({}) until {:d} ({})'\
972                              .format(nodes, lease_spec['slice'],
973                                      lease_spec['t_from'],  TestPlc.timestamp_printable(lease_spec['t_from']),
974                                      lease_spec['t_until'], TestPlc.timestamp_printable(lease_spec['t_until'])))
975
976         return ok
977
978     def delete_leases(self):
979         "remove all leases in the myplc side"
980         lease_ids = [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
981         utils.header("Cleaning leases {}".format(lease_ids))
982         self.apiserver.DeleteLeases(self.auth_root(), lease_ids)
983         return True
984
985     def list_leases(self):
986         "list all leases known to the myplc"
987         leases = self.apiserver.GetLeases(self.auth_root())
988         now = int(time.time())
989         for l in leases:
990             current = l['t_until'] >= now
991             if self.options.verbose or current:
992                 utils.header("{} {} from {} until {}"\
993                              .format(l['hostname'], l['name'],
994                                      TestPlc.timestamp_printable(l['t_from']),
995                                      TestPlc.timestamp_printable(l['t_until'])))
996         return True
997
998     # create nodegroups if needed, and populate
999     def do_nodegroups(self, action="add"):
1000         # 1st pass to scan contents
1001         groups_dict = {}
1002         for site_spec in self.plc_spec['sites']:
1003             test_site = TestSite(self,site_spec)
1004             for node_spec in site_spec['nodes']:
1005                 test_node = TestNode(self, test_site, node_spec)
1006                 if 'nodegroups' in node_spec:
1007                     nodegroupnames = node_spec['nodegroups']
1008                     if isinstance(nodegroupnames, str):
1009                         nodegroupnames = [ nodegroupnames ]
1010                     for nodegroupname in nodegroupnames:
1011                         if nodegroupname not in groups_dict:
1012                             groups_dict[nodegroupname] = []
1013                         groups_dict[nodegroupname].append(test_node.name())
1014         auth = self.auth_root()
1015         overall = True
1016         for (nodegroupname,group_nodes) in groups_dict.items():
1017             if action == "add":
1018                 print('nodegroups:', 'dealing with nodegroup',\
1019                     nodegroupname, 'on nodes', group_nodes)
1020                 # first, check if the nodetagtype is here
1021                 tag_types = self.apiserver.GetTagTypes(auth, {'tagname':nodegroupname})
1022                 if tag_types:
1023                     tag_type_id = tag_types[0]['tag_type_id']
1024                 else:
1025                     tag_type_id = self.apiserver.AddTagType(auth,
1026                                                             {'tagname' : nodegroupname,
1027                                                              'description' : 'for nodegroup {}'.format(nodegroupname),
1028                                                              'category' : 'test'})
1029                 print('located tag (type)', nodegroupname, 'as', tag_type_id)
1030                 # create nodegroup
1031                 nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname' : nodegroupname})
1032                 if not nodegroups:
1033                     self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
1034                     print('created nodegroup', nodegroupname, \
1035                         'from tagname', nodegroupname, 'and value', 'yes')
1036                 # set node tag on all nodes, value='yes'
1037                 for nodename in group_nodes:
1038                     try:
1039                         self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
1040                     except:
1041                         traceback.print_exc()
1042                         print('node', nodename, 'seems to already have tag', nodegroupname)
1043                     # check anyway
1044                     try:
1045                         expect_yes = self.apiserver.GetNodeTags(auth,
1046                                                                 {'hostname' : nodename,
1047                                                                  'tagname'  : nodegroupname},
1048                                                                 ['value'])[0]['value']
1049                         if expect_yes != "yes":
1050                             print('Mismatch node tag on node',nodename,'got',expect_yes)
1051                             overall = False
1052                     except:
1053                         if not self.options.dry_run:
1054                             print('Cannot find tag', nodegroupname, 'on node', nodename)
1055                             overall = False
1056             else:
1057                 try:
1058                     print('cleaning nodegroup', nodegroupname)
1059                     self.apiserver.DeleteNodeGroup(auth, nodegroupname)
1060                 except:
1061                     traceback.print_exc()
1062                     overall = False
1063         return overall
1064
1065     # a list of TestNode objs
1066     def all_nodes(self):
1067         nodes=[]
1068         for site_spec in self.plc_spec['sites']:
1069             test_site = TestSite(self,site_spec)
1070             for node_spec in site_spec['nodes']:
1071                 nodes.append(TestNode(self, test_site, node_spec))
1072         return nodes
1073
1074     # return a list of tuples (nodename,qemuname)
1075     def all_node_infos(self) :
1076         node_infos = []
1077         for site_spec in self.plc_spec['sites']:
1078             node_infos += [ (node_spec['node_fields']['hostname'], node_spec['host_box']) \
1079                                 for node_spec in site_spec['nodes'] ]
1080         return node_infos
1081
1082     def all_nodenames(self):
1083         return [ x[0] for x in self.all_node_infos() ]
1084     def all_reservable_nodenames(self):
1085         res = []
1086         for site_spec in self.plc_spec['sites']:
1087             for node_spec in site_spec['nodes']:
1088                 node_fields = node_spec['node_fields']
1089                 if 'node_type' in node_fields and node_fields['node_type'] == 'reservable':
1090                     res.append(node_fields['hostname'])
1091         return res
1092
1093     # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
1094     def nodes_check_boot_state(self, target_boot_state, timeout_minutes,
1095                                silent_minutes, period_seconds = 15):
1096         if self.options.dry_run:
1097             print('dry_run')
1098             return True
1099
1100         class CompleterTaskBootState(CompleterTask):
1101             def __init__(self, test_plc, hostname):
1102                 self.test_plc = test_plc
1103                 self.hostname = hostname
1104                 self.last_boot_state = 'undef'
1105             def actual_run(self):
1106                 try:
1107                     node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(),
1108                                                             [ self.hostname ],
1109                                                             ['boot_state'])[0]
1110                     self.last_boot_state = node['boot_state']
1111                     return self.last_boot_state == target_boot_state
1112                 except:
1113                     return False
1114             def message(self):
1115                 return "CompleterTaskBootState with node {}".format(self.hostname)
1116             def failure_epilogue(self):
1117                 print("node {} in state {} - expected {}"\
1118                     .format(self.hostname, self.last_boot_state, target_boot_state))
1119
1120         timeout = timedelta(minutes=timeout_minutes)
1121         graceout = timedelta(minutes=silent_minutes)
1122         period   = timedelta(seconds=period_seconds)
1123         # the nodes that haven't checked yet - start with a full list and shrink over time
1124         utils.header("checking nodes boot state (expected {})".format(target_boot_state))
1125         tasks = [ CompleterTaskBootState(self,hostname) \
1126                       for (hostname,_) in self.all_node_infos() ]
1127         message = 'check_boot_state={}'.format(target_boot_state)
1128         return Completer(tasks, message=message).run(timeout, graceout, period)
1129
1130     def nodes_booted(self):
1131         return self.nodes_check_boot_state('boot', timeout_minutes=30, silent_minutes=28)
1132
1133     def probe_kvm_iptables(self):
1134         (_,kvmbox) = self.all_node_infos()[0]
1135         TestSsh(kvmbox).run("iptables-save")
1136         return True
1137
1138     # probing nodes
1139     def check_nodes_ping(self, timeout_seconds=60, period_seconds=10):
1140         class CompleterTaskPingNode(CompleterTask):
1141             def __init__(self, hostname):
1142                 self.hostname = hostname
1143             def run(self, silent):
1144                 command="ping -c 1 -w 1 {} >& /dev/null".format(self.hostname)
1145                 return utils.system(command, silent=silent) == 0
1146             def failure_epilogue(self):
1147                 print("Cannot ping node with name {}".format(self.hostname))
1148         timeout = timedelta(seconds = timeout_seconds)
1149         graceout = timeout
1150         period = timedelta(seconds = period_seconds)
1151         node_infos = self.all_node_infos()
1152         tasks = [ CompleterTaskPingNode(h) for (h,_) in node_infos ]
1153         return Completer(tasks, message='ping_node').run(timeout, graceout, period)
1154
1155     # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1156     def ping_node(self):
1157         "Ping nodes"
1158         return self.check_nodes_ping()
1159
1160     def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period_seconds=15):
1161         # various delays
1162         timeout  = timedelta(minutes=timeout_minutes)
1163         graceout = timedelta(minutes=silent_minutes)
1164         period   = timedelta(seconds=period_seconds)
1165         vservername = self.vservername
1166         if debug:
1167             message = "debug"
1168             completer_message = 'ssh_node_debug'
1169             local_key = "keys/{vservername}-debug.rsa".format(**locals())
1170         else:
1171             message = "boot"
1172             completer_message = 'ssh_node_boot'
1173             local_key = "keys/key_admin.rsa"
1174         utils.header("checking ssh access to nodes (expected in {} mode)".format(message))
1175         node_infos = self.all_node_infos()
1176         tasks = [ CompleterTaskNodeSsh(nodename, qemuname, local_key,
1177                                         boot_state=message, dry_run=self.options.dry_run) \
1178                       for (nodename, qemuname) in node_infos ]
1179         return Completer(tasks, message=completer_message).run(timeout, graceout, period)
1180
1181     def ssh_node_debug(self):
1182         "Tries to ssh into nodes in debug mode with the debug ssh key"
1183         return self.check_nodes_ssh(debug = True,
1184                                     timeout_minutes = self.ssh_node_debug_timeout,
1185                                     silent_minutes = self.ssh_node_debug_silent)
1186
1187     def ssh_node_boot(self):
1188         "Tries to ssh into nodes in production mode with the root ssh key"
1189         return self.check_nodes_ssh(debug = False,
1190                                     timeout_minutes = self.ssh_node_boot_timeout,
1191                                     silent_minutes = self.ssh_node_boot_silent)
1192
1193     def node_bmlogs(self):
1194         "Checks that there's a non-empty dir. /var/log/bm/raw"
1195         return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw")) == 0
1196
1197     @node_mapper
1198     def qemu_local_init(self): pass
1199     @node_mapper
1200     def bootcd(self): pass
1201     @node_mapper
1202     def qemu_local_config(self): pass
1203     @node_mapper
1204     def qemu_export(self): pass
1205     @node_mapper
1206     def qemu_cleanlog(self): pass
1207     @node_mapper
1208     def nodestate_reinstall(self): pass
1209     @node_mapper
1210     def nodestate_upgrade(self): pass
1211     @node_mapper
1212     def nodestate_safeboot(self): pass
1213     @node_mapper
1214     def nodestate_boot(self): pass
1215     @node_mapper
1216     def nodestate_show(self): pass
1217     @node_mapper
1218     def nodedistro_f14(self): pass
1219     @node_mapper
1220     def nodedistro_f18(self): pass
1221     @node_mapper
1222     def nodedistro_f20(self): pass
1223     @node_mapper
1224     def nodedistro_f21(self): pass
1225     @node_mapper
1226     def nodedistro_f22(self): pass
1227     @node_mapper
1228     def nodedistro_show(self): pass
1229
1230     ### check hooks : invoke scripts from hooks/{node,slice}
1231     def check_hooks_node(self):
1232         return self.locate_first_node().check_hooks()
1233     def check_hooks_sliver(self) :
1234         return self.locate_first_sliver().check_hooks()
1235
1236     def check_hooks(self):
1237         "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1238         return self.check_hooks_node() and self.check_hooks_sliver()
1239
1240     ### initscripts
1241     def do_check_initscripts(self):
1242         class CompleterTaskInitscript(CompleterTask):
1243             def __init__(self, test_sliver, stamp):
1244                 self.test_sliver = test_sliver
1245                 self.stamp = stamp
1246             def actual_run(self):
1247                 return self.test_sliver.check_initscript_stamp(self.stamp)
1248             def message(self):
1249                 return "initscript checker for {}".format(self.test_sliver.name())
1250             def failure_epilogue(self):
1251                 print("initscript stamp {} not found in sliver {}"\
1252                     .format(self.stamp, self.test_sliver.name()))
1253
1254         tasks = []
1255         for slice_spec in self.plc_spec['slices']:
1256             if 'initscriptstamp' not in slice_spec:
1257                 continue
1258             stamp = slice_spec['initscriptstamp']
1259             slicename = slice_spec['slice_fields']['name']
1260             for nodename in slice_spec['nodenames']:
1261                 print('nodename', nodename, 'slicename', slicename, 'stamp', stamp)
1262                 site,node = self.locate_node(nodename)
1263                 # xxx - passing the wrong site - probably harmless
1264                 test_site = TestSite(self, site)
1265                 test_slice = TestSlice(self, test_site, slice_spec)
1266                 test_node = TestNode(self, test_site, node)
1267                 test_sliver = TestSliver(self, test_node, test_slice)
1268                 tasks.append(CompleterTaskInitscript(test_sliver, stamp))
1269         return Completer(tasks, message='check_initscripts').\
1270             run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1271
1272     def check_initscripts(self):
1273         "check that the initscripts have triggered"
1274         return self.do_check_initscripts()
1275
1276     def initscripts(self):
1277         "create initscripts with PLCAPI"
1278         for initscript in self.plc_spec['initscripts']:
1279             utils.pprint('Adding Initscript in plc {}'.format(self.plc_spec['name']), initscript)
1280             self.apiserver.AddInitScript(self.auth_root(), initscript['initscript_fields'])
1281         return True
1282
1283     def delete_initscripts(self):
1284         "delete initscripts with PLCAPI"
1285         for initscript in self.plc_spec['initscripts']:
1286             initscript_name = initscript['initscript_fields']['name']
1287             print('Attempting to delete {} in plc {}'.format(initscript_name, self.plc_spec['name']))
1288             try:
1289                 self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
1290                 print(initscript_name, 'deleted')
1291             except:
1292                 print('deletion went wrong - probably did not exist')
1293         return True
1294
1295     ### manage slices
1296     def slices(self):
1297         "create slices with PLCAPI"
1298         return self.do_slices(action="add")
1299
1300     def delete_slices(self):
1301         "delete slices with PLCAPI"
1302         return self.do_slices(action="delete")
1303
1304     def fill_slices(self):
1305         "add nodes in slices with PLCAPI"
1306         return self.do_slices(action="fill")
1307
1308     def empty_slices(self):
1309         "remove nodes from slices with PLCAPI"
1310         return self.do_slices(action="empty")
1311
1312     def do_slices(self,  action="add"):
1313         for slice in self.plc_spec['slices']:
1314             site_spec = self.locate_site(slice['sitename'])
1315             test_site = TestSite(self,site_spec)
1316             test_slice=TestSlice(self,test_site,slice)
1317             if action == "delete":
1318                 test_slice.delete_slice()
1319             elif action == "fill":
1320                 test_slice.add_nodes()
1321             elif action == "empty":
1322                 test_slice.delete_nodes()
1323             else:
1324                 test_slice.create_slice()
1325         return True
1326
1327     @slice_mapper__tasks(20, 10, 15)
1328     def ssh_slice(self): pass
1329     @slice_mapper__tasks(20, 19, 15)
1330     def ssh_slice_off(self): pass
1331     @slice_mapper__tasks(1, 1, 15)
1332     def slice_fs_present(self): pass
1333     @slice_mapper__tasks(1, 1, 15)
1334     def slice_fs_deleted(self): pass
1335
1336     # use another name so we can exclude/ignore it from the tests on the nightly command line
1337     def ssh_slice_again(self): return self.ssh_slice()
1338     # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1339     # but for some reason the ignore-wrapping thing would not
1340
1341     @slice_mapper
1342     def ssh_slice_basics(self): pass
1343     @slice_mapper
1344     def check_vsys_defaults(self): pass
1345
1346     @node_mapper
1347     def keys_clear_known_hosts(self): pass
1348
1349     def plcapi_urls(self):
1350         """
1351         attempts to reach the PLCAPI with various forms for the URL
1352         """
1353         return PlcapiUrlScanner(self.auth_root(), ip=self.vserverip).scan()
1354
1355     def speed_up_slices(self):
1356         "tweak nodemanager cycle (wait time) to 30+/-10 s"
1357         return self._speed_up_slices (30, 10)
1358     def super_speed_up_slices(self):
1359         "dev mode: tweak nodemanager cycle (wait time) to 5+/-1 s"
1360         return self._speed_up_slices(5, 1)
1361
1362     def _speed_up_slices(self, p, r):
1363         # create the template on the server-side
1364         template = "{}.nodemanager".format(self.name())
1365         with open(template,"w") as template_file:
1366             template_file.write('OPTIONS="-p {} -r {} -d"\n'.format(p, r))
1367         in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1368         remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1369         self.test_ssh.copy_abs(template, remote)
1370         # Add a conf file
1371         if not self.apiserver.GetConfFiles(self.auth_root(),
1372                                            {'dest' : '/etc/sysconfig/nodemanager'}):
1373             self.apiserver.AddConfFile(self.auth_root(),
1374                                         {'dest' : '/etc/sysconfig/nodemanager',
1375                                          'source' : 'PlanetLabConf/nodemanager',
1376                                          'postinstall_cmd' : 'service nm restart',})
1377         return True
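
    # net effect, for illustration: once the conf file registered above is served to
    # the nodes (presumably through PlanetLabConf), each node ends up with
    # /etc/sysconfig/nodemanager containing e.g. OPTIONS="-p 30 -r 10 -d"
    # (the speed_up_slices values), and 'service nm restart' picks it up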
1378
1379     def debug_nodemanager(self):
1380         "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1381         template = "{}.nodemanager".format(self.name())
1382         with open(template,"w") as template_file:
1383             template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1384         in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1385         remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1386         self.test_ssh.copy_abs(template, remote)
1387         return True
1388
1389     @node_mapper
1390     def qemu_start(self) : pass
1391
1392     @node_mapper
1393     def qemu_timestamp(self) : pass
1394
1395     @node_mapper
1396     def qemu_nodefamily(self): pass
1397
1398     # when a spec refers to a node possibly on another plc
1399     def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
1400         for plc in [ self ] + other_plcs:
1401             try:
1402                 return plc.locate_sliver_obj(nodename, slicename)
1403             except:
1404                 pass
1405         raise Exception("Cannot locate sliver {}@{} among all PLCs".format(nodename, slicename))
1406
1407     # implement this one as a cross step so that we can take advantage of different nodes
1408     # in multi-plcs mode
1409     def cross_check_tcp(self, other_plcs):
1410         "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1411         if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1412             utils.header("check_tcp: no/empty config found")
1413             return True
1414         specs = self.plc_spec['tcp_specs']
1415         overall = True
1416
1417         # first wait for the network to be up and ready from the slices
1418         class CompleterTaskNetworkReadyInSliver(CompleterTask):
1419             def __init__(self, test_sliver):
1420                 self.test_sliver = test_sliver
1421             def actual_run(self):
1422                 return self.test_sliver.check_tcp_ready(port = 9999)
1423             def message(self):
1424                 return "network ready checker for {}".format(self.test_sliver.name())
1425             def failure_epilogue(self):
1426                 print("could not bind port from sliver {}".format(self.test_sliver.name()))
1427
1428         sliver_specs = {}
1429         tasks = []
1430         managed_sliver_names = set()
1431         for spec in specs:
1432             # locate the TestSliver instances involved, and cache them in the spec instance
1433             spec['s_sliver'] = self.locate_sliver_obj_cross(spec['server_node'], spec['server_slice'], other_plcs)
1434             spec['c_sliver'] = self.locate_sliver_obj_cross(spec['client_node'], spec['client_slice'], other_plcs)
1435             message = "Will check TCP between s={} and c={}"\
1436                       .format(spec['s_sliver'].name(), spec['c_sliver'].name())
1437             if 'client_connect' in spec:
1438                 message += " (using {})".format(spec['client_connect'])
1439             utils.header(message)
1440             # we need to check network presence in both slivers, but also
1441             # avoid to insert a sliver several times
1442             for sliver in [ spec['s_sliver'], spec['c_sliver'] ]:
1443                 if sliver.name() not in managed_sliver_names:
1444                     tasks.append(CompleterTaskNetworkReadyInSliver(sliver))
1445                     # add this sliver's name to the set
1446                     managed_sliver_names.update({sliver.name()})
1447
1448         # wait for the network to be OK on all server sides
1449         if not Completer(tasks, message='check for network readiness in slivers').\
1450            run(timedelta(seconds=30), timedelta(seconds=24), period=timedelta(seconds=5)):
1451             return False
1452
1453         # run server and client
1454         for spec in specs:
1455             port = spec['port']
1456             # server side
1457             # the issue here is that we have the server run in background
1458             # and so we have no clue if it took off properly or not
1459             # looks like in some cases it does not
1460             address = spec['s_sliver'].test_node.name()
1461             if not spec['s_sliver'].run_tcp_server(address, port, timeout=20):
1462                 overall = False
1463                 break
1464
1465             # idem for the client side
1466             # use nodename from located sliver, unless 'client_connect' is set
1467             if 'client_connect' in spec:
1468                 destination = spec['client_connect']
1469             else:
1470                 destination = spec['s_sliver'].test_node.name()
1471             if not spec['c_sliver'].run_tcp_client(destination, port):
1472                 overall = False
1473         return overall
1474
1475     # painfully enough, we need to allow for some time as netflow might show up last
1476     def check_system_slice(self):
1477         "all nodes: check that a system slice is alive"
1478         # netflow currently not working in the lxc distro
1479         # drl not built at all in the wtx distro
1480         # if we find either of them we're happy
1481         return self.check_netflow() or self.check_drl()
1482
1483     # expose these
1484     def check_netflow(self): return self._check_system_slice('netflow')
1485     def check_drl(self): return self._check_system_slice('drl')
1486
1487     # we have the slices up already here, so it should not take too long
1488     def _check_system_slice(self, slicename, timeout_minutes=5, period_seconds=15):
1489         class CompleterTaskSystemSlice(CompleterTask):
1490             def __init__(self, test_node, dry_run):
1491                 self.test_node = test_node
1492                 self.dry_run = dry_run
1493             def actual_run(self):
1494                 return self.test_node._check_system_slice(slicename, dry_run=self.dry_run)
1495             def message(self):
1496                 return "System slice {} @ {}".format(slicename, self.test_node.name())
1497             def failure_epilogue(self):
1498                 print("COULD not find system slice {} @ {}".format(slicename, self.test_node.name()))
1499         timeout = timedelta(minutes=timeout_minutes)
1500         silent  = timedelta(0)
1501         period  = timedelta(seconds=period_seconds)
1502         tasks = [ CompleterTaskSystemSlice(test_node, self.options.dry_run) \
1503                       for test_node in self.all_nodes() ]
1504         return Completer(tasks, message='_check_system_slice').run(timeout, silent, period)
1505
1506     def plcsh_stress_test(self):
1507         "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1508         # install the stress-test in the plc image
1509         location = "/usr/share/plc_api/plcsh_stress_test.py"
1510         remote = "{}/{}".format(self.vm_root_in_host(), location)
1511         self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1512         command = location
1513         command += " -- --check"
1514         if self.options.size == 1:
1515             command +=  " --tiny"
1516         return self.run_in_guest(command) == 0
1517
1518     # populate runs the same utility with slightly different options
1519     # in particular it runs with --preserve (don't clean up) and without --check
1520     # also it gets run twice, once with the --foreign option for creating fake foreign entries
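    # concretely, populate() further down runs plcsh_stress_test.py twice in the guest:
    #   plcsh_stress_test.py -- --preserve --short-names
    #   plcsh_stress_test.py -- --preserve --short-names --foreign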
1521
1522     def install_pip2(self):
1523
1524         # xxx could make sense to mirror this one
1525
1526         replacements = [
1527             "https://acc.dl.osdn.jp/storage/g/u/un/unitedrpms/32/x86_64/python2-pip-19.1.1-7.fc32.noarch.rpm",
1528         ]
1529
1530         return (
1531                self.run_in_guest("pip2 --version") == 0
1532             or self.run_in_guest("dnf install python2-pip") == 0
1533             or self.run_in_guest("dnf localinstall -y " + " ".join(replacements)) == 0)
1534
1535
1536     def install_m2crypto(self):
1537
1538         # installing m2crypto for python2 is increasingly difficult
1539         # f29 and f31: dnf install python2-m2crypto
1540         # f33: no longer available but the f31 repos below do the job just fine
1541         # note that using pip2 does not look like a viable option because it does
1542         # an install from sources and that's quite awkward
1543
1544         replacements = [
1545             "http://mirror.onelab.eu/fedora/releases/31/Everything/x86_64/os/Packages/p/python2-typing-3.6.2-5.fc31.noarch.rpm",
1546             "http://mirror.onelab.eu/fedora/releases/31/Everything/x86_64/os/Packages/p/python2-m2crypto-0.35.2-2.fc31.x86_64.rpm",
1547         ]
1548
1549         return (
1550                self.run_in_guest('python2 -c "import M2Crypto"', backslash=True) == 0
1551             or self.run_in_guest("pip2 install python2-m2crypto") == 0
1552             or self.run_in_guest("dnf localinstall -y " + " ".join(replacements)) == 0)
1553
1554         # about pip2 (this describes the fallback logic in install_pip2 above):
1555         # we can try and use the replacement rpm listed there,
1556         # which would then need to be mirrored
1557         # so the logic goes like this:
1558         # check for the pip2 command
1559         # if not, try dnf install python2-pip
1560         # if still not, dnf localinstall the above
1561
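    # a minimal sketch (not wired into the steps above) of that same fallback pattern:
    # run each candidate command in the guest in turn, and report whether one succeeded
    def _first_succeeding(self, commands):
        for command in commands:
            if self.run_in_guest(command) == 0:
                return True
        return False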
1562
1563     def sfa_install_all(self):
1564         "yum install sfa sfa-plc sfa-sfatables sfa-client"
1565
1566         # the rpm/dnf packages named python2-* are getting deprecated
1567         # we use pip2 instead
1568         # but that's not good for m2crypto
1569
1570         pip_dependencies = [
1571             'sqlalchemy-migrate',
1572             'lxml',
1573             'python-dateutil',
1574             'psycopg2-binary',
1575             'pyOpenSSL',
1576         ]
1577
1578         return (
1579                     self.install_pip2()
1580                 and self.install_m2crypto()
1581                 and all((self.run_in_guest(f"pip2 install {dep}") == 0)
1582                         for dep in pip_dependencies)
1583                 and self.dnf_install("sfa sfa-plc sfa-sfatables sfa-client")
1584                 and self.run_in_guest("systemctl enable sfa-registry")==0
1585                 and self.run_in_guest("systemctl enable sfa-aggregate")==0)
1586
1587     def sfa_install_core(self):
1588         "yum install sfa"
1589         return self.dnf_install("sfa")
1590
1591     def sfa_install_plc(self):
1592         "yum install sfa-plc"
1593         return self.dnf_install("sfa-plc")
1594
1595     def sfa_install_sfatables(self):
1596         "yum install sfa-sfatables"
1597         return self.dnf_install("sfa-sfatables")
1598
1599     # for some very odd reason, this sometimes fails with the following symptom
1600     # # yum install sfa-client
1601     # Setting up Install Process
1602     # ...
1603     # Downloading Packages:
1604     # Running rpm_check_debug
1605     # Running Transaction Test
1606     # Transaction Test Succeeded
1607     # Running Transaction
1608     # Transaction couldn't start:
1609     # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1610     # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1611     # even though in the same context I have
1612     # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1613     # Filesystem            Size  Used Avail Use% Mounted on
1614     # /dev/hdv1             806G  264G  501G  35% /
1615     # none                   16M   36K   16M   1% /tmp
1616     #
1617     # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1618     def sfa_install_client(self):
1619         "yum install sfa-client"
1620         first_try = self.dnf_install("sfa-client")
1621         if first_try:
1622             return True
1623         utils.header("********** Regular yum failed - special workaround in place, 2nd chance")
1624         code, cached_rpm_path = \
1625                 utils.output_of(self.actual_command_in_guest(r'find /var/cache/yum -name sfa-client\*.rpm'))
1626         utils.header("cached_rpm_path=<<{}>>".format(cached_rpm_path))
1627         # just for checking
1628         self.run_in_guest("rpm -i {}".format(cached_rpm_path))
1629         return self.dnf_check_installed("sfa-client")
1630
1631     def sfa_dbclean(self):
1632         "thoroughly wipes off the SFA database"
1633         return self.run_in_guest("sfaadmin reg nuke") == 0 or \
1634             self.run_in_guest("sfa-nuke.py") == 0 or \
1635             self.run_in_guest("sfa-nuke-plc.py") == 0 or \
1636             self.run_in_guest("sfaadmin registry nuke") == 0
1637
1638     def sfa_fsclean(self):
1639         "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1640         self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1641         return True
1642
1643     def sfa_plcclean(self):
1644         "cleans the PLC entries that were created as a side effect of running the script"
1645         # ignore result
1646         sfa_spec = self.plc_spec['sfa']
1647
1648         for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1649             login_base = auth_sfa_spec['login_base']
1650             try:
1651                 self.apiserver.DeleteSite(self.auth_root(),login_base)
1652             except:
1653                 print("Site {} already absent from PLC db".format(login_base))
1654
1655             for spec_name in ['pi_spec', 'user_spec']:
1656                 user_spec = auth_sfa_spec[spec_name]
1657                 username = user_spec['email']
1658                 try:
1659                     self.apiserver.DeletePerson(self.auth_root(),username)
1660                 except:
1661                     # this in fact is expected as sites delete their members
1662                     #print "User {} already absent from PLC db".format(username)
1663                     pass
1664
1665         print("REMEMBER TO RUN sfa_import AGAIN")
1666         return True
1667
1668     def sfa_uninstall(self):
1669         "uses rpm to uninstall sfa - ignore result"
1670         self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1671         self.run_in_guest("rm -rf /var/lib/sfa")
1672         self.run_in_guest("rm -rf /etc/sfa")
1673         self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1674         # xxx tmp
1675         self.run_in_guest("rpm -e --noscripts sfa-plc")
1676         return True
1677
1678     ### run unit tests for SFA
1679     # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1680     # Running Transaction
1681     # Transaction couldn't start:
1682     # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1683     # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1684     # no matter how many Gbs are available on the testplc
1685     # could not figure out what's wrong, so...
1686     # if the yum install phase fails, consider the test is successful
1687     # other combinations will eventually run it hopefully
1688     def sfa_utest(self):
1689         "dnf install sfa-tests and run SFA unittests"
1690         self.run_in_guest("dnf -y install sfa-tests")
1691         # failed to install - forget it
1692         if self.run_in_guest("rpm -q sfa-tests") != 0:
1693             utils.header("WARNING: SFA unit tests failed to install, ignoring")
1694             return True
1695         return self.run_in_guest("/usr/share/sfa/tests/testAll.py") == 0
1696
1697     ###
1698     def confdir(self):
1699         dirname = "conf.{}".format(self.plc_spec['name'])
1700         if not os.path.isdir(dirname):
1701             utils.system("mkdir -p {}".format(dirname))
1702         if not os.path.isdir(dirname):
1703             raise Exception("Cannot create config dir for plc {}".format(self.name()))
1704         return dirname
1705
1706     def conffile(self, filename):
1707         return "{}/{}".format(self.confdir(), filename)
1708     def confsubdir(self, dirname, clean, dry_run=False):
1709         subdirname = "{}/{}".format(self.confdir(), dirname)
1710         if clean:
1711             utils.system("rm -rf {}".format(subdirname))
1712         if not os.path.isdir(subdirname):
1713             utils.system("mkdir -p {}".format(subdirname))
1714         if not dry_run and not os.path.isdir(subdirname):
1715             raise Exception("Cannot create config subdir {} for plc {}".format(dirname, self.name()))
1716         return subdirname
1717
1718     def conffile_clean(self, filename):
1719         filename=self.conffile(filename)
1720         return utils.system("rm -rf {}".format(filename))==0
1721
1722     ###
1723     def sfa_configure(self):
1724         "run sfa-config-tty"
1725         tmpname = self.conffile("sfa-config-tty")
1726         with open(tmpname,'w') as fileconf:
1727             for var, value in self.plc_spec['sfa']['settings'].items():
1728                 fileconf.write('e {}\n{}\n'.format(var, value))
1729             fileconf.write('w\n')
1730             fileconf.write('R\n')
1731             fileconf.write('q\n')
1732         utils.system('cat {}'.format(tmpname))
1733         self.run_in_guest_piped('cat {}'.format(tmpname), 'sfa-config-tty')
1734         return True
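
    # for illustration, with a single hypothetical setting SFA_REGISTRY_ROOT_AUTH=plt,
    # the file piped into sfa-config-tty above would read:
    #   e SFA_REGISTRY_ROOT_AUTH
    #   plt
    #   w
    #   R
    #   q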
1735
1736     def aggregate_xml_line(self):
1737         port = self.plc_spec['sfa']['neighbours-port']
1738         return '<aggregate addr="{}" hrn="{}" port="{}"/>'\
1739             .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'], port)
1740
1741     def registry_xml_line(self):
1742         return '<registry addr="{}" hrn="{}" port="12345"/>'\
1743             .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
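
    # for illustration, with hypothetical values vserverip=10.0.0.1,
    # SFA_REGISTRY_ROOT_AUTH=plt and neighbours-port=12346, these two produce
    #   <aggregate addr="10.0.0.1" hrn="plt" port="12346"/>
    #   <registry addr="10.0.0.1" hrn="plt" port="12345"/>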
1744
1745
1746     # a cross step that takes all other plcs in argument
1747     def cross_sfa_configure(self, other_plcs):
1748         "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1749         # of course with a single plc, other_plcs is an empty list
1750         if not other_plcs:
1751             return True
1752         agg_fname = self.conffile("agg.xml")
1753         with open(agg_fname,"w") as out:
1754             out.write("<aggregates>{}</aggregates>\n"\
1755                       .format(" ".join([ plc.aggregate_xml_line() for plc in other_plcs ])))
1756         utils.header("(Over)wrote {}".format(agg_fname))
1757         reg_fname=self.conffile("reg.xml")
1758         with open(reg_fname,"w") as out:
1759             out.write("<registries>{}</registries>\n"\
1760                       .format(" ".join([ plc.registry_xml_line() for plc in other_plcs ])))
1761         utils.header("(Over)wrote {}".format(reg_fname))
1762         return self.test_ssh.copy_abs(agg_fname,
1763                                       '/{}/etc/sfa/aggregates.xml'.format(self.vm_root_in_host())) == 0 \
1764            and self.test_ssh.copy_abs(reg_fname,
1765                                       '/{}/etc/sfa/registries.xml'.format(self.vm_root_in_host())) == 0
1766
1767     def sfa_import(self):
1768         "use sfaadmin to import from plc"
1769         auth = self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']
1770         return self.run_in_guest('sfaadmin reg import_registry') == 0
1771
1772     def sfa_start(self):
1773         "start SFA through systemctl - also install dependencies"
1774
1775         return (self.start_stop_systemd('sfa-registry', 'start')
1776             and self.start_stop_systemd('sfa-aggregate', 'start'))
1777
1778
1779     def sfi_configure(self):
1780         "Create /root/sfi on the plc side for sfi client configuration"
1781         if self.options.dry_run:
1782             utils.header("DRY RUN - skipping step")
1783             return True
1784         sfa_spec = self.plc_spec['sfa']
1785         # cannot use auth_sfa_mapper to pass dir_name
1786         for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1787             test_slice = TestAuthSfa(self, slice_spec)
1788             dir_basename = os.path.basename(test_slice.sfi_path())
1789             dir_name = self.confsubdir("dot-sfi/{}".format(dir_basename),
1790                                        clean=True, dry_run=self.options.dry_run)
1791             test_slice.sfi_configure(dir_name)
1792             # push into the remote /root/sfi area
1793             location = test_slice.sfi_path()
1794             remote = "{}/{}".format(self.vm_root_in_host(), location)
1795             self.test_ssh.mkdir(remote, abs=True)
1796             # need to strip the last level of remote, otherwise we get an extra dir level
1797             self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1798
1799         return True
1800
1801     def sfi_clean(self):
1802         "clean up /root/sfi on the plc side"
1803         self.run_in_guest("rm -rf /root/sfi")
1804         return True
1805
1806     def sfa_rspec_empty(self):
1807         "expose a static empty rspec (ships with the tests module) in the sfi directory"
1808         filename = "empty-rspec.xml"
1809         overall = True
1810         for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1811             test_slice = TestAuthSfa(self, slice_spec)
1812             in_vm = test_slice.sfi_path()
1813             remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1814             if self.test_ssh.copy_abs(filename, remote) !=0:
1815                 overall = False
1816         return overall
1817
1818     @auth_sfa_mapper
1819     def sfa_register_site(self): pass
1820     @auth_sfa_mapper
1821     def sfa_register_pi(self): pass
1822     @auth_sfa_mapper
1823     def sfa_register_user(self): pass
1824     @auth_sfa_mapper
1825     def sfa_update_user(self): pass
1826     @auth_sfa_mapper
1827     def sfa_register_slice(self): pass
1828     @auth_sfa_mapper
1829     def sfa_renew_slice(self): pass
1830     @auth_sfa_mapper
1831     def sfa_get_expires(self): pass
1832     @auth_sfa_mapper
1833     def sfa_discover(self): pass
1834     @auth_sfa_mapper
1835     def sfa_rspec(self): pass
1836     @auth_sfa_mapper
1837     def sfa_allocate(self): pass
1838     @auth_sfa_mapper
1839     def sfa_allocate_empty(self): pass
1840     @auth_sfa_mapper
1841     def sfa_provision(self): pass
1842     @auth_sfa_mapper
1843     def sfa_provision_empty(self): pass
1844     @auth_sfa_mapper
1845     def sfa_describe(self): pass
1846     @auth_sfa_mapper
1847     def sfa_check_slice_plc(self): pass
1848     @auth_sfa_mapper
1849     def sfa_check_slice_plc_empty(self): pass
1850     @auth_sfa_mapper
1851     def sfa_update_slice(self): pass
1852     @auth_sfa_mapper
1853     def sfa_remove_user_from_slice(self): pass
1854     @auth_sfa_mapper
1855     def sfa_insert_user_in_slice(self): pass
1856     @auth_sfa_mapper
1857     def sfi_list(self): pass
1858     @auth_sfa_mapper
1859     def sfi_show_site(self): pass
1860     @auth_sfa_mapper
1861     def sfi_show_slice(self): pass
1862     @auth_sfa_mapper
1863     def sfi_show_slice_researchers(self): pass
1864     @auth_sfa_mapper
1865     def ssh_slice_sfa(self): pass
1866     @auth_sfa_mapper
1867     def sfa_delete_user(self): pass
1868     @auth_sfa_mapper
1869     def sfa_delete_slice(self): pass
1870
1871     def sfa_stop(self):
1872         "stop sfa through systemclt"
1873         return (self.start_stop_systemd('sfa-aggregate', 'stop') and
1874                 self.start_stop_systemd('sfa-registry', 'stop'))
1875
1876     def populate(self):
1877         "creates random entries in the PLCAPI"
1878         # install the stress-test in the plc image
1879         location = "/usr/share/plc_api/plcsh_stress_test.py"
1880         remote = "{}/{}".format(self.vm_root_in_host(), location)
1881         self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1882         command = location
1883         command += " -- --preserve --short-names"
1884         local = (self.run_in_guest(command) == 0)
1885         # second run with --foreign
1886         command += ' --foreign'
1887         remote = (self.run_in_guest(command) == 0)
1888         return local and remote
1889
1890
1891     ####################
1892     @bonding_redirector
1893     def bonding_init_partial(self): pass
1894
1895     @bonding_redirector
1896     def bonding_add_yum(self): pass
1897
1898     @bonding_redirector
1899     def bonding_install_rpms(self): pass
1900
1901     ####################
1902
1903     def gather_logs(self):
1904         "gets all possible logs from plc's/qemu node's/slice's for future reference"
1905         # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1906         # (1.b) get the plc's  /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1907         # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1908     # (2) get all the nodes' qemu logs and store them as logs/node.qemu.<node>.log
1909     # (3) get the nodes' /var/log and store it as logs/node.var-log.<node>/*
1910         # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1911         # (1.a)
1912         print("-------------------- TestPlc.gather_logs : PLC's /var/log")
1913         self.gather_var_logs()
1914         # (1.b)
1915         print("-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/")
1916         self.gather_pgsql_logs()
1917         # (1.c)
1918         print("-------------------- TestPlc.gather_logs : PLC's /root/sfi/")
1919         self.gather_root_sfi()
1920         # (2)
1921         print("-------------------- TestPlc.gather_logs : nodes's QEMU logs")
1922         for site_spec in self.plc_spec['sites']:
1923             test_site = TestSite(self,site_spec)
1924             for node_spec in site_spec['nodes']:
1925                 test_node = TestNode(self, test_site, node_spec)
1926                 test_node.gather_qemu_logs()
1927         # (3)
1928         print("-------------------- TestPlc.gather_logs : nodes's /var/log")
1929         self.gather_nodes_var_logs()
1930         # (4)
1931         print("-------------------- TestPlc.gather_logs : sample sliver's /var/log")
1932         self.gather_slivers_var_logs()
1933         return True
1934
1935     def gather_slivers_var_logs(self):
1936         for test_sliver in self.all_sliver_objs():
1937             remote = test_sliver.tar_var_logs()
1938             utils.system("mkdir -p logs/sliver.var-log.{}".format(test_sliver.name()))
1939             command = remote + " | tar -C logs/sliver.var-log.{} -xf -".format(test_sliver.name())
1940             utils.system(command)
1941         return True
1942
1943     def gather_var_logs(self):
1944         utils.system("mkdir -p logs/myplc.var-log.{}".format(self.name()))
1945         to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1946         command = to_plc + "| tar -C logs/myplc.var-log.{} -xf -".format(self.name())
1947         utils.system(command)
1948         command = "chmod a+r,a+x logs/myplc.var-log.{}/httpd".format(self.name())
1949         utils.system(command)
1950
1951     def gather_pgsql_logs(self):
1952         utils.system("mkdir -p logs/myplc.pgsql-log.{}".format(self.name()))
1953         to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1954         command = to_plc + "| tar -C logs/myplc.pgsql-log.{} -xf -".format(self.name())
1955         utils.system(command)
1956
1957     def gather_root_sfi(self):
1958         utils.system("mkdir -p logs/sfi.{}".format(self.name()))
1959         to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1960         command = to_plc + "| tar -C logs/sfi.{} -xf -".format(self.name())
1961         utils.system(command)
1962
1963     def gather_nodes_var_logs(self):
1964         for site_spec in self.plc_spec['sites']:
1965             test_site = TestSite(self, site_spec)
1966             for node_spec in site_spec['nodes']:
1967                 test_node = TestNode(self, test_site, node_spec)
1968                 test_ssh = TestSsh(test_node.name(), key="keys/key_admin.rsa")
1969                 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1970                 command = command + "| tar -C logs/node.var-log.{} -xf -".format(test_node.name())
1971                 utils.system("mkdir -p logs/node.var-log.{}".format(test_node.name()))
1972                 utils.system(command)
1973
1974
1975     # returns the filename to use for sql dump/restore, using options.dbname if set
1976     def dbfile(self, database):
1977         # uses options.dbname if it is found
1978         try:
1979             name = self.options.dbname
1980             if not isinstance(name, str):
1981                 raise Exception
1982         except:
1983             t = datetime.now()
1984             d = t.date()
1985             name = str(d)
1986         return "/root/{}-{}.sql".format(database, name)
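
    # for illustration: with no options.dbname set, on 2012-05-23 this would yield
    # something like /root/planetlab5-2012-05-23.sql for the 'planetlab5' database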
1987
1988     def plc_db_dump(self):
1989         'dump the planetlab5 DB in /root in the PLC - filename carries the date'
1990         dump = self.dbfile("planetlab5")
1991         self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1992         utils.header('Dumped planetlab5 database in {}'.format(dump))
1993         return True
1994
1995     def plc_db_restore(self):
1996         'restore the planetlab5 DB - looks broken, but run -n might help'
1997         dump = self.dbfile("planetlab5")
1998         self.run_in_guest('systemctl stop httpd')
1999         # xxx - need another wrapper
2000         self.run_in_guest_piped('echo drop database planetlab5', 'psql --user=pgsqluser template1')
2001         self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
2002         self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump)
2003         # restart the httpd service
2004         self.run_in_guest('systemctl start httpd')
2005         utils.header('Database restored from ' + dump)
2006         return True
2007
2008     @staticmethod
2009     def create_ignore_steps():
2010         for step in TestPlc.default_steps + TestPlc.other_steps:
2011             # default step can have a plc qualifier
2012             if '@' in step:
2013                 step, qualifier = step.split('@')
2014             # or be defined as forced or ignored by default
2015             for keyword in ['_ignore', '_force']:
2016                 if step.endswith(keyword):
2017                     step=step.replace(keyword,'')
2018             if step == SEP or step == SEPSFA :
2019                 continue
2020             method = getattr(TestPlc,step)
2021             name = step + '_ignore'
2022             wrapped = ignore_result(method)
2023 #            wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
2024             setattr(TestPlc, name, wrapped)
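
    # usage sketch: the test driver is expected to call TestPlc.create_ignore_steps()
    # once up front; every step 'foo' then gets a 'foo_ignore' companion
    # (e.g. sfa_import_ignore) that runs 'foo' through the ignore_result wrapper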
2025
2026 #    @ignore_result
2027 #    def ssh_slice_again_ignore (self): pass
2028 #    @ignore_result
2029 #    def check_initscripts_ignore (self): pass
2030
2031     def standby_1_through_20(self):
2032         """convenience function to wait for a specified number of minutes"""
2033         pass
2034     @standby_generic
2035     def standby_1(): pass
2036     @standby_generic
2037     def standby_2(): pass
2038     @standby_generic
2039     def standby_3(): pass
2040     @standby_generic
2041     def standby_4(): pass
2042     @standby_generic
2043     def standby_5(): pass
2044     @standby_generic
2045     def standby_6(): pass
2046     @standby_generic
2047     def standby_7(): pass
2048     @standby_generic
2049     def standby_8(): pass
2050     @standby_generic
2051     def standby_9(): pass
2052     @standby_generic
2053     def standby_10(): pass
2054     @standby_generic
2055     def standby_11(): pass
2056     @standby_generic
2057     def standby_12(): pass
2058     @standby_generic
2059     def standby_13(): pass
2060     @standby_generic
2061     def standby_14(): pass
2062     @standby_generic
2063     def standby_15(): pass
2064     @standby_generic
2065     def standby_16(): pass
2066     @standby_generic
2067     def standby_17(): pass
2068     @standby_generic
2069     def standby_18(): pass
2070     @standby_generic
2071     def standby_19(): pass
2072     @standby_generic
2073     def standby_20(): pass
2074
2075     # convenience for debugging the test logic
2076     def yes(self): return True
2077     def no(self): return False
2078     def fail(self): return False