1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
3 #
4 import sys
5 import time
6 import os, os.path
7 import traceback
8 import socket
9 from datetime import datetime, timedelta
10
11 import utils
12 from Completer import Completer, CompleterTask
13 from TestSite import TestSite
14 from TestNode import TestNode, CompleterTaskNodeSsh
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
24
25 from TestBonding import TestBonding
26
27 has_sfa_cache_filename="sfa-cache"
28
29 # step methods must take (self) and return a boolean (options is a member of the class)
30
31 def standby(minutes, dry_run):
32     utils.header('Entering StandBy for {:d} minutes'.format(minutes))
33     if dry_run:
34         print('dry_run')
35     else:
36         time.sleep(60*minutes)
37     return True
38
39 def standby_generic(func):
40     def actual(self):
41         minutes = int(func.__name__.split("_")[1])
42         return standby(minutes, self.options.dry_run)
43     return actual
44
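# Illustrative sketch only (not part of the framework): standby_generic derives the
# duration from the wrapped function's name, so a step would typically be declared as
#
#     @standby_generic
#     def standby_5(self):
#         "hypothetical step - waits 5 minutes (cf. 'standby_1_through_20' below)"
#
# at run time the '5' is parsed out of __name__ and handed to standby() above.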
45 def node_mapper(method):
46     def map_on_nodes(self, *args, **kwds):
47         overall = True
48         node_method = TestNode.__dict__[method.__name__]
49         for test_node in self.all_nodes():
50             if not node_method(test_node, *args, **kwds):
51                 overall=False
52         return overall
53     # maintain __name__ for ignore_result
54     map_on_nodes.__name__ = method.__name__
55     # restore the doc text
56     map_on_nodes.__doc__ = TestNode.__dict__[method.__name__].__doc__
57     return map_on_nodes
58
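# node_mapper turns a per-node TestNode method into a TestPlc step that loops over
# all nodes and ANDs the individual results, while keeping the TestNode method's
# name and docstring; it is used further down in this class, e.g.:
#
#     @node_mapper
#     def qemu_local_init(self): pass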
59 def slice_mapper(method):
60     def map_on_slices(self):
61         overall = True
62         slice_method = TestSlice.__dict__[method.__name__]
63         for slice_spec in self.plc_spec['slices']:
64             site_spec = self.locate_site (slice_spec['sitename'])
65             test_site = TestSite(self,site_spec)
66             test_slice = TestSlice(self,test_site,slice_spec)
67             if not slice_method(test_slice, self.options):
68                 overall=False
69         return overall
70     # maintain __name__ for ignore_result
71     map_on_slices.__name__ = method.__name__
72     # restore the doc text
73     map_on_slices.__doc__ = TestSlice.__dict__[method.__name__].__doc__
74     return map_on_slices
75
76 def bonding_redirector(method):
77     bonding_name = method.__name__.replace('bonding_', '')
78     def redirect(self):
79         bonding_method = TestBonding.__dict__[bonding_name]
80         return bonding_method(self.test_bonding)
81     # maintain __name__ for ignore_result
82     redirect.__name__ = method.__name__
83     # restore the doc text
84     redirect.__doc__ = TestBonding.__dict__[bonding_name].__doc__
85     return redirect
86
87 # run a step but return True so that we can go on
88 def ignore_result(method):
89     def ignoring(self):
90         # ssh_slice_ignore->ssh_slice
91         ref_name = method.__name__.replace('_ignore', '').replace('force_', '')
92         ref_method = TestPlc.__dict__[ref_name]
93         result = ref_method(self)
94         print("Actual (but ignored) result for {ref_name} is {result}".format(**locals()))
95         return Ignored(result)
96     name = method.__name__.replace('_ignore', '').replace('force_', '')
97     ignoring.__name__ = name
98     ignoring.__doc__ = "ignored version of " + name
99     return ignoring
100
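# Sketch of how ignore_result is typically wired up (the concrete declarations live
# elsewhere in this class); for a step like 'slice_fs_deleted_ignore' from
# default_steps, something along these lines is expected:
#
#     def slice_fs_deleted_ignore(self): pass
#     slice_fs_deleted_ignore = ignore_result(slice_fs_deleted_ignore)
#
# the underlying slice_fs_deleted step still runs and its result is printed, but the
# Ignored wrapper keeps a failure from marking the overall run as failed.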
101 # a variant that expects the TestSlice method to return a list of CompleterTasks that
102 # are then merged into a single Completer run, to avoid waiting for the slices one after the other
103 # especially useful when a test fails, of course
104 # because we need to pass arguments, we use a class instead of a plain function decorator
105 class slice_mapper__tasks(object):
106     # could not get this to work with named arguments
107     def __init__(self, timeout_minutes, silent_minutes, period_seconds):
108         self.timeout = timedelta(minutes = timeout_minutes)
109         self.silent = timedelta(minutes = silent_minutes)
110         self.period = timedelta(seconds = period_seconds)
111     def __call__(self, method):
112         decorator_self=self
113         # compute augmented method name
114         method_name = method.__name__ + "__tasks"
115         # locate in TestSlice
116         slice_method = TestSlice.__dict__[ method_name ]
117         def wrappee(self):
118             tasks=[]
119             for slice_spec in self.plc_spec['slices']:
120                 site_spec = self.locate_site (slice_spec['sitename'])
121                 test_site = TestSite(self, site_spec)
122                 test_slice = TestSlice(self, test_site, slice_spec)
123                 tasks += slice_method (test_slice, self.options)
124             return Completer (tasks, message=method.__name__).\
125                 run(decorator_self.timeout, decorator_self.silent, decorator_self.period)
126         # restore the doc text from the TestSlice method even if a bit odd
127         wrappee.__name__ = method.__name__
128         wrappee.__doc__ = slice_method.__doc__
129         return wrappee
130
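# Hypothetical usage sketch (timing values made up): the decorator is instantiated
# with its delays, and the wrapped TestPlc method is just a stub - the real per-slice
# work comes from the TestSlice method of the same name suffixed with '__tasks':
#
#     @slice_mapper__tasks(timeout_minutes=20, silent_minutes=10, period_seconds=15)
#     def ssh_slice(self): pass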
131 def auth_sfa_mapper(method):
132     def actual(self):
133         overall = True
134         auth_method = TestAuthSfa.__dict__[method.__name__]
135         for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
136             test_auth = TestAuthSfa(self, auth_spec)
137             if not auth_method(test_auth, self.options):
138                 overall=False
139         return overall
140     # restore the doc text
141     actual.__doc__ = TestAuthSfa.__dict__[method.__name__].__doc__
142     return actual
143
144 class Ignored:
145     def __init__(self, result):
146         self.result = result
147
148 SEP = '<sep>'
149 SEPSFA = '<sep_sfa>'
150
151 class TestPlc:
152
153     default_steps = [
154         'show', SEP,
155         'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
156         'django_install', 'plc_install', 'plc_configure', 'plc_start', SEP,
157         'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
158         'plcapi_urls','speed_up_slices', SEP,
159         'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
160 # slices created interactively under plcsh seem to be fine, but these ones don't have the tags
161 # keep this out of the way for now
162         'check_vsys_defaults_ignore', SEP,
163 # run this first off so it's easier to re-run on another qemu box
164         'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
165         'qemu_clean_mine', 'qemu_export', 'qemu_cleanlog', SEP,
166         'qemu_start', 'qemu_timestamp', 'qemu_nodefamily', SEP,
167         'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
168         'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
169         'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
170         'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1',
171         'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
172         'sfa_discover@1', 'sfa_rspec@1', SEPSFA,
173         'sfa_allocate@1', 'sfa_provision@1', 'sfa_describe@1', SEPSFA,
174         'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
175         'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
176         # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
177         # but as the stress test might take a while, we sometimes missed the debug mode..
178         'probe_kvm_iptables',
179         'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
180         'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', SEP,
181         'ssh_slice_sfa@1', SEPSFA,
182         'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1','sfa_check_slice_plc_empty@1', SEPSFA,
183         'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
184         'cross_check_tcp@1', 'check_system_slice', SEP,
185         # for inspecting the slice while it runs the first time
186         #'fail',
187         # check slices are turned off properly
188         'debug_nodemanager',
189         'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
190         # check they are properly re-created with the same name
191         'fill_slices', 'ssh_slice_again', SEP,
192         'gather_logs_force', SEP,
193         ]
194     other_steps = [
195         'export', 'show_boxes', 'super_speed_up_slices', SEP,
196         'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
197         'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
198         'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
199         'delete_leases', 'list_leases', SEP,
200         'populate', SEP,
201         'nodestate_show','nodestate_safeboot','nodestate_boot', 'nodestate_upgrade', SEP,
202         'nodedistro_show','nodedistro_f14','nodedistro_f18', SEP,
203         'nodedistro_f20', 'nodedistro_f21','nodedistro_f22', SEP,
204         'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
205         'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
206         'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
207         'sfa_get_expires', SEPSFA,
208         'plc_db_dump' , 'plc_db_restore', SEP,
209         'check_netflow','check_drl', SEP,
210         'slice_fs_present', 'check_initscripts', SEP,
211         'standby_1_through_20','yes','no',SEP,
212         'install_syslinux6', 'bonding_builds', 'bonding_nodes', SEP,
213         ]
214     default_bonding_steps = [
215         'bonding_init_partial',
216         'bonding_add_yum',
217         'bonding_install_rpms', SEP,
218         ]
219
220     @staticmethod
221     def printable_steps(list):
222         single_line = " ".join(list) + " "
223         return single_line.replace(" "+SEP+" ", " \\\n").replace(" "+SEPSFA+" ", " \\\n")
224     @staticmethod
225     def valid_step(step):
226         return step != SEP and step != SEPSFA
227
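    # For illustration: printable_steps(['show', SEP, 'plc_install', 'plc_start'])
    # returns "show \" followed by a newline and then "plc_install plc_start " -
    # each SEP/SEPSFA marker becomes a backslash-newline so that long step lists
    # print on several readable lines.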
228     # turn off the sfa-related steps when build has skipped SFA
229     # this was originally for centos5 but is still valid
230     # for up to f12 as recent SFAs with sqlalchemy won't build before f14
231     @staticmethod
232     def _has_sfa_cached(rpms_url):
233         if os.path.isfile(has_sfa_cache_filename):
234             with open(has_sfa_cache_filename) as cache:
235                 cached = cache.read() == "yes"
236             utils.header("build provides SFA (cached):{}".format(cached))
237             return cached
238         # warning, we're now building 'sface' so let's be a bit more picky
239         # full builds are expected to return with 0 here
240         utils.header("Checking if build provides SFA package...")
241         retcod = utils.system("curl --silent {}/ | grep -q sfa-".format(rpms_url)) == 0
242         encoded = 'yes' if retcod else 'no'
243         with open(has_sfa_cache_filename,'w') as cache:
244             cache.write(encoded)
245         return retcod
246
247     @staticmethod
248     def check_whether_build_has_sfa(rpms_url):
249         has_sfa = TestPlc._has_sfa_cached(rpms_url)
250         if has_sfa:
251             utils.header("build does provide SFA")
252         else:
253             # move all steps containing 'sfa' from default_steps to other_steps
254             utils.header("SFA package not found - removing steps with sfa or sfi")
255             sfa_steps = [ step for step in TestPlc.default_steps
256                           if step.find('sfa') >= 0 or step.find("sfi") >= 0 ]
257             TestPlc.other_steps += sfa_steps
258             for step in sfa_steps:
259                 TestPlc.default_steps.remove(step)
260
261     def __init__(self, plc_spec, options):
262         self.plc_spec = plc_spec
263         self.options = options
264         self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
265         self.vserverip = plc_spec['vserverip']
266         self.vservername = plc_spec['vservername']
267         self.vplchostname = self.vservername.split('-')[-1]
268         self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
269         self.apiserver = TestApiserver(self.url, options.dry_run)
270         (self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
271         (self.ssh_node_debug_timeout, self.ssh_node_debug_silent) = plc_spec['ssh_node_debug_timers']
272
273     def has_addresses_api(self):
274         return self.apiserver.has_method('AddIpAddress')
275
276     def name(self):
277         name = self.plc_spec['name']
278         return "{}.{}".format(name,self.vservername)
279
280     def hostname(self):
281         return self.plc_spec['host_box']
282
283     def is_local(self):
284         return self.test_ssh.is_local()
285
286     # define the API methods on this object through xmlrpc
287     # would help, but not strictly necessary
288     def connect(self):
289         pass
290
291     def actual_command_in_guest(self,command, backslash=False):
292         raw1 = self.host_to_guest(command)
293         raw2 = self.test_ssh.actual_command(raw1, dry_run=self.options.dry_run, backslash=backslash)
294         return raw2
295
296     def start_guest(self):
297         return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),
298                                                          dry_run=self.options.dry_run))
299
300     def stop_guest(self):
301         return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),
302                                                          dry_run=self.options.dry_run))
303
304     def run_in_guest(self, command, backslash=False):
305         raw = self.actual_command_in_guest(command, backslash)
306         return utils.system(raw)
307
308     def run_in_host(self,command):
309         return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
310
311     # backslashing turned out so awful at some point that I've turned off auto-backslashing
312     # see e.g. plc_start esp. the version for f14
313     # command gets run in the plc's vm
314     def host_to_guest(self, command):
315         ssh_leg = TestSsh(self.vplchostname)
316         return ssh_leg.actual_command(command, keep_stdin=True)
317
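    # Putting the two ssh legs together, e.g. for run_in_guest("service plc start"):
    # host_to_guest first wraps the command for the plc VM, and test_ssh.actual_command
    # then wraps that again for the host box, so the command is relayed
    # host_box -> guest; roughly:
    #
    #     raw1 = self.host_to_guest('service plc start')     # ssh leg into the VM
    #     raw2 = self.test_ssh.actual_command(raw1, ...)     # ssh leg into the host box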
318     # this /vservers thing is legacy...
319     def vm_root_in_host(self):
320         return "/vservers/{}/".format(self.vservername)
321
322     def vm_timestamp_path(self):
323         return "/vservers/{}/{}.timestamp".format(self.vservername, self.vservername)
324
325     #start/stop the vserver
326     def start_guest_in_host(self):
327         return "virsh -c lxc:/// start {}".format(self.vservername)
328
329     def stop_guest_in_host(self):
330         return "virsh -c lxc:/// destroy {}".format(self.vservername)
331
332     # xxx quick n dirty
333     def run_in_guest_piped(self,local,remote):
334         return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),
335                                                                      keep_stdin = True))
336
337     def yum_check_installed(self, rpms):
338         if isinstance(rpms, list):
339             rpms=" ".join(rpms)
340         return self.run_in_guest("rpm -q {}".format(rpms)) == 0
341
342     # does a yum install in the vs, ignore yum retcod, check with rpm
343     def yum_install(self, rpms):
344         if isinstance(rpms, list):
345             rpms=" ".join(rpms)
346         yum_mode = self.run_in_guest("yum -y install {}".format(rpms))
347         if yum_mode != 0:
348             self.run_in_guest("dnf -y install --allowerasing {}".format(rpms))
349         # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
350         self.run_in_guest("yum-complete-transaction -y")
351         return self.yum_check_installed(rpms)
352
353     def pip_install(self, package):
354         return self.run_in_guest("pip install {}".format(package))
355
356     def auth_root(self):
357         return {'Username'   : self.plc_spec['settings']['PLC_ROOT_USER'],
358                 'AuthMethod' : 'password',
359                 'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
360                 'Role'       : self.plc_spec['role'],
361                 }
362
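    # auth_root() builds the credentials dict expected as the first argument of every
    # PLCAPI call made through self.apiserver, as in the calls used further down:
    #
    #     sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id', 'login_base'])
    #     nodes = self.apiserver.GetNodes(self.auth_root())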
363     def locate_site(self,sitename):
364         for site in self.plc_spec['sites']:
365             if site['site_fields']['name'] == sitename:
366                 return site
367             if site['site_fields']['login_base'] == sitename:
368                 return site
369         raise Exception("Cannot locate site {}".format(sitename))
370
371     def locate_node(self, nodename):
372         for site in self.plc_spec['sites']:
373             for node in site['nodes']:
374                 if node['name'] == nodename:
375                     return site, node
376         raise Exception("Cannot locate node {}".format(nodename))
377
378     def locate_hostname(self, hostname):
379         for site in self.plc_spec['sites']:
380             for node in site['nodes']:
381                 if node['node_fields']['hostname'] == hostname:
382                     return(site, node)
383         raise Exception("Cannot locate hostname {}".format(hostname))
384
385     def locate_key(self, key_name):
386         for key in self.plc_spec['keys']:
387             if key['key_name'] == key_name:
388                 return key
389         raise Exception("Cannot locate key {}".format(key_name))
390
391     def locate_private_key_from_key_names(self, key_names):
392         # locate the first avail. key
393         found = False
394         for key_name in key_names:
395             key_spec = self.locate_key(key_name)
396             test_key = TestKey(self,key_spec)
397             publickey = test_key.publicpath()
398             privatekey = test_key.privatepath()
399             if os.path.isfile(publickey) and os.path.isfile(privatekey):
400                 found = True
401         if found:
402             return privatekey
403         else:
404             return None
405
406     def locate_slice(self, slicename):
407         for slice in self.plc_spec['slices']:
408             if slice['slice_fields']['name'] == slicename:
409                 return slice
410         raise Exception("Cannot locate slice {}".format(slicename))
411
412     def all_sliver_objs(self):
413         result = []
414         for slice_spec in self.plc_spec['slices']:
415             slicename = slice_spec['slice_fields']['name']
416             for nodename in slice_spec['nodenames']:
417                 result.append(self.locate_sliver_obj(nodename, slicename))
418         return result
419
420     def locate_sliver_obj(self, nodename, slicename):
421         site,node = self.locate_node(nodename)
422         slice = self.locate_slice(slicename)
423         # build objects
424         test_site = TestSite(self, site)
425         test_node = TestNode(self, test_site, node)
426         # xxx the slice site is assumed to be the node site - mhh - probably harmless
427         test_slice = TestSlice(self, test_site, slice)
428         return TestSliver(self, test_node, test_slice)
429
430     def locate_first_node(self):
431         nodename = self.plc_spec['slices'][0]['nodenames'][0]
432         site,node = self.locate_node(nodename)
433         test_site = TestSite(self, site)
434         test_node = TestNode(self, test_site, node)
435         return test_node
436
437     def locate_first_sliver(self):
438         slice_spec = self.plc_spec['slices'][0]
439         slicename = slice_spec['slice_fields']['name']
440         nodename = slice_spec['nodenames'][0]
441         return self.locate_sliver_obj(nodename,slicename)
442
443     # all different hostboxes used in this plc
444     def get_BoxNodes(self):
445         # maps on sites and nodes, return [ (host_box,test_node) ]
446         tuples = []
447         for site_spec in self.plc_spec['sites']:
448             test_site = TestSite(self,site_spec)
449             for node_spec in site_spec['nodes']:
450                 test_node = TestNode(self, test_site, node_spec)
451                 if not test_node.is_real():
452                     tuples.append( (test_node.host_box(),test_node) )
453         # transform into a dict { 'host_box' -> [ test_node .. ] }
454         result = {}
455         for (box,node) in tuples:
456             if box not in result:
457                 result[box] = [node]
458             else:
459                 result[box].append(node)
460         return result
461
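    # get_BoxNodes() returns a dict shaped like {'qemu-box-hostname': [TestNode, ...]}
    # (hostname hypothetical): one entry per host box, each listing the TestNode
    # objects whose qemu guest lives on that box; real nodes are skipped.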
462     # a step for checking this stuff
463     def show_boxes(self):
464         'print summary of nodes location'
465         for box,nodes in self.get_BoxNodes().items():
466             print(box,":"," + ".join( [ node.name() for node in nodes ] ))
467         return True
468
469     # make this a valid step
470     def qemu_kill_all(self):
471         'kill all qemu instances on the qemu boxes involved in this setup'
472         # this is the brute force version, kill all qemus on that host box
473         for (box,nodes) in self.get_BoxNodes().items():
474             # pass the first nodename, as we don't push template-qemu on testboxes
475             nodedir = nodes[0].nodedir()
476             TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
477         return True
478
479     # make this a valid step
480     def qemu_list_all(self):
481         'list all qemu instances on the qemu boxes involved in this setup'
482         for box,nodes in self.get_BoxNodes().items():
483             # the brute force version: list all qemus on that host box
484             TestBoxQemu(box, self.options.buildname).qemu_list_all()
485         return True
486
487     # list only the qemus related to this test
488     def qemu_list_mine(self):
489         'list qemu instances for our nodes'
490         for (box,nodes) in self.get_BoxNodes().items():
491             # the fine-grain version
492             for node in nodes:
493                 node.list_qemu()
494         return True
495
496     # clean up only the qemus related to this test
497     def qemu_clean_mine(self):
498         'cleanup (rm -rf) qemu instances for our nodes'
499         for box,nodes in self.get_BoxNodes().items():
500             # the fine-grain version
501             for node in nodes:
502                 node.qemu_clean()
503         return True
504
505     # kill only the right qemus
506     def qemu_kill_mine(self):
507         'kill the qemu instances for our nodes'
508         for box,nodes in self.get_BoxNodes().items():
509             # the fine-grain version
510             for node in nodes:
511                 node.kill_qemu()
512         return True
513
514     #################### display config
515     def show(self):
516         "show test configuration after localization"
517         self.show_pass(1)
518         self.show_pass(2)
519         return True
520
521     # ugly hack to make sure 'run export' only reports about the 1st plc
522     # to avoid confusion - also we use 'inri_slice1' in various aliases..
523     exported_id = 1
524     def export(self):
525         "print cut'n paste-able stuff to export env variables to your shell"
526         # guess local domain from hostname
527         if TestPlc.exported_id > 1:
528             print("export GUESTHOSTNAME{:d}={}".format(TestPlc.exported_id, self.plc_spec['vservername']))
529             return True
530         TestPlc.exported_id += 1
531         domain = socket.gethostname().split('.',1)[1]
532         fqdn   = "{}.{}".format(self.plc_spec['host_box'], domain)
533         print("export BUILD={}".format(self.options.buildname))
534         print("export PLCHOSTLXC={}".format(fqdn))
535         print("export GUESTNAME={}".format(self.vservername))
536         print("export GUESTHOSTNAME={}.{}".format(self.vplchostname, domain))
537         # find hostname of first node
538         hostname, qemubox = self.all_node_infos()[0]
539         print("export KVMHOST={}.{}".format(qemubox, domain))
540         print("export NODE={}".format(hostname))
541         return True
542
543     # entry point
544     always_display_keys=['PLC_WWW_HOST', 'nodes', 'sites']
545     def show_pass(self, passno):
546         for (key,val) in self.plc_spec.items():
547             if not self.options.verbose and key not in TestPlc.always_display_keys:
548                 continue
549             if passno == 2:
550                 if key == 'sites':
551                     for site in val:
552                         self.display_site_spec(site)
553                         for node in site['nodes']:
554                             self.display_node_spec(node)
555                 elif key == 'initscripts':
556                     for initscript in val:
557                         self.display_initscript_spec(initscript)
558                 elif key == 'slices':
559                     for slice in val:
560                         self.display_slice_spec(slice)
561                 elif key == 'keys':
562                     for key in val:
563                         self.display_key_spec(key)
564             elif passno == 1:
565                 if key not in ['sites', 'initscripts', 'slices', 'keys']:
566                     print('+   ', key, ':', val)
567
568     def display_site_spec(self, site):
569         print('+ ======== site', site['site_fields']['name'])
570         for k,v in site.items():
571             if not self.options.verbose and k not in TestPlc.always_display_keys:
572                 continue
573             if k == 'nodes':
574                 if v:
575                     print('+       ','nodes : ', end=' ')
576                     for node in v:
577                         print(node['node_fields']['hostname'],'', end=' ')
578                     print('')
579             elif k == 'users':
580                 if v:
581                     print('+       users : ', end=' ')
582                     for user in v:
583                         print(user['name'],'', end=' ')
584                     print('')
585             elif k == 'site_fields':
586                 print('+       login_base', ':', v['login_base'])
587             elif k == 'address_fields':
588                 pass
589             else:
590                 print('+       ', end=' ')
591                 utils.pprint(k, v)
592
593     def display_initscript_spec(self, initscript):
594         print('+ ======== initscript', initscript['initscript_fields']['name'])
595
596     def display_key_spec(self, key):
597         print('+ ======== key', key['key_name'])
598
599     def display_slice_spec(self, slice):
600         print('+ ======== slice', slice['slice_fields']['name'])
601         for k,v in slice.items():
602             if k == 'nodenames':
603                 if v:
604                     print('+       nodes : ', end=' ')
605                     for nodename in v:
606                         print(nodename,'', end=' ')
607                     print('')
608             elif k == 'usernames':
609                 if v:
610                     print('+       users : ', end=' ')
611                     for username in v:
612                         print(username,'', end=' ')
613                     print('')
614             elif k == 'slice_fields':
615                 print('+       fields',':', end=' ')
616                 print('max_nodes=',v['max_nodes'], end=' ')
617                 print('')
618             else:
619                 print('+       ',k,v)
620
621     def display_node_spec(self, node):
622         print("+           node={} host_box={}".format(node['name'], node['host_box']), end=' ')
623         print("hostname=", node['node_fields']['hostname'], end=' ')
624         print("ip=", node['interface_fields']['ip'])
625         if self.options.verbose:
626             utils.pprint("node details", node, depth=3)
627
628     # another entry point for just showing the boxes involved
629     def display_mapping(self):
630         TestPlc.display_mapping_plc(self.plc_spec)
631         return True
632
633     @staticmethod
634     def display_mapping_plc(plc_spec):
635         print('+ MyPLC',plc_spec['name'])
636         # WARNING this would not be right for lxc-based PLC's - should be harmless though
637         print('+\tvserver address = root@{}:/vservers/{}'.format(plc_spec['host_box'], plc_spec['vservername']))
638         print('+\tIP = {}/{}'.format(plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip']))
639         for site_spec in plc_spec['sites']:
640             for node_spec in site_spec['nodes']:
641                 TestPlc.display_mapping_node(node_spec)
642
643     @staticmethod
644     def display_mapping_node(node_spec):
645         print('+   NODE {}'.format(node_spec['name']))
646         print('+\tqemu box {}'.format(node_spec['host_box']))
647         print('+\thostname={}'.format(node_spec['node_fields']['hostname']))
648
649     # write a timestamp in /vservers/<>.timestamp
650     # cannot be inside the vserver, that causes vserver .. build to cough
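    # for illustration, with a guest named 'inci-2-vplc01' (hypothetical name) the stamp
    # ends up in /vservers/inci-2-vplc01/inci-2-vplc01.timestamp on the host box,
    # holding the creation time as an integer epoch (see vm_timestamp_path above)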
651     def plcvm_timestamp(self):
652         "Create a timestamp to remember creation date for this plc"
653         now = int(time.time())
654         # TODO-lxc check this one
655         # a first approx. is to store the timestamp close to the VM root like vs does
656         stamp_path = self.vm_timestamp_path()
657         stamp_dir = os.path.dirname(stamp_path)
658         utils.system(self.test_ssh.actual_command("mkdir -p {}".format(stamp_dir)))
659         return utils.system(self.test_ssh.actual_command("echo {:d} > {}".format(now, stamp_path))) == 0
660
661     # this is called unconditionally at the beginning of the test sequence
662     # just in case this is a rerun, so if the vm is not running it's fine
663     def plcvm_delete(self):
664         "vserver delete the test myplc"
665         stamp_path = self.vm_timestamp_path()
666         self.run_in_host("rm -f {}".format(stamp_path))
667         self.run_in_host("virsh -c lxc:// destroy {}".format(self.vservername))
668         self.run_in_host("virsh -c lxc:// undefine {}".format(self.vservername))
669         self.run_in_host("rm -fr /vservers/{}".format(self.vservername))
670         return True
671
672     ### install
673     # historically the build was being fetched by the tests
674     # now the build pushes itself as a subdir of the tests workdir
675     # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
676     def plcvm_create(self):
677         "vserver creation (no install done)"
678         # push the local build/ dir to the testplc box
679         if self.is_local():
680             # a full path for the local calls
681             build_dir = os.path.dirname(sys.argv[0])
682             # sometimes this is empty - set to "." in such a case
683             if not build_dir:
684                 build_dir="."
685             build_dir += "/build"
686         else:
687             # use a standard name - will be relative to remote buildname
688             build_dir = "build"
689             # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
690             self.test_ssh.rmdir(build_dir)
691             self.test_ssh.copy(build_dir, recursive=True)
692         # the repo url is taken from arch-rpms-url
693         # with the last step (i386) removed
694         repo_url = self.options.arch_rpms_url
695         for level in [ 'arch' ]:
696             repo_url = os.path.dirname(repo_url)
697
698         # invoke initvm (drop support for vs)
699         script = "lbuild-initvm.sh"
700         script_options = ""
701         # pass the vbuild-nightly options to [lv]test-initvm
702         script_options += " -p {}".format(self.options.personality)
703         script_options += " -d {}".format(self.options.pldistro)
704         script_options += " -f {}".format(self.options.fcdistro)
705         script_options += " -r {}".format(repo_url)
706         vserver_name = self.vservername
707         try:
708             vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
709             script_options += " -n {}".format(vserver_hostname)
710         except:
711             print("Cannot reverse lookup {}".format(self.vserverip))
712             print("This is considered fatal, as this might pollute the test results")
713             return False
714         create_vserver="{build_dir}/{script} {script_options} {vserver_name}".format(**locals())
715         return self.run_in_host(create_vserver) == 0
716
717     ### install django through pip
718     def django_install(self):
719         # plcapi requires Django, which is no longer provided by fedora as an rpm,
720         # so we use pip instead
721         """
722         pip install Django
723         """
724         return self.pip_install('Django')
725
726     ### install_rpm
727     def plc_install(self):
728         """
729         yum install myplc, noderepo
730         """
731
732         # compute nodefamily
733         if self.options.personality == "linux32":
734             arch = "i386"
735         elif self.options.personality == "linux64":
736             arch = "x86_64"
737         else:
738             raise Exception("Unsupported personality {}".format(self.options.personality))
739         nodefamily = "{}-{}-{}".format(self.options.pldistro, self.options.fcdistro, arch)
740
741         pkgs_list=[]
742         pkgs_list.append("slicerepo-{}".format(nodefamily))
743         pkgs_list.append("myplc")
744         pkgs_list.append("noderepo-{}".format(nodefamily))
745         pkgs_string=" ".join(pkgs_list)
746         return self.yum_install(pkgs_list)
747
748     def install_syslinux6(self):
749         """
750         install syslinux6 from the fedora21 release
751         """
752         key = 'http://mirror.onelab.eu/keys/RPM-GPG-KEY-fedora-21-primary'
753
754         rpms = [
755             'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-6.03-1.fc21.x86_64.rpm',
756             'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-nonlinux-6.03-1.fc21.noarch.rpm',
757             'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-perl-6.03-1.fc21.x86_64.rpm',
758         ]
759         # this can be done several times
760         self.run_in_guest("rpm --import {key}".format(**locals()))
761         return self.run_in_guest("yum -y localinstall {}".format(" ".join(rpms))) == 0
762
763     def bonding_builds(self):
764         """
765         list /etc/yum.repos.d on the myplc side
766         """
767         self.run_in_guest("ls /etc/yum.repos.d/*partial.repo")
768         return True
769
770     def bonding_nodes(self):
771         """
772         List nodes known to the myplc together with their nodefamily
773         """
774         print("---------------------------------------- nodes")
775         for node in self.apiserver.GetNodes(self.auth_root()):
776             print("{} -> {}".format(node['hostname'],
777                                     self.apiserver.GetNodeFlavour(self.auth_root(),node['hostname'])['nodefamily']))
778         print("---------------------------------------- nodes")
779         return True
780
781     ###
782     def mod_python(self):
783         """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
784         return self.yum_install( ['mod_python'] )
785
786     ###
787     def plc_configure(self):
788         "run plc-config-tty"
789         tmpname = '{}.plc-config-tty'.format(self.name())
790         with open(tmpname,'w') as fileconf:
791             for var, value in self.plc_spec['settings'].items():
792                 fileconf.write('e {}\n{}\n'.format(var, value))
793             fileconf.write('w\n')
794             fileconf.write('q\n')
795         utils.system('cat {}'.format(tmpname))
796         self.run_in_guest_piped('cat {}'.format(tmpname), 'plc-config-tty')
797         utils.system('rm {}'.format(tmpname))
798         return True
799
800     # care only about f>=25
801     def start_stop_service(self, service, start_or_stop):
802         "utility to start/stop an old-fashioned service (plc)"
803         return self.run_in_guest("service {} {}".format(service, start_or_stop)) == 0
804
805     def start_stop_systemd(self, service, start_or_stop):
806         "utility to start/stop a systemd-defined service (sfa)"
807         return self.run_in_guest("systemctl {} {}".format(start_or_stop, service)) == 0
808
809     def plc_start(self):
810         "service plc start"
811         return self.start_stop_service('plc', 'start')
812
813     def plc_stop(self):
814         "service plc stop"
815         return self.start_stop_service('plc', 'stop')
816
817     def plcvm_start(self):
818         "start the PLC vserver"
819         self.start_guest()
820         return True
821
822     def plcvm_stop(self):
823         "stop the PLC vserver"
824         self.stop_guest()
825         return True
826
827     # stores the keys from the config for further use
828     def keys_store(self):
829         "stores test users ssh keys in keys/"
830         for key_spec in self.plc_spec['keys']:
831             TestKey(self, key_spec).store_key()
832         return True
833
834     def keys_clean(self):
835         "removes keys cached in keys/"
836         utils.system("rm -rf ./keys")
837         return True
838
839     # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
840     # for later direct access to the nodes
841     def keys_fetch(self):
842         "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
843         dir="./keys"
844         if not os.path.isdir(dir):
845             os.mkdir(dir)
846         vservername = self.vservername
847         vm_root = self.vm_root_in_host()
848         overall = True
849         prefix = 'debug_ssh_key'
850         for ext in ['pub', 'rsa'] :
851             src = "{vm_root}/etc/planetlab/{prefix}.{ext}".format(**locals())
852             dst = "keys/{vservername}-debug.{ext}".format(**locals())
853             if self.test_ssh.fetch(src, dst) != 0:
854                 overall=False
855         return overall
856
857     def sites(self):
858         "create sites with PLCAPI"
859         return self.do_sites()
860
861     def delete_sites(self):
862         "delete sites with PLCAPI"
863         return self.do_sites(action="delete")
864
865     def do_sites(self, action="add"):
866         for site_spec in self.plc_spec['sites']:
867             test_site = TestSite(self,site_spec)
868             if (action != "add"):
869                 utils.header("Deleting site {} in {}".format(test_site.name(), self.name()))
870                 test_site.delete_site()
871                 # deleted with the site
872                 #test_site.delete_users()
873                 continue
874             else:
875                 utils.header("Creating site {} & users in {}".format(test_site.name(), self.name()))
876                 test_site.create_site()
877                 test_site.create_users()
878         return True
879
880     def delete_all_sites(self):
881         "Delete all sites in PLC, and related objects"
882         print('auth_root', self.auth_root())
883         sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
884         for site in sites:
885             # keep the automatic site - otherwise we shoot ourselves in the foot, as root_auth would no longer be valid
886             if site['login_base'] == self.plc_spec['settings']['PLC_SLICE_PREFIX']:
887                 continue
888             site_id = site['site_id']
889             print('Deleting site_id', site_id)
890             self.apiserver.DeleteSite(self.auth_root(), site_id)
891         return True
892
893     def nodes(self):
894         "create nodes with PLCAPI"
895         return self.do_nodes()
896     def delete_nodes(self):
897         "delete nodes with PLCAPI"
898         return self.do_nodes(action="delete")
899
900     def do_nodes(self, action="add"):
901         for site_spec in self.plc_spec['sites']:
902             test_site = TestSite(self, site_spec)
903             if action != "add":
904                 utils.header("Deleting nodes in site {}".format(test_site.name()))
905                 for node_spec in site_spec['nodes']:
906                     test_node = TestNode(self, test_site, node_spec)
907                     utils.header("Deleting {}".format(test_node.name()))
908                     test_node.delete_node()
909             else:
910                 utils.header("Creating nodes for site {} in {}".format(test_site.name(), self.name()))
911                 for node_spec in site_spec['nodes']:
912                     utils.pprint('Creating node {}'.format(node_spec), node_spec)
913                     test_node = TestNode(self, test_site, node_spec)
914                     test_node.create_node()
915         return True
916
917     def nodegroups(self):
918         "create nodegroups with PLCAPI"
919         return self.do_nodegroups("add")
920     def delete_nodegroups(self):
921         "delete nodegroups with PLCAPI"
922         return self.do_nodegroups("delete")
923
924     YEAR = 365*24*3600
925     @staticmethod
926     def translate_timestamp(start, grain, timestamp):
927         if timestamp < TestPlc.YEAR:
928             return start + timestamp*grain
929         else:
930             return timestamp
931
932     @staticmethod
933     def timestamp_printable(timestamp):
934         return time.strftime('%m-%d %H:%M:%S UTC', time.gmtime(timestamp))
935
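    # Worked example for translate_timestamp (values made up): with start=1_600_000_000
    # and grain=1800, a spec timestamp of 3 is treated as relative (3 < YEAR) and
    # translates to 1_600_000_000 + 3*1800 = 1_600_005_400, whereas a full epoch
    # timestamp (>= YEAR) is returned unchanged.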
936     def leases(self):
937         "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
938         now = int(time.time())
939         grain = self.apiserver.GetLeaseGranularity(self.auth_root())
940         print('API answered grain=', grain)
941         start = (now//grain)*grain
942         start += grain
943         # find out all nodes that are reservable
944         nodes = self.all_reservable_nodenames()
945         if not nodes:
946             utils.header("No reservable node found - proceeding without leases")
947             return True
948         ok = True
949         # attach them to the leases as specified in plc_specs
950         # this is where the 'leases' field gets interpreted as relative or absolute
951         for lease_spec in self.plc_spec['leases']:
952             # skip the ones that come with a null slice id
953             if not lease_spec['slice']:
954                 continue
955             lease_spec['t_from']  = TestPlc.translate_timestamp(start, grain, lease_spec['t_from'])
956             lease_spec['t_until'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_until'])
957             lease_addition = self.apiserver.AddLeases(self.auth_root(), nodes, lease_spec['slice'],
958                                                       lease_spec['t_from'], lease_spec['t_until'])
959             if lease_addition['errors']:
960                 utils.header("Cannot create leases, {}".format(lease_addition['errors']))
961                 ok = False
962             else:
963                 utils.header('Leases on nodes {} for {} from {:d} ({}) until {:d} ({})'\
964                              .format(nodes, lease_spec['slice'],
965                                      lease_spec['t_from'],  TestPlc.timestamp_printable(lease_spec['t_from']),
966                                      lease_spec['t_until'], TestPlc.timestamp_printable(lease_spec['t_until'])))
967
968         return ok
969
970     def delete_leases(self):
971         "remove all leases in the myplc side"
972         lease_ids = [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
973         utils.header("Cleaning leases {}".format(lease_ids))
974         self.apiserver.DeleteLeases(self.auth_root(), lease_ids)
975         return True
976
977     def list_leases(self):
978         "list all leases known to the myplc"
979         leases = self.apiserver.GetLeases(self.auth_root())
980         now = int(time.time())
981         for l in leases:
982             current = l['t_until'] >= now
983             if self.options.verbose or current:
984                 utils.header("{} {} from {} until {}"\
985                              .format(l['hostname'], l['name'],
986                                      TestPlc.timestamp_printable(l['t_from']),
987                                      TestPlc.timestamp_printable(l['t_until'])))
988         return True
989
990     # create nodegroups if needed, and populate
991     def do_nodegroups(self, action="add"):
992         # 1st pass to scan contents
993         groups_dict = {}
994         for site_spec in self.plc_spec['sites']:
995             test_site = TestSite(self,site_spec)
996             for node_spec in site_spec['nodes']:
997                 test_node = TestNode(self, test_site, node_spec)
998                 if 'nodegroups' in node_spec:
999                     nodegroupnames = node_spec['nodegroups']
1000                     if isinstance(nodegroupnames, str):
1001                         nodegroupnames = [ nodegroupnames ]
1002                     for nodegroupname in nodegroupnames:
1003                         if nodegroupname not in groups_dict:
1004                             groups_dict[nodegroupname] = []
1005                         groups_dict[nodegroupname].append(test_node.name())
1006         auth = self.auth_root()
1007         overall = True
1008         for (nodegroupname,group_nodes) in groups_dict.items():
1009             if action == "add":
1010                 print('nodegroups:', 'dealing with nodegroup',\
1011                     nodegroupname, 'on nodes', group_nodes)
1012                 # first, check if the nodetagtype is here
1013                 tag_types = self.apiserver.GetTagTypes(auth, {'tagname':nodegroupname})
1014                 if tag_types:
1015                     tag_type_id = tag_types[0]['tag_type_id']
1016                 else:
1017                     tag_type_id = self.apiserver.AddTagType(auth,
1018                                                             {'tagname' : nodegroupname,
1019                                                              'description' : 'for nodegroup {}'.format(nodegroupname),
1020                                                              'category' : 'test'})
1021                 print('located tag (type)', nodegroupname, 'as', tag_type_id)
1022                 # create nodegroup
1023                 nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname' : nodegroupname})
1024                 if not nodegroups:
1025                     self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
1026                     print('created nodegroup', nodegroupname, \
1027                         'from tagname', nodegroupname, 'and value', 'yes')
1028                 # set node tag on all nodes, value='yes'
1029                 for nodename in group_nodes:
1030                     try:
1031                         self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
1032                     except:
1033                         traceback.print_exc()
1034                         print('node', nodename, 'seems to already have tag', nodegroupname)
1035                     # check anyway
1036                     try:
1037                         expect_yes = self.apiserver.GetNodeTags(auth,
1038                                                                 {'hostname' : nodename,
1039                                                                  'tagname'  : nodegroupname},
1040                                                                 ['value'])[0]['value']
1041                         if expect_yes != "yes":
1042                             print('Mismatch node tag on node',nodename,'got',expect_yes)
1043                             overall = False
1044                     except:
1045                         if not self.options.dry_run:
1046                             print('Cannot find tag', nodegroupname, 'on node', nodename)
1047                             overall = False
1048             else:
1049                 try:
1050                     print('cleaning nodegroup', nodegroupname)
1051                     self.apiserver.DeleteNodeGroup(auth, nodegroupname)
1052                 except:
1053                     traceback.print_exc()
1054                     overall = False
1055         return overall
1056
1057     # a list of TestNode objs
1058     def all_nodes(self):
1059         nodes=[]
1060         for site_spec in self.plc_spec['sites']:
1061             test_site = TestSite(self,site_spec)
1062             for node_spec in site_spec['nodes']:
1063                 nodes.append(TestNode(self, test_site, node_spec))
1064         return nodes
1065
1066     # return a list of tuples (nodename,qemuname)
1067     def all_node_infos(self) :
1068         node_infos = []
1069         for site_spec in self.plc_spec['sites']:
1070             node_infos += [ (node_spec['node_fields']['hostname'], node_spec['host_box']) \
1071                                 for node_spec in site_spec['nodes'] ]
1072         return node_infos
1073
1074     def all_nodenames(self):
1075         return [ x[0] for x in self.all_node_infos() ]
1076     def all_reservable_nodenames(self):
1077         res = []
1078         for site_spec in self.plc_spec['sites']:
1079             for node_spec in site_spec['nodes']:
1080                 node_fields = node_spec['node_fields']
1081                 if 'node_type' in node_fields and node_fields['node_type'] == 'reservable':
1082                     res.append(node_fields['hostname'])
1083         return res
1084
1085     # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
1086     def nodes_check_boot_state(self, target_boot_state, timeout_minutes,
1087                                silent_minutes, period_seconds = 15):
1088         if self.options.dry_run:
1089             print('dry_run')
1090             return True
1091
1092         class CompleterTaskBootState(CompleterTask):
1093             def __init__(self, test_plc, hostname):
1094                 self.test_plc = test_plc
1095                 self.hostname = hostname
1096                 self.last_boot_state = 'undef'
1097             def actual_run(self):
1098                 try:
1099                     node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(),
1100                                                             [ self.hostname ],
1101                                                             ['boot_state'])[0]
1102                     self.last_boot_state = node['boot_state']
1103                     return self.last_boot_state == target_boot_state
1104                 except:
1105                     return False
1106             def message(self):
1107                 return "CompleterTaskBootState with node {}".format(self.hostname)
1108             def failure_epilogue(self):
1109                 print("node {} in state {} - expected {}"\
1110                     .format(self.hostname, self.last_boot_state, target_boot_state))
1111
1112         timeout = timedelta(minutes=timeout_minutes)
1113         graceout = timedelta(minutes=silent_minutes)
1114         period   = timedelta(seconds=period_seconds)
1115         # the nodes that haven't checked yet - start with a full list and shrink over time
1116         utils.header("checking nodes boot state (expected {})".format(target_boot_state))
1117         tasks = [ CompleterTaskBootState(self,hostname) \
1118                       for (hostname,_) in self.all_node_infos() ]
1119         message = 'check_boot_state={}'.format(target_boot_state)
1120         return Completer(tasks, message=message).run(timeout, graceout, period)
1121
1122     def nodes_booted(self):
1123         return self.nodes_check_boot_state('boot', timeout_minutes=30, silent_minutes=28)
1124
1125     def probe_kvm_iptables(self):
1126         (_,kvmbox) = self.all_node_infos()[0]
1127         TestSsh(kvmbox).run("iptables-save")
1128         return True
1129
1130     # probing nodes
1131     def check_nodes_ping(self, timeout_seconds=60, period_seconds=10):
1132         class CompleterTaskPingNode(CompleterTask):
1133             def __init__(self, hostname):
1134                 self.hostname = hostname
1135             def run(self, silent):
1136                 command="ping -c 1 -w 1 {} >& /dev/null".format(self.hostname)
1137                 return utils.system(command, silent=silent) == 0
1138             def failure_epilogue(self):
1139                 print("Cannot ping node with name {}".format(self.hostname))
1140         timeout = timedelta(seconds = timeout_seconds)
1141         graceout = timeout
1142         period = timedelta(seconds = period_seconds)
1143         node_infos = self.all_node_infos()
1144         tasks = [ CompleterTaskPingNode(h) for (h,_) in node_infos ]
1145         return Completer(tasks, message='ping_node').run(timeout, graceout, period)
1146
1147     # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1148     def ping_node(self):
1149         "Ping nodes"
1150         return self.check_nodes_ping()
1151
1152     def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period_seconds=15):
1153         # various delays
1154         timeout  = timedelta(minutes=timeout_minutes)
1155         graceout = timedelta(minutes=silent_minutes)
1156         period   = timedelta(seconds=period_seconds)
1157         vservername = self.vservername
1158         if debug:
1159             message = "debug"
1160             completer_message = 'ssh_node_debug'
1161             local_key = "keys/{vservername}-debug.rsa".format(**locals())
1162         else:
1163             message = "boot"
1164             completer_message = 'ssh_node_boot'
1165             local_key = "keys/key_admin.rsa"
1166         utils.header("checking ssh access to nodes (expected in {} mode)".format(message))
1167         node_infos = self.all_node_infos()
1168         tasks = [ CompleterTaskNodeSsh(nodename, qemuname, local_key,
1169                                         boot_state=message, dry_run=self.options.dry_run) \
1170                       for (nodename, qemuname) in node_infos ]
1171         return Completer(tasks, message=completer_message).run(timeout, graceout, period)
1172
1173     def ssh_node_debug(self):
1174         "Tries to ssh into nodes in debug mode with the debug ssh key"
1175         return self.check_nodes_ssh(debug = True,
1176                                     timeout_minutes = self.ssh_node_debug_timeout,
1177                                     silent_minutes = self.ssh_node_debug_silent)
1178
1179     def ssh_node_boot(self):
1180         "Tries to ssh into nodes in production mode with the root ssh key"
1181         return self.check_nodes_ssh(debug = False,
1182                                     timeout_minutes = self.ssh_node_boot_timeout,
1183                                     silent_minutes = self.ssh_node_boot_silent)
1184
1185     def node_bmlogs(self):
1186         "Checks that there's a non-empty dir. /var/log/bm/raw"
1187         return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw")) == 0
1188
1189     @node_mapper
1190     def qemu_local_init(self): pass
1191     @node_mapper
1192     def bootcd(self): pass
1193     @node_mapper
1194     def qemu_local_config(self): pass
1195     @node_mapper
1196     def qemu_export(self): pass
1197     @node_mapper
1198     def qemu_cleanlog(self): pass
1199     @node_mapper
1200     def nodestate_reinstall(self): pass
1201     @node_mapper
1202     def nodestate_upgrade(self): pass
1203     @node_mapper
1204     def nodestate_safeboot(self): pass
1205     @node_mapper
1206     def nodestate_boot(self): pass
1207     @node_mapper
1208     def nodestate_show(self): pass
1209     @node_mapper
1210     def nodedistro_f14(self): pass
1211     @node_mapper
1212     def nodedistro_f18(self): pass
1213     @node_mapper
1214     def nodedistro_f20(self): pass
1215     @node_mapper
1216     def nodedistro_f21(self): pass
1217     @node_mapper
1218     def nodedistro_f22(self): pass
1219     @node_mapper
1220     def nodedistro_show(self): pass
1221
1222     ### check hooks : invoke scripts from hooks/{node,slice}
1223     def check_hooks_node(self):
1224         return self.locate_first_node().check_hooks()
1225     def check_hooks_sliver(self) :
1226         return self.locate_first_sliver().check_hooks()
1227
1228     def check_hooks(self):
1229         "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1230         return self.check_hooks_node() and self.check_hooks_sliver()
1231
1232     ### initscripts
1233     def do_check_initscripts(self):
1234         class CompleterTaskInitscript(CompleterTask):
1235             def __init__(self, test_sliver, stamp):
1236                 self.test_sliver = test_sliver
1237                 self.stamp = stamp
1238             def actual_run(self):
1239                 return self.test_sliver.check_initscript_stamp(self.stamp)
1240             def message(self):
1241                 return "initscript checker for {}".format(self.test_sliver.name())
1242             def failure_epilogue(self):
1243                 print("initscript stamp {} not found in sliver {}"\
1244                     .format(self.stamp, self.test_sliver.name()))
1245
1246         tasks = []
1247         for slice_spec in self.plc_spec['slices']:
1248             if 'initscriptstamp' not in slice_spec:
1249                 continue
1250             stamp = slice_spec['initscriptstamp']
1251             slicename = slice_spec['slice_fields']['name']
1252             for nodename in slice_spec['nodenames']:
1253                 print('nodename', nodename, 'slicename', slicename, 'stamp', stamp)
1254                 site,node = self.locate_node(nodename)
1255                 # xxx - passing the wrong site - probably harmless
1256                 test_site = TestSite(self, site)
1257                 test_slice = TestSlice(self, test_site, slice_spec)
1258                 test_node = TestNode(self, test_site, node)
1259                 test_sliver = TestSliver(self, test_node, test_slice)
1260                 tasks.append(CompleterTaskInitscript(test_sliver, stamp))
1261         return Completer(tasks, message='check_initscripts').\
1262             run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1263
1264     def check_initscripts(self):
1265         "check that the initscripts have triggered"
1266         return self.do_check_initscripts()
1267
1268     def initscripts(self):
1269         "create initscripts with PLCAPI"
1270         for initscript in self.plc_spec['initscripts']:
1271             utils.pprint('Adding Initscript in plc {}'.format(self.plc_spec['name']), initscript)
1272             self.apiserver.AddInitScript(self.auth_root(), initscript['initscript_fields'])
1273         return True
1274
1275     def delete_initscripts(self):
1276         "delete initscripts with PLCAPI"
1277         for initscript in self.plc_spec['initscripts']:
1278             initscript_name = initscript['initscript_fields']['name']
1279             print(('Attempting to delete {} in plc {}'.format(initscript_name, self.plc_spec['name'])))
1280             try:
1281                 self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
1282                 print(initscript_name, 'deleted')
1283             except:
1284                 print('deletion went wrong - probably did not exist')
1285         return True
1286
1287     ### manage slices
1288     def slices(self):
1289         "create slices with PLCAPI"
1290         return self.do_slices(action="add")
1291
1292     def delete_slices(self):
1293         "delete slices with PLCAPI"
1294         return self.do_slices(action="delete")
1295
1296     def fill_slices(self):
1297         "add nodes in slices with PLCAPI"
1298         return self.do_slices(action="fill")
1299
1300     def empty_slices(self):
1301         "remove nodes from slices with PLCAPI"
1302         return self.do_slices(action="empty")
1303
1304     def do_slices(self,  action="add"):
1305         for slice in self.plc_spec['slices']:
1306             site_spec = self.locate_site(slice['sitename'])
1307             test_site = TestSite(self,site_spec)
1308             test_slice=TestSlice(self,test_site,slice)
1309             if action == "delete":
1310                 test_slice.delete_slice()
1311             elif action == "fill":
1312                 test_slice.add_nodes()
1313             elif action == "empty":
1314                 test_slice.delete_nodes()
1315             else:
1316                 test_slice.create_slice()
1317         return True
1318
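    # the arguments below are presumably (timeout_minutes, silent_minutes, period_seconds),
    # following the same pattern as check_nodes_ssh above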
1319     @slice_mapper__tasks(20, 10, 15)
1320     def ssh_slice(self): pass
1321     @slice_mapper__tasks(20, 19, 15)
1322     def ssh_slice_off(self): pass
1323     @slice_mapper__tasks(1, 1, 15)
1324     def slice_fs_present(self): pass
1325     @slice_mapper__tasks(1, 1, 15)
1326     def slice_fs_deleted(self): pass
1327
1328     # use another name so we can exclude/ignore it from the tests on the nightly command line
1329     def ssh_slice_again(self): return self.ssh_slice()
1330     # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1331     # but for some reason the ignore-wrapping thing would not
1332
1333     @slice_mapper
1334     def ssh_slice_basics(self): pass
1335     @slice_mapper
1336     def check_vsys_defaults(self): pass
1337
1338     @node_mapper
1339     def keys_clear_known_hosts(self): pass
1340
1341     def plcapi_urls(self):
1342         """
1343         attempts to reach the PLCAPI with various forms for the URL
1344         """
1345         return PlcapiUrlScanner(self.auth_root(), ip=self.vserverip).scan()
1346
1347     def speed_up_slices(self):
1348         "tweak nodemanager cycle (wait time) to 30+/-10 s"
1349         return self._speed_up_slices (30, 10)
1350     def super_speed_up_slices(self):
1351         "dev mode: tweak nodemanager cycle (wait time) to 5+/-1 s"
1352         return self._speed_up_slices(5, 1)
1353
1354     def _speed_up_slices(self, p, r):
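        # p and r end up as nodemanager's -p and -r options; judging from the callers'
        # docstrings ('30+/-10 s', '5+/-1 s'), p is presumably the cycle period and
        # r the random jitter, both in seconds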
1355         # create the template on the server-side
1356         template = "{}.nodemanager".format(self.name())
1357         with open(template,"w") as template_file:
1358             template_file.write('OPTIONS="-p {} -r {} -d"\n'.format(p, r))
1359         in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1360         remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1361         self.test_ssh.copy_abs(template, remote)
1362         # Add a conf file
1363         if not self.apiserver.GetConfFiles(self.auth_root(),
1364                                            {'dest' : '/etc/sysconfig/nodemanager'}):
1365             self.apiserver.AddConfFile(self.auth_root(),
1366                                         {'dest' : '/etc/sysconfig/nodemanager',
1367                                          'source' : 'PlanetLabConf/nodemanager',
1368                                          'postinstall_cmd' : 'service nm restart',})
1369         return True
1370
1371     def debug_nodemanager(self):
1372         "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1373         template = "{}.nodemanager".format(self.name())
1374         with open(template,"w") as template_file:
1375             template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1376         in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1377         remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1378         self.test_ssh.copy_abs(template, remote)
1379         return True
1380
1381     @node_mapper
1382     def qemu_start(self) : pass
1383
1384     @node_mapper
1385     def qemu_timestamp(self) : pass
1386
1387     @node_mapper
1388     def qemu_nodefamily(self): pass
1389
1390     # when a spec refers to a node possibly on another plc
1391     def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
1392         for plc in [ self ] + other_plcs:
1393             try:
1394                 return plc.locate_sliver_obj(nodename, slicename)
1395             except:
1396                 pass
1397         raise Exception("Cannot locate sliver {}@{} among all PLCs".format(nodename, slicename))
1398
1399     # implement this one as a cross step so that we can take advantage of different nodes
1400     # in multi-plcs mode
1401     def cross_check_tcp(self, other_plcs):
1402         "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1403         if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1404             utils.header("check_tcp: no/empty config found")
1405             return True
1406         specs = self.plc_spec['tcp_specs']
1407         overall = True
1408
1409         # first wait for the network to be up and ready from the slices
1410         class CompleterTaskNetworkReadyInSliver(CompleterTask):
1411             def __init__(self, test_sliver):
1412                 self.test_sliver = test_sliver
1413             def actual_run(self):
1414                 return self.test_sliver.check_tcp_ready(port = 9999)
1415             def message(self):
1416                 return "network ready checker for {}".format(self.test_sliver.name())
1417             def failure_epilogue(self):
1418                 print("could not bind port from sliver {}".format(self.test_sliver.name()))
1419
1420         sliver_specs = {}
1421         tasks = []
1422         managed_sliver_names = set()
1423         for spec in specs:
1424             # locate the TestSliver instances involved, and cache them in the spec instance
1425             spec['s_sliver'] = self.locate_sliver_obj_cross(spec['server_node'], spec['server_slice'], other_plcs)
1426             spec['c_sliver'] = self.locate_sliver_obj_cross(spec['client_node'], spec['client_slice'], other_plcs)
1427             message = "Will check TCP between s={} and c={}"\
1428                       .format(spec['s_sliver'].name(), spec['c_sliver'].name())
1429             if 'client_connect' in spec:
1430                 message += " (using {})".format(spec['client_connect'])
1431             utils.header(message)
1432             # we need to check network presence in both slivers, but also
1433             # avoid inserting the same sliver several times
1434             for sliver in [ spec['s_sliver'], spec['c_sliver'] ]:
1435                 if sliver.name() not in managed_sliver_names:
1436                     tasks.append(CompleterTaskNetworkReadyInSliver(sliver))
1437                     # add this sliver's name to the set
1438                     managed_sliver_names.add(sliver.name())
1439
1440         # wait for the network to be OK on all server sides
1441         if not Completer(tasks, message='check for network readiness in slivers').\
1442            run(timedelta(seconds=30), timedelta(seconds=24), period=timedelta(seconds=5)):
1443             return False
1444
1445         # run server and client
1446         for spec in specs:
1447             port = spec['port']
1448             # server side
1449             # the issue here is that the server runs in the background,
1450             # so we have no clue whether it actually took off properly;
1451             # it looks like in some cases it does not
1452             address = spec['s_sliver'].test_node.name()
1453             if not spec['s_sliver'].run_tcp_server(address, port, timeout=20):
1454                 overall = False
1455                 break
1456
1457             # idem for the client side
1458             # use nodename from located sliver, unless 'client_connect' is set
1459             if 'client_connect' in spec:
1460                 destination = spec['client_connect']
1461             else:
1462                 destination = spec['s_sliver'].test_node.name()
1463             if not spec['c_sliver'].run_tcp_client(destination, port):
1464                 overall = False
1465         return overall
1466
1467     # painfully enough, we need to allow for some time as netflow might show up last
1468     def check_system_slice(self):
1469         "all nodes: check that a system slice is alive"
1470         # netflow currently not working in the lxc distro
1471         # drl not built at all in the wtx distro
1472         # if we find either of them we're happy
1473         return self.check_netflow() or self.check_drl()
1474
1475     # expose these
1476     def check_netflow(self): return self._check_system_slice('netflow')
1477     def check_drl(self): return self._check_system_slice('drl')
1478
1479     # we have the slices up already here, so it should not take too long
1480     def _check_system_slice(self, slicename, timeout_minutes=5, period_seconds=15):
1481         class CompleterTaskSystemSlice(CompleterTask):
1482             def __init__(self, test_node, dry_run):
1483                 self.test_node = test_node
1484                 self.dry_run = dry_run
1485             def actual_run(self):
1486                 return self.test_node._check_system_slice(slicename, dry_run=self.dry_run)
1487             def message(self):
1488                 return "System slice {} @ {}".format(slicename, self.test_node.name())
1489             def failure_epilogue(self):
1490                 print("COULD not find system slice {} @ {}".format(slicename, self.test_node.name()))
1491         timeout = timedelta(minutes=timeout_minutes)
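        # no grace period: presumably failures get reported from the very first attempt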
1492         silent  = timedelta(0)
1493         period  = timedelta(seconds=period_seconds)
1494         tasks = [ CompleterTaskSystemSlice(test_node, self.options.dry_run) \
1495                       for test_node in self.all_nodes() ]
1496         return Completer(tasks, message='_check_system_slice').run(timeout, silent, period)
1497
1498     def plcsh_stress_test(self):
1499         "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1500         # install the stress-test in the plc image
1501         location = "/usr/share/plc_api/plcsh_stress_test.py"
1502         remote = "{}/{}".format(self.vm_root_in_host(), location)
1503         self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1504         command = location
1505         command += " -- --check"
1506         if self.options.size == 1:
1507             command +=  " --tiny"
1508         return self.run_in_guest(command) == 0
1509
1510     # populate runs the same utility with slightly different options
1511     # in particular it runs with --preserve (don't clean up) and without --check
1512     # also it gets run twice, once with the --foreign option for creating fake foreign entries
1513
1514     def sfa_install_all(self):
1515         "yum install sfa sfa-plc sfa-sfatables sfa-client"
1516         return (self.yum_install("sfa sfa-plc sfa-sfatables sfa-client") and
1517                 self.run_in_guest("systemctl enable sfa-registry")==0 and
1518                 self.run_in_guest("systemctl enable sfa-aggregate")==0)
1519
1520     def sfa_install_core(self):
1521         "yum install sfa"
1522         return self.yum_install("sfa")
1523
1524     def sfa_install_plc(self):
1525         "yum install sfa-plc"
1526         return self.yum_install("sfa-plc")
1527
1528     def sfa_install_sfatables(self):
1529         "yum install sfa-sfatables"
1530         return self.yum_install("sfa-sfatables")
1531
1532     # for some very odd reason, this sometimes fails with the following symptom
1533     # # yum install sfa-client
1534     # Setting up Install Process
1535     # ...
1536     # Downloading Packages:
1537     # Running rpm_check_debug
1538     # Running Transaction Test
1539     # Transaction Test Succeeded
1540     # Running Transaction
1541     # Transaction couldn't start:
1542     # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1543     # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1544     # even though in the same context I have
1545     # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1546     # Filesystem            Size  Used Avail Use% Mounted on
1547     # /dev/hdv1             806G  264G  501G  35% /
1548     # none                   16M   36K   16M   1% /tmp
1549     #
1550     # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1551     def sfa_install_client(self):
1552         "yum install sfa-client"
1553         first_try = self.yum_install("sfa-client")
1554         if first_try:
1555             return True
1556         utils.header("********** Regular yum failed - special workaround in place, 2nd chance")
1557         code, cached_rpm_path = \
1558                 utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1559         utils.header("cached_rpm_path=<<{}>>".format(cached_rpm_path))
1560         # just for checking
1561         self.run_in_guest("rpm -i {}".format(cached_rpm_path))
1562         return self.yum_check_installed("sfa-client")
1563
1564     def sfa_dbclean(self):
1565         "thoroughly wipes off the SFA database"
1566         return self.run_in_guest("sfaadmin reg nuke") == 0 or \
1567             self.run_in_guest("sfa-nuke.py") == 0 or \
1568             self.run_in_guest("sfa-nuke-plc.py") == 0 or \
1569             self.run_in_guest("sfaadmin registry nuke") == 0
1570
1571     def sfa_fsclean(self):
1572         "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1573         self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1574         return True
1575
1576     def sfa_plcclean(self):
1577         "cleans the PLC entries that were created as a side effect of running the script"
1578         # ignore result
1579         sfa_spec = self.plc_spec['sfa']
1580
1581         for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1582             login_base = auth_sfa_spec['login_base']
1583             try:
1584                 self.apiserver.DeleteSite(self.auth_root(),login_base)
1585             except:
1586                 print("Site {} already absent from PLC db".format(login_base))
1587
1588             for spec_name in ['pi_spec','user_spec']:
1589                 user_spec = auth_sfa_spec[spec_name]
1590                 username = user_spec['email']
1591                 try:
1592                     self.apiserver.DeletePerson(self.auth_root(),username)
1593                 except:
1594                     # this in fact is expected as sites delete their members
1595                     #print "User {} already absent from PLC db".format(username)
1596                     pass
1597
1598         print("REMEMBER TO RUN sfa_import AGAIN")
1599         return True
1600
1601     def sfa_uninstall(self):
1602         "uses rpm to uninstall sfa - ignore result"
1603         self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1604         self.run_in_guest("rm -rf /var/lib/sfa")
1605         self.run_in_guest("rm -rf /etc/sfa")
1606         self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1607         # xxx tmp
1608         self.run_in_guest("rpm -e --noscripts sfa-plc")
1609         return True
1610
1611     ### run unit tests for SFA
1612     # NOTE: on f14/i386, yum install sfa-tests sometimes fails for no apparent reason
1613     # Running Transaction
1614     # Transaction couldn't start:
1615     # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1616     # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1617     # no matter how many Gbs are available on the testplc
1618     # could not figure out what's wrong, so...
1619     # if the yum install phase fails, consider the test is successful
1620     # other combinations will eventually run it hopefully
1621     def sfa_utest(self):
1622         "yum install sfa-tests and run SFA unittests"
1623         self.run_in_guest("yum -y install sfa-tests")
1624         # failed to install - forget it
1625         if self.run_in_guest("rpm -q sfa-tests") != 0:
1626             utils.header("WARNING: SFA unit tests failed to install, ignoring")
1627             return True
1628         return self.run_in_guest("/usr/share/sfa/tests/testAll.py") == 0
1629
1630     ###
1631     def confdir(self):
1632         dirname = "conf.{}".format(self.plc_spec['name'])
1633         if not os.path.isdir(dirname):
1634             utils.system("mkdir -p {}".format(dirname))
1635         if not os.path.isdir(dirname):
1636             raise Exception("Cannot create config dir for plc {}".format(self.name()))
1637         return dirname
1638
1639     def conffile(self, filename):
1640         return "{}/{}".format(self.confdir(), filename)
1641     def confsubdir(self, dirname, clean, dry_run=False):
1642         subdirname = "{}/{}".format(self.confdir(), dirname)
1643         if clean:
1644             utils.system("rm -rf {}".format(subdirname))
1645         if not os.path.isdir(subdirname):
1646             utils.system("mkdir -p {}".format(subdirname))
1647         if not dry_run and not os.path.isdir(subdirname):
1648             raise "Cannot create config subdir {} for plc {}".format(dirname, self.name())
1649         return subdirname
1650
1651     def conffile_clean(self, filename):
1652         filename=self.conffile(filename)
1653         return utils.system("rm -rf {}".format(filename))==0
1654
1655     ###
1656     def sfa_configure(self):
1657         "run sfa-config-tty"
1658         tmpname = self.conffile("sfa-config-tty")
1659         with open(tmpname,'w') as fileconf:
1660             for var, value in self.plc_spec['sfa']['settings'].items():
1661                 fileconf.write('e {}\n{}\n'.format(var, value))
1662             fileconf.write('w\n')
1663             fileconf.write('R\n')
1664             fileconf.write('q\n')
1665         utils.system('cat {}'.format(tmpname))
1666         self.run_in_guest_piped('cat {}'.format(tmpname), 'sfa-config-tty')
1667         return True
1668
1669     def aggregate_xml_line(self):
1670         port = self.plc_spec['sfa']['neighbours-port']
1671         return '<aggregate addr="{}" hrn="{}" port="{}"/>'\
1672             .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'], port)
1673
1674     def registry_xml_line(self):
1675         return '<registry addr="{}" hrn="{}" port="12345"/>'\
1676             .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
1677
1678
1679     # a cross step that takes all other plcs in argument
1680     def cross_sfa_configure(self, other_plcs):
1681         "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1682         # of course with a single plc, other_plcs is an empty list
1683         if not other_plcs:
1684             return True
1685         agg_fname = self.conffile("agg.xml")
1686         with open(agg_fname,"w") as out:
1687             out.write("<aggregates>{}</aggregates>\n"\
1688                       .format(" ".join([ plc.aggregate_xml_line() for plc in other_plcs ])))
1689         utils.header("(Over)wrote {}".format(agg_fname))
1690         reg_fname=self.conffile("reg.xml")
1691         with open(reg_fname,"w") as out:
1692             out.write("<registries>{}</registries>\n"\
1693                       .format(" ".join([ plc.registry_xml_line() for plc in other_plcs ])))
1694         utils.header("(Over)wrote {}".format(reg_fname))
1695         return self.test_ssh.copy_abs(agg_fname,
1696                                       '/{}/etc/sfa/aggregates.xml'.format(self.vm_root_in_host())) == 0 \
1697            and self.test_ssh.copy_abs(reg_fname,
1698                                       '/{}/etc/sfa/registries.xml'.format(self.vm_root_in_host())) == 0
1699
1700     def sfa_import(self):
1701         "use sfaadmin to import from plc"
1702         auth = self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']
1703         return self.run_in_guest('sfaadmin reg import_registry') == 0
1704
1705     def sfa_start(self):
1706         "service sfa start"
1707         return (self.start_stop_systemd('sfa-registry', 'start') and
1708                 self.start_stop_systemd('sfa-aggregate', 'start'))
1709
1710
1711     def sfi_configure(self):
1712         "Create /root/sfi on the plc side for sfi client configuration"
1713         if self.options.dry_run:
1714             utils.header("DRY RUN - skipping step")
1715             return True
1716         sfa_spec = self.plc_spec['sfa']
1717         # cannot use auth_sfa_mapper to pass dir_name
1718         for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1719             test_slice = TestAuthSfa(self, slice_spec)
1720             dir_basename = os.path.basename(test_slice.sfi_path())
1721             dir_name = self.confsubdir("dot-sfi/{}".format(dir_basename),
1722                                        clean=True, dry_run=self.options.dry_run)
1723             test_slice.sfi_configure(dir_name)
1724             # push into the remote /root/sfi area
1725             location = test_slice.sfi_path()
1726             remote = "{}/{}".format(self.vm_root_in_host(), location)
1727             self.test_ssh.mkdir(remote, abs=True)
1728             # need to strip the last level of remote, otherwise we get an extra dir level
1729             self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1730
1731         return True
1732
1733     def sfi_clean(self):
1734         "clean up /root/sfi on the plc side"
1735         self.run_in_guest("rm -rf /root/sfi")
1736         return True
1737
1738     def sfa_rspec_empty(self):
1739         "expose a static empty rspec (ships with the tests module) in the sfi directory"
1740         filename = "empty-rspec.xml"
1741         overall = True
1742         for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1743             test_slice = TestAuthSfa(self, slice_spec)
1744             in_vm = test_slice.sfi_path()
1745             remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1746             if self.test_ssh.copy_abs(filename, remote) !=0:
1747                 overall = False
1748         return overall
1749
1750     @auth_sfa_mapper
1751     def sfa_register_site(self): pass
1752     @auth_sfa_mapper
1753     def sfa_register_pi(self): pass
1754     @auth_sfa_mapper
1755     def sfa_register_user(self): pass
1756     @auth_sfa_mapper
1757     def sfa_update_user(self): pass
1758     @auth_sfa_mapper
1759     def sfa_register_slice(self): pass
1760     @auth_sfa_mapper
1761     def sfa_renew_slice(self): pass
1762     @auth_sfa_mapper
1763     def sfa_get_expires(self): pass
1764     @auth_sfa_mapper
1765     def sfa_discover(self): pass
1766     @auth_sfa_mapper
1767     def sfa_rspec(self): pass
1768     @auth_sfa_mapper
1769     def sfa_allocate(self): pass
1770     @auth_sfa_mapper
1771     def sfa_allocate_empty(self): pass
1772     @auth_sfa_mapper
1773     def sfa_provision(self): pass
1774     @auth_sfa_mapper
1775     def sfa_provision_empty(self): pass
1776     @auth_sfa_mapper
1777     def sfa_describe(self): pass
1778     @auth_sfa_mapper
1779     def sfa_check_slice_plc(self): pass
1780     @auth_sfa_mapper
1781     def sfa_check_slice_plc_empty(self): pass
1782     @auth_sfa_mapper
1783     def sfa_update_slice(self): pass
1784     @auth_sfa_mapper
1785     def sfa_remove_user_from_slice(self): pass
1786     @auth_sfa_mapper
1787     def sfa_insert_user_in_slice(self): pass
1788     @auth_sfa_mapper
1789     def sfi_list(self): pass
1790     @auth_sfa_mapper
1791     def sfi_show_site(self): pass
1792     @auth_sfa_mapper
1793     def sfi_show_slice(self): pass
1794     @auth_sfa_mapper
1795     def sfi_show_slice_researchers(self): pass
1796     @auth_sfa_mapper
1797     def ssh_slice_sfa(self): pass
1798     @auth_sfa_mapper
1799     def sfa_delete_user(self): pass
1800     @auth_sfa_mapper
1801     def sfa_delete_slice(self): pass
1802
1803     def sfa_stop(self):
1804         "service sfa stop"
1805         return (self.start_stop_systemd('sfa-aggregate', 'stop') and
1806                 self.start_stop_systemd('sfa-registry', 'stop'))
1807
1808     def populate(self):
1809         "creates random entries in the PLCAPI"
1810         # install the stress-test in the plc image
1811         location = "/usr/share/plc_api/plcsh_stress_test.py"
1812         remote = "{}/{}".format(self.vm_root_in_host(), location)
1813         self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1814         command = location
1815         command += " -- --preserve --short-names"
1816         local = (self.run_in_guest(command) == 0)
1817         # second run with --foreign
1818         command += ' --foreign'
1819         remote = (self.run_in_guest(command) == 0)
1820         return local and remote
1821
1822
1823     ####################
1824     @bonding_redirector
1825     def bonding_init_partial(self): pass
1826
1827     @bonding_redirector
1828     def bonding_add_yum(self): pass
1829
1830     @bonding_redirector
1831     def bonding_install_rpms(self): pass
1832
1833     ####################
1834
1835     def gather_logs(self):
1836         "gets all possible logs from plc's/qemu node's/slice's for future reference"
1837         # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1838         # (1.b) get the plc's  /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1839         # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1840         # (2) get each node's qemu log and store it as logs/node.qemu.<node>.log
1841         # (3) get each node's /var/log and store it as logs/node.var-log.<node>/*
1842         # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1843         # (1.a)
1844         print("-------------------- TestPlc.gather_logs : PLC's /var/log")
1845         self.gather_var_logs()
1846         # (1.b)
1847         print("-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/")
1848         self.gather_pgsql_logs()
1849         # (1.c)
1850         print("-------------------- TestPlc.gather_logs : PLC's /root/sfi/")
1851         self.gather_root_sfi()
1852         # (2)
1853         print("-------------------- TestPlc.gather_logs : nodes's QEMU logs")
1854         for site_spec in self.plc_spec['sites']:
1855             test_site = TestSite(self,site_spec)
1856             for node_spec in site_spec['nodes']:
1857                 test_node = TestNode(self, test_site, node_spec)
1858                 test_node.gather_qemu_logs()
1859         # (3)
1860         print("-------------------- TestPlc.gather_logs : nodes's /var/log")
1861         self.gather_nodes_var_logs()
1862         # (4)
1863         print("-------------------- TestPlc.gather_logs : sample sliver's /var/log")
1864         self.gather_slivers_var_logs()
1865         return True
1866
1867     def gather_slivers_var_logs(self):
1868         for test_sliver in self.all_sliver_objs():
1869             remote = test_sliver.tar_var_logs()
1870             utils.system("mkdir -p logs/sliver.var-log.{}".format(test_sliver.name()))
1871             command = remote + " | tar -C logs/sliver.var-log.{} -xf -".format(test_sliver.name())
1872             utils.system(command)
1873         return True
1874
1875     def gather_var_logs(self):
1876         utils.system("mkdir -p logs/myplc.var-log.{}".format(self.name()))
1877         to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1878         command = to_plc + "| tar -C logs/myplc.var-log.{} -xf -".format(self.name())
1879         utils.system(command)
1880         command = "chmod a+r,a+x logs/myplc.var-log.{}/httpd".format(self.name())
1881         utils.system(command)
1882
1883     def gather_pgsql_logs(self):
1884         utils.system("mkdir -p logs/myplc.pgsql-log.{}".format(self.name()))
1885         to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1886         command = to_plc + "| tar -C logs/myplc.pgsql-log.{} -xf -".format(self.name())
1887         utils.system(command)
1888
1889     def gather_root_sfi(self):
1890         utils.system("mkdir -p logs/sfi.{}".format(self.name()))
1891         to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1892         command = to_plc + "| tar -C logs/sfi.{} -xf -".format(self.name())
1893         utils.system(command)
1894
1895     def gather_nodes_var_logs(self):
1896         for site_spec in self.plc_spec['sites']:
1897             test_site = TestSite(self, site_spec)
1898             for node_spec in site_spec['nodes']:
1899                 test_node = TestNode(self, test_site, node_spec)
1900                 test_ssh = TestSsh(test_node.name(), key="keys/key_admin.rsa")
1901                 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1902                 command = command + "| tar -C logs/node.var-log.{} -xf -".format(test_node.name())
1903                 utils.system("mkdir -p logs/node.var-log.{}".format(test_node.name()))
1904                 utils.system(command)
1905
1906
1907     # returns the filename to use for sql dump/restore, using options.dbname if set
1908     def dbfile(self, database):
1909         # uses options.dbname if it is found
1910         try:
1911             name = self.options.dbname
1912             if not isinstance(name, str):
1913                 raise Exception
1914         except:
1915             t = datetime.now()
1916             d = t.date()
1917             name = str(d)
1918         return "/root/{}-{}.sql".format(database, name)
1919
1920     def plc_db_dump(self):
1921         'dump the planetlab5 DB in /root in the PLC - filename includes the date or options.dbname'
1922         dump = self.dbfile("planetlab5")
1923         self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1924         utils.header('Dumped planetlab5 database in {}'.format(dump))
1925         return True
1926
1927     def plc_db_restore(self):
1928         'restore the planetlab5 DB - looks broken, but run -n might help'
1929         dump = self.dbfile("planetlab5")
1930         ##stop httpd service
1931         self.run_in_guest('service httpd stop')
1932         # xxx - need another wrapper
1933         self.run_in_guest_piped('echo drop database planetlab5', 'psql --user=pgsqluser template1')
1934         self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1935         self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump)
1936         ##starting httpd service
1937         self.run_in_guest('service httpd start')
1938
1939         utils.header('Database restored from ' + dump)
        return True
1940
1941     @staticmethod
1942     def create_ignore_steps():
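        # for every declared step (separators excluded), wrap the step method with
        # ignore_result and attach it to TestPlc under the name '<step>_ignore'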
1943         for step in TestPlc.default_steps + TestPlc.other_steps:
1944             # default step can have a plc qualifier
1945             if '@' in step:
1946                 step, qualifier = step.split('@')
1947             # or be defined as forced or ignored by default
1948             for keyword in ['_ignore','_force']:
1949                 if step.endswith(keyword):
1950                     step=step.replace(keyword,'')
1951             if step == SEP or step == SEPSFA :
1952                 continue
1953             method = getattr(TestPlc,step)
1954             name = step + '_ignore'
1955             wrapped = ignore_result(method)
1956 #            wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
1957             setattr(TestPlc, name, wrapped)
1958
1959 #    @ignore_result
1960 #    def ssh_slice_again_ignore (self): pass
1961 #    @ignore_result
1962 #    def check_initscripts_ignore (self): pass
1963
1964     def standby_1_through_20(self):
1965         """convenience function to wait for a specified number of minutes"""
1966         pass
1967     @standby_generic
1968     def standby_1(): pass
1969     @standby_generic
1970     def standby_2(): pass
1971     @standby_generic
1972     def standby_3(): pass
1973     @standby_generic
1974     def standby_4(): pass
1975     @standby_generic
1976     def standby_5(): pass
1977     @standby_generic
1978     def standby_6(): pass
1979     @standby_generic
1980     def standby_7(): pass
1981     @standby_generic
1982     def standby_8(): pass
1983     @standby_generic
1984     def standby_9(): pass
1985     @standby_generic
1986     def standby_10(): pass
1987     @standby_generic
1988     def standby_11(): pass
1989     @standby_generic
1990     def standby_12(): pass
1991     @standby_generic
1992     def standby_13(): pass
1993     @standby_generic
1994     def standby_14(): pass
1995     @standby_generic
1996     def standby_15(): pass
1997     @standby_generic
1998     def standby_16(): pass
1999     @standby_generic
2000     def standby_17(): pass
2001     @standby_generic
2002     def standby_18(): pass
2003     @standby_generic
2004     def standby_19(): pass
2005     @standby_generic
2006     def standby_20(): pass
2007
2008     # convenience for debugging the test logic
2009     def yes(self): return True
2010     def no(self): return False
2011     def fail(self): return False