fix pip_install
[tests.git] / system / TestPlc.py
1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
3 #
4 import sys
5 import time
6 import os, os.path
7 import traceback
8 import socket
9 from datetime import datetime, timedelta
10
11 import utils
12 from Completer import Completer, CompleterTask
13 from TestSite import TestSite
14 from TestNode import TestNode, CompleterTaskNodeSsh
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
24
25 from TestBonding import TestBonding
26
27 has_sfa_cache_filename="sfa-cache"
28
29 # step methods must take (self) and return a boolean (options is a member of the class)
30
31 def standby(minutes, dry_run):
32     utils.header('Entering StandBy for {:d} minutes'.format(minutes))
33     if dry_run:
34         print('dry_run')
35     else:
36         time.sleep(60*minutes)
37     return True
38
39 def standby_generic(func):
40     def actual(self):
41         minutes = int(func.__name__.split("_")[1])
42         return standby(minutes, self.options.dry_run)
43     return actual
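# the duration is taken from the decorated method's name:
# e.g. a method named standby_5 ends up sleeping for 5 minutes
# (cf. 'standby_1_through_20' in other_steps below)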
44
45 def node_mapper(method):
46     def map_on_nodes(self, *args, **kwds):
47         overall = True
48         node_method = TestNode.__dict__[method.__name__]
49         for test_node in self.all_nodes():
50             if not node_method(test_node, *args, **kwds):
51                 overall=False
52         return overall
53     # maintain __name__ for ignore_result
54     map_on_nodes.__name__ = method.__name__
55     # restore the doc text
56     map_on_nodes.__doc__ = TestNode.__dict__[method.__name__].__doc__
57     return map_on_nodes
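# node_mapper turns a placeholder TestPlc method into a step that calls the
# TestNode method of the same name on every node, and fails if any node fails
# (see the @node_mapper stubs like qemu_local_init or bootcd further down)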
58
59 def slice_mapper(method):
60     def map_on_slices(self):
61         overall = True
62         slice_method = TestSlice.__dict__[method.__name__]
63         for slice_spec in self.plc_spec['slices']:
64             site_spec = self.locate_site (slice_spec['sitename'])
65             test_site = TestSite(self,site_spec)
66             test_slice = TestSlice(self,test_site,slice_spec)
67             if not slice_method(test_slice, self.options):
68                 overall=False
69         return overall
70     # maintain __name__ for ignore_result
71     map_on_slices.__name__ = method.__name__
72     # restore the doc text
73     map_on_slices.__doc__ = TestSlice.__dict__[method.__name__].__doc__
74     return map_on_slices
75
76 def bonding_redirector(method):
77     bonding_name = method.__name__.replace('bonding_', '')
78     def redirect(self):
79         bonding_method = TestBonding.__dict__[bonding_name]
80         return bonding_method(self.test_bonding)
81     # maintain __name__ for ignore_result
82     redirect.__name__ = method.__name__
83     # restore the doc text
84     redirect.__doc__ = TestBonding.__dict__[bonding_name].__doc__
85     return redirect
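# e.g. a step named bonding_init_partial is forwarded to TestBonding.init_partial,
# invoked on the self.test_bonding instance (cf. default_bonding_steps below)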
86
87 # run a step but return True so that we can go on
88 def ignore_result(method):
89     def ignoring(self):
90         # ssh_slice_ignore->ssh_slice
91         ref_name = method.__name__.replace('_ignore', '').replace('force_', '')
92         ref_method = TestPlc.__dict__[ref_name]
93         result = ref_method(self)
94         print("Actual (but ignored) result for {ref_name} is {result}".format(**locals()))
95         return Ignored(result)
96     name = method.__name__.replace('_ignore', '').replace('force_', '')
97     ignoring.__name__ = name
98     ignoring.__doc__ = "ignored version of " + name
99     return ignoring
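# e.g. check_vsys_defaults_ignore runs check_vsys_defaults and reports its outcome,
# but wraps it in Ignored so that the overall run keeps going regardless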
100
101 # a variant that expects the TestSlice method to return a list of CompleterTasks that
102 # are then merged into a single Completer run to avoid waiting for all the slices
103 # - especially useful when a test fails, of course
104 # because we need to pass arguments, we use a class instead of a plain decorator
105 class slice_mapper__tasks(object):
106     # could not get this to work with named arguments
107     def __init__(self, timeout_minutes, silent_minutes, period_seconds):
108         self.timeout = timedelta(minutes = timeout_minutes)
109         self.silent = timedelta(minutes = silent_minutes)
110         self.period = timedelta(seconds = period_seconds)
111     def __call__(self, method):
112         decorator_self=self
113         # compute augmented method name
114         method_name = method.__name__ + "__tasks"
115         # locate in TestSlice
116         slice_method = TestSlice.__dict__[ method_name ]
117         def wrappee(self):
118             tasks=[]
119             for slice_spec in self.plc_spec['slices']:
120                 site_spec = self.locate_site (slice_spec['sitename'])
121                 test_site = TestSite(self, site_spec)
122                 test_slice = TestSlice(self, test_site, slice_spec)
123                 tasks += slice_method (test_slice, self.options)
124             return Completer (tasks, message=method.__name__).\
125                 run(decorator_self.timeout, decorator_self.silent, decorator_self.period)
126         # restore the doc text from the TestSlice method even if a bit odd
127         wrappee.__name__ = method.__name__
128         wrappee.__doc__ = slice_method.__doc__
129         return wrappee
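# usage sketch - the timing figures below are made up - a step declared as
#     @slice_mapper__tasks(20, 19, 15)
#     def ssh_slice(self): pass
# expects TestSlice to provide a matching ssh_slice__tasks method
# that returns the CompleterTask objects to run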
130
131 def auth_sfa_mapper(method):
132     def actual(self):
133         overall = True
134         auth_method = TestAuthSfa.__dict__[method.__name__]
135         for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
136             test_auth = TestAuthSfa(self, auth_spec)
137             if not auth_method(test_auth, self.options):
138                 overall=False
139         return overall
140     # restore the doc text
141     actual.__doc__ = TestAuthSfa.__dict__[method.__name__].__doc__
142     return actual
143
144 class Ignored:
145     def __init__(self, result):
146         self.result = result
147
148 SEP = '<sep>'
149 SEPSFA = '<sep_sfa>'
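# SEP and SEPSFA are not real steps; they only act as visual separators
# (printable_steps turns them into line breaks, and valid_step filters them out)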
150
151 class TestPlc:
152
153     default_steps = [
154         'show', SEP,
155         'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
156         'django_install', 'plc_install', 'plc_configure', 'plc_start', SEP,
157         'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
158         'plcapi_urls','speed_up_slices', SEP,
159         'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
160 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
161 # keep this out of the way for now
162         'check_vsys_defaults_ignore', SEP,
163 # run this first off so it's easier to re-run on another qemu box
164         'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
165         'qemu_clean_mine', 'qemu_export', 'qemu_cleanlog', SEP,
166         'qemu_start', 'qemu_timestamp', 'qemu_nodefamily', SEP,
167         'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
168         'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
169         'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
170         'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1',
171         'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
172         'sfa_discover@1', 'sfa_rspec@1', SEPSFA,
173         'sfa_allocate@1', 'sfa_provision@1', 'sfa_describe@1', SEPSFA,
174         'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
175         'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
176         # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
177         # but as the stress test might take a while, we sometimes missed the debug mode..
178         'probe_kvm_iptables',
179         'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
180         'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', SEP,
181         'ssh_slice_sfa@1', SEPSFA,
182         'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1','sfa_check_slice_plc_empty@1', SEPSFA,
183         'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
184         'cross_check_tcp@1', 'check_system_slice', SEP,
185         # for inspecting the slice while it runs the first time
186         #'fail',
187         # check slices are turned off properly
188         'debug_nodemanager',
189         'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
190         # check they are properly re-created with the same name
191         'fill_slices', 'ssh_slice_again', SEP,
192         'gather_logs_force', SEP,
193         ]
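    # steps suffixed with '@1' presumably apply to the first plc only
    # when the test run drives several plcs at once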
194     other_steps = [
195         'export', 'show_boxes', 'super_speed_up_slices', SEP,
196         'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
197         'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
198         'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
199         'delete_leases', 'list_leases', SEP,
200         'populate', SEP,
201         'nodestate_show','nodestate_safeboot','nodestate_boot', 'nodestate_upgrade', SEP,
202         'nodedistro_show','nodedistro_f14','nodedistro_f18', SEP,
203         'nodedistro_f20', 'nodedistro_f21','nodedistro_f22', SEP,
204         'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
205         'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
206         'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
207         'sfa_get_expires', SEPSFA,
208         'plc_db_dump' , 'plc_db_restore', SEP,
209         'check_netflow','check_drl', SEP,
210         'slice_fs_present', 'check_initscripts', SEP,
211         'standby_1_through_20','yes','no',SEP,
212         'install_syslinux6', 'bonding_builds', 'bonding_nodes', SEP,
213         ]
214     default_bonding_steps = [
215         'bonding_init_partial',
216         'bonding_add_yum',
217         'bonding_install_rpms', SEP,
218         ]
219
220     @staticmethod
221     def printable_steps(list):
222         single_line = " ".join(list) + " "
223         return single_line.replace(" "+SEP+" ", " \\\n").replace(" "+SEPSFA+" ", " \\\n")
224     @staticmethod
225     def valid_step(step):
226         return step != SEP and step != SEPSFA
227
228     # turn off the sfa-related steps when build has skipped SFA
229     # this was originally for centos5 but is still valid
230     # for up to f12 as recent SFAs with sqlalchemy won't build before f14
231     @staticmethod
232     def _has_sfa_cached(rpms_url):
233         if os.path.isfile(has_sfa_cache_filename):
234             with open(has_sfa_cache_filename) as cache:
235                 cached = cache.read() == "yes"
236             utils.header("build provides SFA (cached):{}".format(cached))
237             return cached
238         # warning, we're now building 'sface' so let's be a bit more picky
239         # full builds are expected to return with 0 here
240         utils.header("Checking if build provides SFA package...")
241         retcod = utils.system("curl --silent {}/ | grep -q sfa-".format(rpms_url)) == 0
242         encoded = 'yes' if retcod else 'no'
243         with open(has_sfa_cache_filename,'w') as cache:
244             cache.write(encoded)
245         return retcod
246
247     @staticmethod
248     def check_whether_build_has_sfa(rpms_url):
249         has_sfa = TestPlc._has_sfa_cached(rpms_url)
250         if has_sfa:
251             utils.header("build does provide SFA")
252         else:
253             # move all steps containing 'sfa' from default_steps to other_steps
254             utils.header("SFA package not found - removing steps with sfa or sfi")
255             sfa_steps = [ step for step in TestPlc.default_steps
256                           if step.find('sfa') >= 0 or step.find("sfi") >= 0 ]
257             TestPlc.other_steps += sfa_steps
258             for step in sfa_steps:
259                 TestPlc.default_steps.remove(step)
260
261     def __init__(self, plc_spec, options):
262         self.plc_spec = plc_spec
263         self.options = options
264         self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
265         self.vserverip = plc_spec['vserverip']
266         self.vservername = plc_spec['vservername']
267         self.vplchostname = self.vservername.split('-')[-1]
268         self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
269         self.apiserver = TestApiserver(self.url, options.dry_run)
270         (self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
271         (self.ssh_node_debug_timeout, self.ssh_node_debug_silent) = plc_spec['ssh_node_debug_timers']
272
273     def has_addresses_api(self):
274         return self.apiserver.has_method('AddIpAddress')
275
276     def name(self):
277         name = self.plc_spec['name']
278         return "{}.{}".format(name,self.vservername)
279
280     def hostname(self):
281         return self.plc_spec['host_box']
282
283     def is_local(self):
284         return self.test_ssh.is_local()
285
286     # define the API methods on this object through xmlrpc
287     # would help, but not strictly necessary
288     def connect(self):
289         pass
290
291     def actual_command_in_guest(self,command, backslash=False):
292         raw1 = self.host_to_guest(command)
293         raw2 = self.test_ssh.actual_command(raw1, dry_run=self.options.dry_run, backslash=backslash)
294         return raw2
295
296     def start_guest(self):
297       return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),
298                                                        dry_run=self.options.dry_run))
299
300     def stop_guest(self):
301       return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),
302                                                        dry_run=self.options.dry_run))
303
304     def run_in_guest(self, command, backslash=False):
305         raw = self.actual_command_in_guest(command, backslash)
306         return utils.system(raw)
307
308     def run_in_host(self,command):
309         return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
310
311     # backslashing turned out so awful at some point that I've turned off auto-backslashing
312     # see e.g. plc_start esp. the version for f14
313     # command gets run in the plc's vm
314     def host_to_guest(self, command):
315         ssh_leg = TestSsh(self.vplchostname)
316         return ssh_leg.actual_command(command, keep_stdin=True)
317
318     # this /vservers thing is legacy...
319     def vm_root_in_host(self):
320         return "/vservers/{}/".format(self.vservername)
321
322     def vm_timestamp_path(self):
323         return "/vservers/{}/{}.timestamp".format(self.vservername, self.vservername)
324
325     # start/stop the vserver
326     def start_guest_in_host(self):
327         return "virsh -c lxc:/// start {}".format(self.vservername)
328
329     def stop_guest_in_host(self):
330         return "virsh -c lxc:/// destroy {}".format(self.vservername)
331
332     # xxx quick n dirty
333     def run_in_guest_piped(self,local,remote):
334         return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),
335                                                                      keep_stdin = True))
336
337     def yum_check_installed(self, rpms):
338         if isinstance(rpms, list):
339             rpms=" ".join(rpms)
340         return self.run_in_guest("rpm -q {}".format(rpms)) == 0
341
342     # does a yum install in the vs, ignore yum retcod, check with rpm
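    # if the plain yum run fails (e.g. on dnf-based releases), we retry with dnf --allowerasing,
    # and in the end trust 'rpm -q' rather than the installer's exit code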
343     def yum_install(self, rpms):
344         if isinstance(rpms, list):
345             rpms=" ".join(rpms)
346         yum_mode = self.run_in_guest("yum -y install {}".format(rpms))
347         if yum_mode != 0:
348             self.run_in_guest("dnf -y install --allowerasing {}".format(rpms))
349         # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
350         self.run_in_guest("yum-complete-transaction -y")
351         return self.yum_check_installed(rpms)
352
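    # note: pip has no '-y' option - a plain 'pip install' is already non-interactive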
353     def pip_install(self, package):
354         return self.run_in_guest("pip install {}".format(package)) == 0
355
356     def auth_root(self):
357         return {'Username'   : self.plc_spec['settings']['PLC_ROOT_USER'],
358                 'AuthMethod' : 'password',
359                 'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
360                 'Role'       : self.plc_spec['role'],
361                 }
362
363     def locate_site(self,sitename):
364         for site in self.plc_spec['sites']:
365             if site['site_fields']['name'] == sitename:
366                 return site
367             if site['site_fields']['login_base'] == sitename:
368                 return site
369         raise Exception("Cannot locate site {}".format(sitename))
370
371     def locate_node(self, nodename):
372         for site in self.plc_spec['sites']:
373             for node in site['nodes']:
374                 if node['name'] == nodename:
375                     return site, node
376         raise Exception("Cannot locate node {}".format(nodename))
377
378     def locate_hostname(self, hostname):
379         for site in self.plc_spec['sites']:
380             for node in site['nodes']:
381                 if node['node_fields']['hostname'] == hostname:
382                     return(site, node)
383         raise Exception("Cannot locate hostname {}".format(hostname))
384
385     def locate_key(self, key_name):
386         for key in self.plc_spec['keys']:
387             if key['key_name'] == key_name:
388                 return key
389         raise Exception("Cannot locate key {}".format(key_name))
390
391     def locate_private_key_from_key_names(self, key_names):
392         # locate the first avail. key
393         found = False
394         for key_name in key_names:
395             key_spec = self.locate_key(key_name)
396             test_key = TestKey(self,key_spec)
397             publickey = test_key.publicpath()
398             privatekey = test_key.privatepath()
399             if os.path.isfile(publickey) and os.path.isfile(privatekey):
400                 found = True
401                 break
402         if found:
403             return privatekey
404         return None
405
406     def locate_slice(self, slicename):
407         for slice in self.plc_spec['slices']:
408             if slice['slice_fields']['name'] == slicename:
409                 return slice
410         raise Exception("Cannot locate slice {}".format(slicename))
411
412     def all_sliver_objs(self):
413         result = []
414         for slice_spec in self.plc_spec['slices']:
415             slicename = slice_spec['slice_fields']['name']
416             for nodename in slice_spec['nodenames']:
417                 result.append(self.locate_sliver_obj(nodename, slicename))
418         return result
419
420     def locate_sliver_obj(self, nodename, slicename):
421         site,node = self.locate_node(nodename)
422         slice = self.locate_slice(slicename)
423         # build objects
424         test_site = TestSite(self, site)
425         test_node = TestNode(self, test_site, node)
426         # xxx the slice site is assumed to be the node site - mhh - probably harmless
427         test_slice = TestSlice(self, test_site, slice)
428         return TestSliver(self, test_node, test_slice)
429
430     def locate_first_node(self):
431         nodename = self.plc_spec['slices'][0]['nodenames'][0]
432         site,node = self.locate_node(nodename)
433         test_site = TestSite(self, site)
434         test_node = TestNode(self, test_site, node)
435         return test_node
436
437     def locate_first_sliver(self):
438         slice_spec = self.plc_spec['slices'][0]
439         slicename = slice_spec['slice_fields']['name']
440         nodename = slice_spec['nodenames'][0]
441         return self.locate_sliver_obj(nodename,slicename)
442
443     # all different hostboxes used in this plc
444     def get_BoxNodes(self):
445         # maps on sites and nodes, return [ (host_box,test_node) ]
446         tuples = []
447         for site_spec in self.plc_spec['sites']:
448             test_site = TestSite(self,site_spec)
449             for node_spec in site_spec['nodes']:
450                 test_node = TestNode(self, test_site, node_spec)
451                 if not test_node.is_real():
452                     tuples.append( (test_node.host_box(),test_node) )
453         # transform into a dict { 'host_box' -> [ test_node .. ] }
454         result = {}
455         for (box,node) in tuples:
456             if box not in result:
457                 result[box] = [node]
458             else:
459                 result[box].append(node)
460         return result
461
462     # a step for checking this stuff
463     def show_boxes(self):
464         'print summary of nodes location'
465         for box,nodes in self.get_BoxNodes().items():
466             print(box,":"," + ".join( [ node.name() for node in nodes ] ))
467         return True
468
469     # make this a valid step
470     def qemu_kill_all(self):
471         'kill all qemu instances on the qemu boxes involved in this setup'
472         # this is the brute force version, kill all qemus on that host box
473         for (box,nodes) in self.get_BoxNodes().items():
474             # pass the first nodename, as we don't push template-qemu on testboxes
475             nodedir = nodes[0].nodedir()
476             TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
477         return True
478
479     # make this a valid step
480     def qemu_list_all(self):
481         'list all qemu instances on the qemu boxes involved in this setup'
482         for box,nodes in self.get_BoxNodes().items():
483             # this is the brute force version - list all qemus on that host box
484             TestBoxQemu(box, self.options.buildname).qemu_list_all()
485         return True
486
487     # list only the qemus related to this test
488     def qemu_list_mine(self):
489         'list qemu instances for our nodes'
490         for (box,nodes) in self.get_BoxNodes().items():
491             # the fine-grain version
492             for node in nodes:
493                 node.list_qemu()
494         return True
495
496     # clean up only the qemus related to this test
497     def qemu_clean_mine(self):
498         'cleanup (rm -rf) qemu instances for our nodes'
499         for box,nodes in self.get_BoxNodes().items():
500             # the fine-grain version
501             for node in nodes:
502                 node.qemu_clean()
503         return True
504
505     # kill only the right qemus
506     def qemu_kill_mine(self):
507         'kill the qemu instances for our nodes'
508         for box,nodes in self.get_BoxNodes().items():
509             # the fine-grain version
510             for node in nodes:
511                 node.kill_qemu()
512         return True
513
514     #################### display config
515     def show(self):
516         "show test configuration after localization"
517         self.show_pass(1)
518         self.show_pass(2)
519         return True
520
521     # ugly hack to make sure 'run export' only reports about the 1st plc
522     # to avoid confusion - also we use 'inri_slice1' in various aliases..
523     exported_id = 1
524     def export(self):
525         "print cut'n paste-able stuff to export env variables to your shell"
526         # guess local domain from hostname
527         if TestPlc.exported_id > 1:
528             print("export GUESTHOSTNAME{:d}={}".format(TestPlc.exported_id, self.plc_spec['vservername']))
529             return True
530         TestPlc.exported_id += 1
531         domain = socket.gethostname().split('.',1)[1]
532         fqdn   = "{}.{}".format(self.plc_spec['host_box'], domain)
533         print("export BUILD={}".format(self.options.buildname))
534         print("export PLCHOSTLXC={}".format(fqdn))
535         print("export GUESTNAME={}".format(self.vservername))
536         print("export GUESTHOSTNAME={}.{}".format(self.vplchostname, domain))
537         # find hostname of first node
538         hostname, qemubox = self.all_node_infos()[0]
539         print("export KVMHOST={}.{}".format(qemubox, domain))
540         print("export NODE={}".format(hostname))
541         return True
542
543     # entry point
544     always_display_keys=['PLC_WWW_HOST', 'nodes', 'sites']
545     def show_pass(self, passno):
546         for (key,val) in self.plc_spec.items():
547             if not self.options.verbose and key not in TestPlc.always_display_keys:
548                 continue
549             if passno == 2:
550                 if key == 'sites':
551                     for site in val:
552                         self.display_site_spec(site)
553                         for node in site['nodes']:
554                             self.display_node_spec(node)
555                 elif key == 'initscripts':
556                     for initscript in val:
557                         self.display_initscript_spec(initscript)
558                 elif key == 'slices':
559                     for slice in val:
560                         self.display_slice_spec(slice)
561                 elif key == 'keys':
562                     for key in val:
563                         self.display_key_spec(key)
564             elif passno == 1:
565                 if key not in ['sites', 'initscripts', 'slices', 'keys']:
566                     print('+   ', key, ':', val)
567
568     def display_site_spec(self, site):
569         print('+ ======== site', site['site_fields']['name'])
570         for k,v in site.items():
571             if not self.options.verbose and k not in TestPlc.always_display_keys:
572                 continue
573             if k == 'nodes':
574                 if v:
575                     print('+       ','nodes : ', end=' ')
576                     for node in v:
577                         print(node['node_fields']['hostname'],'', end=' ')
578                     print('')
579             elif k == 'users':
580                 if v:
581                     print('+       users : ', end=' ')
582                     for user in v:
583                         print(user['name'],'', end=' ')
584                     print('')
585             elif k == 'site_fields':
586                 print('+       login_base', ':', v['login_base'])
587             elif k == 'address_fields':
588                 pass
589             else:
590                 print('+       ', end=' ')
591                 utils.pprint(k, v)
592
593     def display_initscript_spec(self, initscript):
594         print('+ ======== initscript', initscript['initscript_fields']['name'])
595
596     def display_key_spec(self, key):
597         print('+ ======== key', key['key_name'])
598
599     def display_slice_spec(self, slice):
600         print('+ ======== slice', slice['slice_fields']['name'])
601         for k,v in slice.items():
602             if k == 'nodenames':
603                 if v:
604                     print('+       nodes : ', end=' ')
605                     for nodename in v:
606                         print(nodename,'', end=' ')
607                     print('')
608             elif k == 'usernames':
609                 if v:
610                     print('+       users : ', end=' ')
611                     for username in v:
612                         print(username,'', end=' ')
613                     print('')
614             elif k == 'slice_fields':
615                 print('+       fields',':', end=' ')
616                 print('max_nodes=',v['max_nodes'], end=' ')
617                 print('')
618             else:
619                 print('+       ',k,v)
620
621     def display_node_spec(self, node):
622         print("+           node={} host_box={}".format(node['name'], node['host_box']), end=' ')
623         print("hostname=", node['node_fields']['hostname'], end=' ')
624         print("ip=", node['interface_fields']['ip'])
625         if self.options.verbose:
626             utils.pprint("node details", node, depth=3)
627
628     # another entry point for just showing the boxes involved
629     def display_mapping(self):
630         TestPlc.display_mapping_plc(self.plc_spec)
631         return True
632
633     @staticmethod
634     def display_mapping_plc(plc_spec):
635         print('+ MyPLC',plc_spec['name'])
636         # WARNING this would not be right for lxc-based PLC's - should be harmless though
637         print('+\tvserver address = root@{}:/vservers/{}'.format(plc_spec['host_box'], plc_spec['vservername']))
638         print('+\tIP = {}/{}'.format(plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip']))
639         for site_spec in plc_spec['sites']:
640             for node_spec in site_spec['nodes']:
641                 TestPlc.display_mapping_node(node_spec)
642
643     @staticmethod
644     def display_mapping_node(node_spec):
645         print('+   NODE {}'.format(node_spec['name']))
646         print('+\tqemu box {}'.format(node_spec['host_box']))
647         print('+\thostname={}'.format(node_spec['node_fields']['hostname']))
648
649     # write a timestamp in /vservers/<>.timestamp
650     # cannot be inside the vserver, that causes vserver .. build to cough
651     def plcvm_timestamp(self):
652         "Create a timestamp to remember creation date for this plc"
653         now = int(time.time())
654         # TODO-lxc check this one
655         # a first approx. is to store the timestamp close to the VM root like vs does
656         stamp_path = self.vm_timestamp_path()
657         stamp_dir = os.path.dirname(stamp_path)
658         utils.system(self.test_ssh.actual_command("mkdir -p {}".format(stamp_dir)))
659         return utils.system(self.test_ssh.actual_command("echo {:d} > {}".format(now, stamp_path))) == 0
660
661     # this is called unconditionally at the beginning of the test sequence
662     # just in case this is a rerun, so if the vm is not running it's fine
663     def plcvm_delete(self):
664         "vserver delete the test myplc"
665         stamp_path = self.vm_timestamp_path()
666         self.run_in_host("rm -f {}".format(stamp_path))
667         self.run_in_host("virsh -c lxc:/// destroy {}".format(self.vservername))
668         self.run_in_host("virsh -c lxc:/// undefine {}".format(self.vservername))
669         self.run_in_host("rm -fr /vservers/{}".format(self.vservername))
670         return True
671
672     ### install
673     # historically the build was being fetched by the tests
674     # now the build pushes itself as a subdir of the tests workdir
675     # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
676     def plcvm_create(self):
677         "vserver creation (no install done)"
678         # push the local build/ dir to the testplc box
679         if self.is_local():
680             # a full path for the local calls
681             build_dir = os.path.dirname(sys.argv[0])
682             # sometimes this is empty - set to "." in such a case
683             if not build_dir:
684                 build_dir="."
685             build_dir += "/build"
686         else:
687             # use a standard name - will be relative to remote buildname
688             build_dir = "build"
689             # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
690             self.test_ssh.rmdir(build_dir)
691             self.test_ssh.copy(build_dir, recursive=True)
692         # the repo url is taken from arch-rpms-url
693         # with the last step (i386) removed
694         repo_url = self.options.arch_rpms_url
695         for level in [ 'arch' ]:
696             repo_url = os.path.dirname(repo_url)
697
698         # invoke initvm (drop support for vs)
699         script = "lbuild-initvm.sh"
700         script_options = ""
701         # pass the vbuild-nightly options to [lv]test-initvm
702         script_options += " -p {}".format(self.options.personality)
703         script_options += " -d {}".format(self.options.pldistro)
704         script_options += " -f {}".format(self.options.fcdistro)
705         script_options += " -r {}".format(repo_url)
706         vserver_name = self.vservername
707         try:
708             vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
709             script_options += " -n {}".format(vserver_hostname)
710         except:
711             print("Cannot reverse lookup {}".format(self.vserverip))
712             print("This is considered fatal, as this might pollute the test results")
713             return False
714         create_vserver="{build_dir}/{script} {script_options} {vserver_name}".format(**locals())
715         return self.run_in_host(create_vserver) == 0
716
717     ### install django through pip
718     def django_install(self):
719         # plcapi requires Django, which is no longer provided by fedora as an rpm
720         # so we use pip instead
721         """
722         pip install Django
723         """
724         return self.pip_install('Django')
725
726     ### install_rpm
727     def plc_install(self):
728         """
729         yum install myplc, noderepo
730         """
731
732         # compute nodefamily
733         if self.options.personality == "linux32":
734             arch = "i386"
735         elif self.options.personality == "linux64":
736             arch = "x86_64"
737         else:
738             raise Exception("Unsupported personality {}".format(self.options.personality))
739         nodefamily = "{}-{}-{}".format(self.options.pldistro, self.options.fcdistro, arch)
740
741         pkgs_list=[]
742         pkgs_list.append("slicerepo-{}".format(nodefamily))
743         pkgs_list.append("myplc")
744         pkgs_list.append("noderepo-{}".format(nodefamily))
745         pkgs_string=" ".join(pkgs_list)
746         return self.yum_install(pkgs_list)
747
748     def install_syslinux6(self):
749         """
750         install syslinux6 from the fedora21 release
751         """
752         key = 'http://mirror.onelab.eu/keys/RPM-GPG-KEY-fedora-21-primary'
753
754         rpms = [
755             'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-6.03-1.fc21.x86_64.rpm',
756             'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-nonlinux-6.03-1.fc21.noarch.rpm',
757             'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-perl-6.03-1.fc21.x86_64.rpm',
758         ]
759         # this can be done several times
760         self.run_in_guest("rpm --import {key}".format(**locals()))
761         return self.run_in_guest("yum -y localinstall {}".format(" ".join(rpms))) == 0
762
763     def bonding_builds(self):
764         """
765         list /etc/yum.repos.d on the myplc side
766         """
767         self.run_in_guest("ls /etc/yum.repos.d/*partial.repo")
768         return True
769
770     def bonding_nodes(self):
771         """
772         List nodes known to the myplc together with their nodefamily
773         """
774         print("---------------------------------------- nodes")
775         for node in self.apiserver.GetNodes(self.auth_root()):
776             print("{} -> {}".format(node['hostname'],
777                                     self.apiserver.GetNodeFlavour(self.auth_root(),node['hostname'])['nodefamily']))
778         print("---------------------------------------- nodes")
779         return True
780
781     ###
782     def mod_python(self):
783         """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
784         return self.yum_install( ['mod_python'] )
785
786     ###
787     def plc_configure(self):
788         "run plc-config-tty"
789         tmpname = '{}.plc-config-tty'.format(self.name())
790         with open(tmpname,'w') as fileconf:
791             for var, value in self.plc_spec['settings'].items():
792                 fileconf.write('e {}\n{}\n'.format(var, value))
793             fileconf.write('w\n')
794             fileconf.write('q\n')
795         utils.system('cat {}'.format(tmpname))
796         self.run_in_guest_piped('cat {}'.format(tmpname), 'plc-config-tty')
797         utils.system('rm {}'.format(tmpname))
798         return True
799
800     # care only about f>=27
801     def start_stop_systemd(self, service, start_or_stop):
802         "utility to start/stop a systemd-defined service (sfa)"
803         return self.run_in_guest("systemctl {} {}".format(start_or_stop, service)) == 0
804
805     def plc_start(self):
806         "start plc through systemclt"
807         return self.start_stop_systemd('plc', 'start')
808
809     def plc_stop(self):
810         "stop plc through systemctl"
811         return self.start_stop_systemd('plc', 'stop')
812
813     def plcvm_start(self):
814         "start the PLC vserver"
815         self.start_guest()
816         return True
817
818     def plcvm_stop(self):
819         "stop the PLC vserver"
820         self.stop_guest()
821         return True
822
823     # stores the keys from the config for further use
824     def keys_store(self):
825         "stores test users ssh keys in keys/"
826         for key_spec in self.plc_spec['keys']:
827                 TestKey(self,key_spec).store_key()
828         return True
829
830     def keys_clean(self):
831         "removes keys cached in keys/"
832         utils.system("rm -rf ./keys")
833         return True
834
835     # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
836     # for later direct access to the nodes
837     def keys_fetch(self):
838         "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
839         dir="./keys"
840         if not os.path.isdir(dir):
841             os.mkdir(dir)
842         vservername = self.vservername
843         vm_root = self.vm_root_in_host()
844         overall = True
845         prefix = 'debug_ssh_key'
846         for ext in ['pub', 'rsa'] :
847             src = "{vm_root}/etc/planetlab/{prefix}.{ext}".format(**locals())
848             dst = "keys/{vservername}-debug.{ext}".format(**locals())
849             if self.test_ssh.fetch(src, dst) != 0:
850                 overall=False
851         return overall
852
853     def sites(self):
854         "create sites with PLCAPI"
855         return self.do_sites()
856
857     def delete_sites(self):
858         "delete sites with PLCAPI"
859         return self.do_sites(action="delete")
860
861     def do_sites(self, action="add"):
862         for site_spec in self.plc_spec['sites']:
863             test_site = TestSite(self,site_spec)
864             if (action != "add"):
865                 utils.header("Deleting site {} in {}".format(test_site.name(), self.name()))
866                 test_site.delete_site()
867                 # deleted with the site
868                 #test_site.delete_users()
869                 continue
870             else:
871                 utils.header("Creating site {} & users in {}".format(test_site.name(), self.name()))
872                 test_site.create_site()
873                 test_site.create_users()
874         return True
875
876     def delete_all_sites(self):
877         "Delete all sites in PLC, and related objects"
878         print('auth_root', self.auth_root())
879         sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
880         for site in sites:
881             # keep the automatic site - otherwise we shoot ourselves in the foot, as root_auth would no longer be valid
882             if site['login_base'] == self.plc_spec['settings']['PLC_SLICE_PREFIX']:
883                 continue
884             site_id = site['site_id']
885             print('Deleting site_id', site_id)
886             self.apiserver.DeleteSite(self.auth_root(), site_id)
887         return True
888
889     def nodes(self):
890         "create nodes with PLCAPI"
891         return self.do_nodes()
892     def delete_nodes(self):
893         "delete nodes with PLCAPI"
894         return self.do_nodes(action="delete")
895
896     def do_nodes(self, action="add"):
897         for site_spec in self.plc_spec['sites']:
898             test_site = TestSite(self, site_spec)
899             if action != "add":
900                 utils.header("Deleting nodes in site {}".format(test_site.name()))
901                 for node_spec in site_spec['nodes']:
902                     test_node = TestNode(self, test_site, node_spec)
903                     utils.header("Deleting {}".format(test_node.name()))
904                     test_node.delete_node()
905             else:
906                 utils.header("Creating nodes for site {} in {}".format(test_site.name(), self.name()))
907                 for node_spec in site_spec['nodes']:
908                     utils.pprint('Creating node {}'.format(node_spec), node_spec)
909                     test_node = TestNode(self, test_site, node_spec)
910                     test_node.create_node()
911         return True
912
913     def nodegroups(self):
914         "create nodegroups with PLCAPI"
915         return self.do_nodegroups("add")
916     def delete_nodegroups(self):
917         "delete nodegroups with PLCAPI"
918         return self.do_nodegroups("delete")
919
920     YEAR = 365*24*3600
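    # lease timestamps smaller than one YEAR are interpreted as relative offsets,
    # counted in grains from the aligned start time; larger values are absolute epoch times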
921     @staticmethod
922     def translate_timestamp(start, grain, timestamp):
923         if timestamp < TestPlc.YEAR:
924             return start + timestamp*grain
925         else:
926             return timestamp
927
928     @staticmethod
929     def timestamp_printable(timestamp):
930         return time.strftime('%m-%d %H:%M:%S UTC', time.gmtime(timestamp))
931
932     def leases(self):
933         "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
934         now = int(time.time())
935         grain = self.apiserver.GetLeaseGranularity(self.auth_root())
936         print('API answered grain=', grain)
937         start = (now//grain)*grain
938         start += grain
939         # find out all nodes that are reservable
940         nodes = self.all_reservable_nodenames()
941         if not nodes:
942             utils.header("No reservable node found - proceeding without leases")
943             return True
944         ok = True
945         # attach them to the leases as specified in plc_specs
946         # this is where the 'leases' field gets interpreted as relative or absolute
947         for lease_spec in self.plc_spec['leases']:
948             # skip the ones that come with a null slice id
949             if not lease_spec['slice']:
950                 continue
951             lease_spec['t_from']  = TestPlc.translate_timestamp(start, grain, lease_spec['t_from'])
952             lease_spec['t_until'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_until'])
953             lease_addition = self.apiserver.AddLeases(self.auth_root(), nodes, lease_spec['slice'],
954                                                       lease_spec['t_from'], lease_spec['t_until'])
955             if lease_addition['errors']:
956                 utils.header("Cannot create leases, {}".format(lease_addition['errors']))
957                 ok = False
958             else:
959                 utils.header('Leases on nodes {} for {} from {:d} ({}) until {:d} ({})'\
960                              .format(nodes, lease_spec['slice'],
961                                      lease_spec['t_from'],  TestPlc.timestamp_printable(lease_spec['t_from']),
962                                      lease_spec['t_until'], TestPlc.timestamp_printable(lease_spec['t_until'])))
963
964         return ok
965
966     def delete_leases(self):
967         "remove all leases in the myplc side"
968         lease_ids = [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
969         utils.header("Cleaning leases {}".format(lease_ids))
970         self.apiserver.DeleteLeases(self.auth_root(), lease_ids)
971         return True
972
973     def list_leases(self):
974         "list all leases known to the myplc"
975         leases = self.apiserver.GetLeases(self.auth_root())
976         now = int(time.time())
977         for l in leases:
978             current = l['t_until'] >= now
979             if self.options.verbose or current:
980                 utils.header("{} {} from {} until {}"\
981                              .format(l['hostname'], l['name'],
982                                      TestPlc.timestamp_printable(l['t_from']),
983                                      TestPlc.timestamp_printable(l['t_until'])))
984         return True
985
986     # create nodegroups if needed, and populate
987     def do_nodegroups(self, action="add"):
988         # 1st pass to scan contents
989         groups_dict = {}
990         for site_spec in self.plc_spec['sites']:
991             test_site = TestSite(self,site_spec)
992             for node_spec in site_spec['nodes']:
993                 test_node = TestNode(self, test_site, node_spec)
994                 if 'nodegroups' in node_spec:
995                     nodegroupnames = node_spec['nodegroups']
996                     if isinstance(nodegroupnames, str):
997                         nodegroupnames = [ nodegroupnames ]
998                     for nodegroupname in nodegroupnames:
999                         if nodegroupname not in groups_dict:
1000                             groups_dict[nodegroupname] = []
1001                         groups_dict[nodegroupname].append(test_node.name())
1002         auth = self.auth_root()
1003         overall = True
1004         for (nodegroupname,group_nodes) in groups_dict.items():
1005             if action == "add":
1006                 print('nodegroups:', 'dealing with nodegroup',\
1007                     nodegroupname, 'on nodes', group_nodes)
1008                 # first, check if the nodetagtype is here
1009                 tag_types = self.apiserver.GetTagTypes(auth, {'tagname':nodegroupname})
1010                 if tag_types:
1011                     tag_type_id = tag_types[0]['tag_type_id']
1012                 else:
1013                     tag_type_id = self.apiserver.AddTagType(auth,
1014                                                             {'tagname' : nodegroupname,
1015                                                              'description' : 'for nodegroup {}'.format(nodegroupname),
1016                                                              'category' : 'test'})
1017                 print('located tag (type)', nodegroupname, 'as', tag_type_id)
1018                 # create nodegroup
1019                 nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname' : nodegroupname})
1020                 if not nodegroups:
1021                     self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
1022                     print('created nodegroup', nodegroupname, \
1023                         'from tagname', nodegroupname, 'and value', 'yes')
1024                 # set node tag on all nodes, value='yes'
1025                 for nodename in group_nodes:
1026                     try:
1027                         self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
1028                     except:
1029                         traceback.print_exc()
1030                         print('node', nodename, 'seems to already have tag', nodegroupname)
1031                     # check anyway
1032                     try:
1033                         expect_yes = self.apiserver.GetNodeTags(auth,
1034                                                                 {'hostname' : nodename,
1035                                                                  'tagname'  : nodegroupname},
1036                                                                 ['value'])[0]['value']
1037                         if expect_yes != "yes":
1038                             print('Mismatch node tag on node',nodename,'got',expect_yes)
1039                             overall = False
1040                     except:
1041                         if not self.options.dry_run:
1042                             print('Cannot find tag', nodegroupname, 'on node', nodename)
1043                             overall = False
1044             else:
1045                 try:
1046                     print('cleaning nodegroup', nodegroupname)
1047                     self.apiserver.DeleteNodeGroup(auth, nodegroupname)
1048                 except:
1049                     traceback.print_exc()
1050                     overall = False
1051         return overall
1052
1053     # a list of TestNode objs
1054     def all_nodes(self):
1055         nodes=[]
1056         for site_spec in self.plc_spec['sites']:
1057             test_site = TestSite(self,site_spec)
1058             for node_spec in site_spec['nodes']:
1059                 nodes.append(TestNode(self, test_site, node_spec))
1060         return nodes
1061
1062     # return a list of tuples (nodename,qemuname)
1063     def all_node_infos(self) :
1064         node_infos = []
1065         for site_spec in self.plc_spec['sites']:
1066             node_infos += [ (node_spec['node_fields']['hostname'], node_spec['host_box']) \
1067                                 for node_spec in site_spec['nodes'] ]
1068         return node_infos
1069
1070     def all_nodenames(self):
1071         return [ x[0] for x in self.all_node_infos() ]
1072     def all_reservable_nodenames(self):
1073         res = []
1074         for site_spec in self.plc_spec['sites']:
1075             for node_spec in site_spec['nodes']:
1076                 node_fields = node_spec['node_fields']
1077                 if 'node_type' in node_fields and node_fields['node_type'] == 'reservable':
1078                     res.append(node_fields['hostname'])
1079         return res
1080
1081     # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
1082     def nodes_check_boot_state(self, target_boot_state, timeout_minutes,
1083                                silent_minutes, period_seconds = 15):
1084         if self.options.dry_run:
1085             print('dry_run')
1086             return True
1087
1088         class CompleterTaskBootState(CompleterTask):
1089             def __init__(self, test_plc, hostname):
1090                 self.test_plc = test_plc
1091                 self.hostname = hostname
1092                 self.last_boot_state = 'undef'
1093             def actual_run(self):
1094                 try:
1095                     node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(),
1096                                                             [ self.hostname ],
1097                                                             ['boot_state'])[0]
1098                     self.last_boot_state = node['boot_state']
1099                     return self.last_boot_state == target_boot_state
1100                 except:
1101                     return False
1102             def message(self):
1103                 return "CompleterTaskBootState with node {}".format(self.hostname)
1104             def failure_epilogue(self):
1105                 print("node {} in state {} - expected {}"\
1106                     .format(self.hostname, self.last_boot_state, target_boot_state))
1107
1108         timeout = timedelta(minutes=timeout_minutes)
1109         graceout = timedelta(minutes=silent_minutes)
1110         period   = timedelta(seconds=period_seconds)
1111         # the nodes that haven't checked yet - start with a full list and shrink over time
1112         utils.header("checking nodes boot state (expected {})".format(target_boot_state))
1113         tasks = [ CompleterTaskBootState(self,hostname) \
1114                       for (hostname,_) in self.all_node_infos() ]
1115         message = 'check_boot_state={}'.format(target_boot_state)
1116         return Completer(tasks, message=message).run(timeout, graceout, period)
1117
1118     def nodes_booted(self):
1119         return self.nodes_check_boot_state('boot', timeout_minutes=30, silent_minutes=28)
1120
1121     def probe_kvm_iptables(self):
1122         (_,kvmbox) = self.all_node_infos()[0]
1123         TestSsh(kvmbox).run("iptables-save")
1124         return True
1125
1126     # probing nodes
1127     def check_nodes_ping(self, timeout_seconds=60, period_seconds=10):
1128         class CompleterTaskPingNode(CompleterTask):
1129             def __init__(self, hostname):
1130                 self.hostname = hostname
1131             def run(self, silent):
1132                 command="ping -c 1 -w 1 {} >& /dev/null".format(self.hostname)
1133                 return utils.system(command, silent=silent) == 0
1134             def failure_epilogue(self):
1135                 print("Cannot ping node with name {}".format(self.hostname))
1136         timeout = timedelta(seconds = timeout_seconds)
1137         graceout = timeout
1138         period = timedelta(seconds = period_seconds)
1139         node_infos = self.all_node_infos()
1140         tasks = [ CompleterTaskPingNode(h) for (h,_) in node_infos ]
1141         return Completer(tasks, message='ping_node').run(timeout, graceout, period)
1142
1143     # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1144     def ping_node(self):
1145         "Ping nodes"
1146         return self.check_nodes_ping()
1147
1148     def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period_seconds=15):
1149         # various delays
1150         timeout  = timedelta(minutes=timeout_minutes)
1151         graceout = timedelta(minutes=silent_minutes)
1152         period   = timedelta(seconds=period_seconds)
1153         vservername = self.vservername
1154         if debug:
1155             message = "debug"
1156             completer_message = 'ssh_node_debug'
1157             local_key = "keys/{vservername}-debug.rsa".format(**locals())
1158         else:
1159             message = "boot"
1160             completer_message = 'ssh_node_boot'
1161             local_key = "keys/key_admin.rsa"
1162         utils.header("checking ssh access to nodes (expected in {} mode)".format(message))
1163         node_infos = self.all_node_infos()
1164         tasks = [ CompleterTaskNodeSsh(nodename, qemuname, local_key,
1165                                         boot_state=message, dry_run=self.options.dry_run) \
1166                       for (nodename, qemuname) in node_infos ]
1167         return Completer(tasks, message=completer_message).run(timeout, graceout, period)
1168
1169     def ssh_node_debug(self):
1170         "Tries to ssh into nodes in debug mode with the debug ssh key"
1171         return self.check_nodes_ssh(debug = True,
1172                                     timeout_minutes = self.ssh_node_debug_timeout,
1173                                     silent_minutes = self.ssh_node_debug_silent)
1174
1175     def ssh_node_boot(self):
1176         "Tries to ssh into nodes in production mode with the root ssh key"
1177         return self.check_nodes_ssh(debug = False,
1178                                     timeout_minutes = self.ssh_node_boot_timeout,
1179                                     silent_minutes = self.ssh_node_boot_silent)
1180
1181     def node_bmlogs(self):
1182         "Checks that there's a non-empty dir. /var/log/bm/raw"
1183         return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw")) == 0
1184
1185     @node_mapper
1186     def qemu_local_init(self): pass
1187     @node_mapper
1188     def bootcd(self): pass
1189     @node_mapper
1190     def qemu_local_config(self): pass
1191     @node_mapper
1192     def qemu_export(self): pass
1193     @node_mapper
1194     def qemu_cleanlog(self): pass
1195     @node_mapper
1196     def nodestate_reinstall(self): pass
1197     @node_mapper
1198     def nodestate_upgrade(self): pass
1199     @node_mapper
1200     def nodestate_safeboot(self): pass
1201     @node_mapper
1202     def nodestate_boot(self): pass
1203     @node_mapper
1204     def nodestate_show(self): pass
1205     @node_mapper
1206     def nodedistro_f14(self): pass
1207     @node_mapper
1208     def nodedistro_f18(self): pass
1209     @node_mapper
1210     def nodedistro_f20(self): pass
1211     @node_mapper
1212     def nodedistro_f21(self): pass
1213     @node_mapper
1214     def nodedistro_f22(self): pass
1215     @node_mapper
1216     def nodedistro_show(self): pass
1217
1218     ### check hooks : invoke scripts from hooks/{node,slice}
1219     def check_hooks_node(self):
1220         return self.locate_first_node().check_hooks()
1221     def check_hooks_sliver(self) :
1222         return self.locate_first_sliver().check_hooks()
1223
1224     def check_hooks(self):
1225         "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1226         return self.check_hooks_node() and self.check_hooks_sliver()
1227
1228     ### initscripts
1229     def do_check_initscripts(self):
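        # a Completer task provides actual_run(), message() and failure_epilogue()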
1230         class CompleterTaskInitscript(CompleterTask):
1231             def __init__(self, test_sliver, stamp):
1232                 self.test_sliver = test_sliver
1233                 self.stamp = stamp
1234             def actual_run(self):
1235                 return self.test_sliver.check_initscript_stamp(self.stamp)
1236             def message(self):
1237                 return "initscript checker for {}".format(self.test_sliver.name())
1238             def failure_epilogue(self):
1239                 print("initscript stamp {} not found in sliver {}"\
1240                     .format(self.stamp, self.test_sliver.name()))
1241
1242         tasks = []
1243         for slice_spec in self.plc_spec['slices']:
1244             if 'initscriptstamp' not in slice_spec:
1245                 continue
1246             stamp = slice_spec['initscriptstamp']
1247             slicename = slice_spec['slice_fields']['name']
1248             for nodename in slice_spec['nodenames']:
1249                 print('nodename', nodename, 'slicename', slicename, 'stamp', stamp)
1250                 site,node = self.locate_node(nodename)
1251                 # xxx - passing the wrong site - probably harmless
1252                 test_site = TestSite(self, site)
1253                 test_slice = TestSlice(self, test_site, slice_spec)
1254                 test_node = TestNode(self, test_site, node)
1255                 test_sliver = TestSliver(self, test_node, test_slice)
1256                 tasks.append(CompleterTaskInitscript(test_sliver, stamp))
1257         return Completer(tasks, message='check_initscripts').\
1258             run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1259
1260     def check_initscripts(self):
1261         "check that the initscripts have triggered"
1262         return self.do_check_initscripts()
1263
1264     def initscripts(self):
1265         "create initscripts with PLCAPI"
1266         for initscript in self.plc_spec['initscripts']:
1267             utils.pprint('Adding Initscript in plc {}'.format(self.plc_spec['name']), initscript)
1268             self.apiserver.AddInitScript(self.auth_root(), initscript['initscript_fields'])
1269         return True
1270
1271     def delete_initscripts(self):
1272         "delete initscripts with PLCAPI"
1273         for initscript in self.plc_spec['initscripts']:
1274             initscript_name = initscript['initscript_fields']['name']
1275             print('Attempting to delete {} in plc {}'.format(initscript_name, self.plc_spec['name']))
1276             try:
1277                 self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
1278                 print(initscript_name, 'deleted')
1279             except:
1280                 print('deletion went wrong - probably did not exist')
1281         return True
1282
1283     ### manage slices
1284     def slices(self):
1285         "create slices with PLCAPI"
1286         return self.do_slices(action="add")
1287
1288     def delete_slices(self):
1289         "delete slices with PLCAPI"
1290         return self.do_slices(action="delete")
1291
1292     def fill_slices(self):
1293         "add nodes in slices with PLCAPI"
1294         return self.do_slices(action="fill")
1295
1296     def empty_slices(self):
1297         "remove nodes from slices with PLCAPI"
1298         return self.do_slices(action="empty")
1299
1300     def do_slices(self,  action="add"):
1301         for slice in self.plc_spec['slices']:
1302             site_spec = self.locate_site(slice['sitename'])
1303             test_site = TestSite(self,site_spec)
1304             test_slice=TestSlice(self,test_site,slice)
1305             if action == "delete":
1306                 test_slice.delete_slice()
1307             elif action == "fill":
1308                 test_slice.add_nodes()
1309             elif action == "empty":
1310                 test_slice.delete_nodes()
1311             else:
1312                 test_slice.create_slice()
1313         return True
1314
1315     @slice_mapper__tasks(20, 10, 15)
1316     def ssh_slice(self): pass
1317     @slice_mapper__tasks(20, 19, 15)
1318     def ssh_slice_off(self): pass
1319     @slice_mapper__tasks(1, 1, 15)
1320     def slice_fs_present(self): pass
1321     @slice_mapper__tasks(1, 1, 15)
1322     def slice_fs_deleted(self): pass
1323
1324     # use another name so we can exclude/ignore it from the tests on the nightly command line
1325     def ssh_slice_again(self): return self.ssh_slice()
1326     # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1327     # but for some reason the ignore-wrapping thing would not
1328
1329     @slice_mapper
1330     def ssh_slice_basics(self): pass
1331     @slice_mapper
1332     def check_vsys_defaults(self): pass
1333
1334     @node_mapper
1335     def keys_clear_known_hosts(self): pass
1336
1337     def plcapi_urls(self):
1338         """
1339         attempts to reach the PLCAPI with various forms for the URL
1340         """
1341         return PlcapiUrlScanner(self.auth_root(), ip=self.vserverip).scan()
1342
1343     def speed_up_slices(self):
1344         "tweak nodemanager cycle (wait time) to 30+/-10 s"
1345         return self._speed_up_slices (30, 10)
1346     def super_speed_up_slices(self):
1347         "dev mode: tweak nodemanager cycle (wait time) to 5+/-1 s"
1348         return self._speed_up_slices(5, 1)
1349
1350     def _speed_up_slices(self, p, r):
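        # p is the nodemanager period in seconds and r the random spread around it,
        # e.g. speed_up_slices() above asks for a 30+/-10 s cycle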
1351         # create the template on the server-side
1352         template = "{}.nodemanager".format(self.name())
1353         with open(template,"w") as template_file:
1354             template_file.write('OPTIONS="-p {} -r {} -d"\n'.format(p, r))
1355         in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1356         remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1357         self.test_ssh.copy_abs(template, remote)
1358         # Add a conf file
1359         if not self.apiserver.GetConfFiles(self.auth_root(),
1360                                            {'dest' : '/etc/sysconfig/nodemanager'}):
1361             self.apiserver.AddConfFile(self.auth_root(),
1362                                         {'dest' : '/etc/sysconfig/nodemanager',
1363                                          'source' : 'PlanetLabConf/nodemanager',
1364                                          'postinstall_cmd' : 'service nm restart',})
1365         return True
1366
1367     def debug_nodemanager(self):
1368         "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1369         template = "{}.nodemanager".format(self.name())
1370         with open(template,"w") as template_file:
1371             template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1372         in_vm = "/var/www/html/PlanetLabConf/nodemanager"
1373         remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1374         self.test_ssh.copy_abs(template, remote)
1375         return True
1376
1377     @node_mapper
1378     def qemu_start(self) : pass
1379
1380     @node_mapper
1381     def qemu_timestamp(self) : pass
1382
1383     @node_mapper
1384     def qemu_nodefamily(self): pass
1385
1386     # when a spec refers to a node possibly on another plc
1387     def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
1388         for plc in [ self ] + other_plcs:
1389             try:
1390                 return plc.locate_sliver_obj(nodename, slicename)
1391             except:
1392                 pass
1393         raise Exception("Cannot locate sliver {}@{} among all PLCs".format(nodename, slicename))
1394
1395     # implement this one as a cross step so that we can take advantage of different nodes
1396     # in multi-plcs mode
1397     def cross_check_tcp(self, other_plcs):
1398         "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1399         if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1400             utils.header("check_tcp: no/empty config found")
1401             return True
1402         specs = self.plc_spec['tcp_specs']
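        # each spec is expected to carry at least 'server_node', 'server_slice',
        # 'client_node', 'client_slice' and 'port', plus an optional 'client_connect'
        # to override the address that the client connects to (shape inferred from the code below)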
1403         overall = True
1404
1405         # first wait for the network to be up and ready from the slices
1406         class CompleterTaskNetworkReadyInSliver(CompleterTask):
1407             def __init__(self, test_sliver):
1408                 self.test_sliver = test_sliver
1409             def actual_run(self):
1410                 return self.test_sliver.check_tcp_ready(port = 9999)
1411             def message(self):
1412                 return "network ready checker for {}".format(self.test_sliver.name())
1413             def failure_epilogue(self):
1414                 print("could not bind port from sliver {}".format(self.test_sliver.name()))
1415
1416         sliver_specs = {}
1417         tasks = []
1418         managed_sliver_names = set()
1419         for spec in specs:
1420             # locate the TestSliver instances involved, and cache them in the spec instance
1421             spec['s_sliver'] = self.locate_sliver_obj_cross(spec['server_node'], spec['server_slice'], other_plcs)
1422             spec['c_sliver'] = self.locate_sliver_obj_cross(spec['client_node'], spec['client_slice'], other_plcs)
1423             message = "Will check TCP between s={} and c={}"\
1424                       .format(spec['s_sliver'].name(), spec['c_sliver'].name())
1425             if 'client_connect' in spec:
1426                 message += " (using {})".format(spec['client_connect'])
1427             utils.header(message)
1428             # we need to check network presence in both slivers, but also
1429             # avoid inserting the same sliver several times
1430             for sliver in [ spec['s_sliver'], spec['c_sliver'] ]:
1431                 if sliver.name() not in managed_sliver_names:
1432                     tasks.append(CompleterTaskNetworkReadyInSliver(sliver))
1433                     # remember this sliver's name in the set
1434                     managed_sliver_names.add(sliver.name())
1435
1436         # wait for the network to be OK in all the slivers involved
1437         if not Completer(tasks, message='check for network readiness in slivers').\
1438            run(timedelta(seconds=30), timedelta(seconds=24), period=timedelta(seconds=5)):
1439             return False
1440
1441         # run server and client
1442         for spec in specs:
1443             port = spec['port']
1444             # server side
1445             # the issue here is that we have the server run in background
1446             # and so we have no clue if it took off properly or not
1447             # looks like in some cases it does not
1448             address = spec['s_sliver'].test_node.name()
1449             if not spec['s_sliver'].run_tcp_server(address, port, timeout=20):
1450                 overall = False
1451                 break
1452
1453             # idem for the client side
1454             # use nodename from located sliver, unless 'client_connect' is set
1455             if 'client_connect' in spec:
1456                 destination = spec['client_connect']
1457             else:
1458                 destination = spec['s_sliver'].test_node.name()
1459             if not spec['c_sliver'].run_tcp_client(destination, port):
1460                 overall = False
1461         return overall
1462
1463     # painfully enough, we need to allow for some time as netflow might show up last
1464     def check_system_slice(self):
1465         "all nodes: check that a system slice is alive"
1466         # netflow currently not working in the lxc distro
1467         # drl not built at all in the wtx distro
1468         # if we find either of them we're happy
1469         return self.check_netflow() or self.check_drl()
1470
1471     # expose these
1472     def check_netflow(self): return self._check_system_slice('netflow')
1473     def check_drl(self): return self._check_system_slice('drl')
1474
1475     # we have the slices up already here, so it should not take too long
1476     def _check_system_slice(self, slicename, timeout_minutes=5, period_seconds=15):
1477         class CompleterTaskSystemSlice(CompleterTask):
1478             def __init__(self, test_node, dry_run):
1479                 self.test_node = test_node
1480                 self.dry_run = dry_run
1481             def actual_run(self):
1482                 return self.test_node._check_system_slice(slicename, dry_run=self.dry_run)
1483             def message(self):
1484                 return "System slice {} @ {}".format(slicename, self.test_node.name())
1485             def failure_epilogue(self):
1486                 print("COULD not find system slice {} @ {}".format(slicename, self.test_node.name()))
1487         timeout = timedelta(minutes=timeout_minutes)
1488         silent  = timedelta(0)
1489         period  = timedelta(seconds=period_seconds)
1490         tasks = [ CompleterTaskSystemSlice(test_node, self.options.dry_run) \
1491                       for test_node in self.all_nodes() ]
1492         return Completer(tasks, message='_check_system_slice').run(timeout, silent, period)
1493
1494     def plcsh_stress_test(self):
1495         "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1496         # install the stress-test in the plc image
1497         location = "/usr/share/plc_api/plcsh_stress_test.py"
1498         remote = "{}/{}".format(self.vm_root_in_host(), location)
1499         self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1500         command = location
1501         command += " -- --check"
1502         if self.options.size == 1:
1503             command +=  " --tiny"
1504         return self.run_in_guest(command) == 0
1505
1506     # populate runs the same utility with slightly different options
1507     # in particular it runs with --preserve (don't clean up) and without --check
1508     # also it gets run twice, once with the --foreign option for creating fake foreign entries
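    # (see the populate() method further down in this class)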
1509
1510     def sfa_install_all(self):
1511         "yum install sfa sfa-plc sfa-sfatables sfa-client"
1512         return (self.yum_install("sfa sfa-plc sfa-sfatables sfa-client") and
1513                 self.run_in_guest("systemctl enable sfa-registry")==0 and
1514                 self.run_in_guest("systemctl enable sfa-aggregate")==0)
1515
1516     def sfa_install_core(self):
1517         "yum install sfa"
1518         return self.yum_install("sfa")
1519
1520     def sfa_install_plc(self):
1521         "yum install sfa-plc"
1522         return self.yum_install("sfa-plc")
1523
1524     def sfa_install_sfatables(self):
1525         "yum install sfa-sfatables"
1526         return self.yum_install("sfa-sfatables")
1527
1528     # for some very odd reason, this sometimes fails with the following symptom
1529     # # yum install sfa-client
1530     # Setting up Install Process
1531     # ...
1532     # Downloading Packages:
1533     # Running rpm_check_debug
1534     # Running Transaction Test
1535     # Transaction Test Succeeded
1536     # Running Transaction
1537     # Transaction couldn't start:
1538     # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1539     # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1540     # even though in the same context I have
1541     # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1542     # Filesystem            Size  Used Avail Use% Mounted on
1543     # /dev/hdv1             806G  264G  501G  35% /
1544     # none                   16M   36K   16M   1% /tmp
1545     #
1546     # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1547     def sfa_install_client(self):
1548         "yum install sfa-client"
1549         first_try = self.yum_install("sfa-client")
1550         if first_try:
1551             return True
1552         utils.header("********** Regular yum failed - special workaround in place, 2nd chance")
1553         code, cached_rpm_path = \
1554                 utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\\*.rpm'))
1555         utils.header("cached_rpm_path=<<{}>>".format(cached_rpm_path))
1556         # just for checking
1557         self.run_in_guest("rpm -i {}".format(cached_rpm_path))
1558         return self.yum_check_installed("sfa-client")
1559
1560     def sfa_dbclean(self):
1561         "thoroughly wipes off the SFA database"
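        # try the successive command variants - presumably from different sfa releases - until one succeeds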
1562         return self.run_in_guest("sfaadmin reg nuke") == 0 or \
1563             self.run_in_guest("sfa-nuke.py") == 0 or \
1564             self.run_in_guest("sfa-nuke-plc.py") == 0 or \
1565             self.run_in_guest("sfaadmin registry nuke") == 0
1566
1567     def sfa_fsclean(self):
1568         "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1569         self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1570         return True
1571
1572     def sfa_plcclean(self):
1573         "cleans the PLC entries that were created as a side effect of running the script"
1574         # ignore result
1575         sfa_spec = self.plc_spec['sfa']
1576
1577         for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1578             login_base = auth_sfa_spec['login_base']
1579             try:
1580                 self.apiserver.DeleteSite(self.auth_root(),login_base)
1581             except:
1582                 print("Site {} already absent from PLC db".format(login_base))
1583
1584             for spec_name in ['pi_spec','user_spec']:
1585                 user_spec = auth_sfa_spec[spec_name]
1586                 username = user_spec['email']
1587                 try:
1588                     self.apiserver.DeletePerson(self.auth_root(),username)
1589                 except:
1590                     # this in fact is expected as sites delete their members
1591                     #print "User {} already absent from PLC db".format(username)
1592                     pass
1593
1594         print("REMEMBER TO RUN sfa_import AGAIN")
1595         return True
1596
1597     def sfa_uninstall(self):
1598         "uses rpm to uninstall sfa - ignore result"
1599         self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1600         self.run_in_guest("rm -rf /var/lib/sfa")
1601         self.run_in_guest("rm -rf /etc/sfa")
1602         self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1603         # xxx tmp
1604         self.run_in_guest("rpm -e --noscripts sfa-plc")
1605         return True
1606
1607     ### run unit tests for SFA
1608     # NOTE: on f14/i386, yum install sfa-tests fails for no apparent reason
1609     # Running Transaction
1610     # Transaction couldn't start:
1611     # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1612     # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1613     # no matter how many Gbs are available on the testplc
1614     # could not figure out what's wrong, so...
1615     # if the yum install phase fails, consider the test is successful
1616     # other combinations will eventually run it hopefully
1617     def sfa_utest(self):
1618         "yum install sfa-tests and run SFA unittests"
1619         self.run_in_guest("yum -y install sfa-tests")
1620         # failed to install - forget it
1621         if self.run_in_guest("rpm -q sfa-tests") != 0:
1622             utils.header("WARNING: SFA unit tests failed to install, ignoring")
1623             return True
1624         return self.run_in_guest("/usr/share/sfa/tests/testAll.py") == 0
1625
1626     ###
1627     def confdir(self):
1628         dirname = "conf.{}".format(self.plc_spec['name'])
1629         if not os.path.isdir(dirname):
1630             utils.system("mkdir -p {}".format(dirname))
1631         if not os.path.isdir(dirname):
1632             raise Exception("Cannot create config dir for plc {}".format(self.name()))
1633         return dirname
1634
1635     def conffile(self, filename):
1636         return "{}/{}".format(self.confdir(), filename)
1637     def confsubdir(self, dirname, clean, dry_run=False):
1638         subdirname = "{}/{}".format(self.confdir(), dirname)
1639         if clean:
1640             utils.system("rm -rf {}".format(subdirname))
1641         if not os.path.isdir(subdirname):
1642             utils.system("mkdir -p {}".format(subdirname))
1643         if not dry_run and not os.path.isdir(subdirname):
1644             raise Exception("Cannot create config subdir {} for plc {}".format(dirname, self.name()))
1645         return subdirname
1646
1647     def conffile_clean(self, filename):
1648         filename=self.conffile(filename)
1649         return utils.system("rm -rf {}".format(filename))==0
1650
1651     ###
1652     def sfa_configure(self):
1653         "run sfa-config-tty"
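        # build a scripted session for sfa-config-tty: each 'e <var>' line followed by its value
        # presumably edits one setting, 'w' writes the config, then the session is closed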
1654         tmpname = self.conffile("sfa-config-tty")
1655         with open(tmpname,'w') as fileconf:
1656             for var, value in self.plc_spec['sfa']['settings'].items():
1657                 fileconf.write('e {}\n{}\n'.format(var, value))
1658             fileconf.write('w\n')
1659             fileconf.write('R\n')
1660             fileconf.write('q\n')
1661         utils.system('cat {}'.format(tmpname))
1662         self.run_in_guest_piped('cat {}'.format(tmpname), 'sfa-config-tty')
1663         return True
1664
1665     def aggregate_xml_line(self):
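        # produces something like (made-up values):
        # <aggregate addr="192.168.0.10" hrn="onelab" port="12346"/>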
1666         port = self.plc_spec['sfa']['neighbours-port']
1667         return '<aggregate addr="{}" hrn="{}" port="{}"/>'\
1668             .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'], port)
1669
1670     def registry_xml_line(self):
1671         return '<registry addr="{}" hrn="{}" port="12345"/>'\
1672             .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
1673
1674
1675     # a cross step that takes all other plcs in argument
1676     def cross_sfa_configure(self, other_plcs):
1677         "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1678         # of course with a single plc, other_plcs is an empty list
1679         if not other_plcs:
1680             return True
1681         agg_fname = self.conffile("agg.xml")
1682         with open(agg_fname,"w") as out:
1683             out.write("<aggregates>{}</aggregates>\n"\
1684                       .format(" ".join([ plc.aggregate_xml_line() for plc in other_plcs ])))
1685         utils.header("(Over)wrote {}".format(agg_fname))
1686         reg_fname=self.conffile("reg.xml")
1687         with open(reg_fname,"w") as out:
1688             out.write("<registries>{}</registries>\n"\
1689                       .format(" ".join([ plc.registry_xml_line() for plc in other_plcs ])))
1690         utils.header("(Over)wrote {}".format(reg_fname))
1691         return self.test_ssh.copy_abs(agg_fname,
1692                                       '/{}/etc/sfa/aggregates.xml'.format(self.vm_root_in_host())) == 0 \
1693            and self.test_ssh.copy_abs(reg_fname,
1694                                       '/{}/etc/sfa/registries.xml'.format(self.vm_root_in_host())) == 0
1695
1696     def sfa_import(self):
1697         "use sfaadmin to import from plc"
1698         auth = self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']
1699         return self.run_in_guest('sfaadmin reg import_registry') == 0
1700
1701     def sfa_start(self):
1702         "start SFA through systemctl"
1703         return (self.start_stop_systemd('sfa-registry', 'start') and
1704                 self.start_stop_systemd('sfa-aggregate', 'start'))
1705
1706
1707     def sfi_configure(self):
1708         "Create /root/sfi on the plc side for sfi client configuration"
1709         if self.options.dry_run:
1710             utils.header("DRY RUN - skipping step")
1711             return True
1712         sfa_spec = self.plc_spec['sfa']
1713         # cannot use auth_sfa_mapper to pass dir_name
1714         for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1715             test_slice = TestAuthSfa(self, slice_spec)
1716             dir_basename = os.path.basename(test_slice.sfi_path())
1717             dir_name = self.confsubdir("dot-sfi/{}".format(dir_basename),
1718                                        clean=True, dry_run=self.options.dry_run)
1719             test_slice.sfi_configure(dir_name)
1720             # push into the remote /root/sfi area
1721             location = test_slice.sfi_path()
1722             remote = "{}/{}".format(self.vm_root_in_host(), location)
1723             self.test_ssh.mkdir(remote, abs=True)
1724             # need to strip the last level of remote, otherwise we get an extra dir level
1725             self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1726
1727         return True
1728
1729     def sfi_clean(self):
1730         "clean up /root/sfi on the plc side"
1731         self.run_in_guest("rm -rf /root/sfi")
1732         return True
1733
1734     def sfa_rspec_empty(self):
1735         "expose a static empty rspec (ships with the tests module) in the sfi directory"
1736         filename = "empty-rspec.xml"
1737         overall = True
1738         for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1739             test_slice = TestAuthSfa(self, slice_spec)
1740             in_vm = test_slice.sfi_path()
1741             remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
1742             if self.test_ssh.copy_abs(filename, remote) !=0:
1743                 overall = False
1744         return overall
1745
1746     @auth_sfa_mapper
1747     def sfa_register_site(self): pass
1748     @auth_sfa_mapper
1749     def sfa_register_pi(self): pass
1750     @auth_sfa_mapper
1751     def sfa_register_user(self): pass
1752     @auth_sfa_mapper
1753     def sfa_update_user(self): pass
1754     @auth_sfa_mapper
1755     def sfa_register_slice(self): pass
1756     @auth_sfa_mapper
1757     def sfa_renew_slice(self): pass
1758     @auth_sfa_mapper
1759     def sfa_get_expires(self): pass
1760     @auth_sfa_mapper
1761     def sfa_discover(self): pass
1762     @auth_sfa_mapper
1763     def sfa_rspec(self): pass
1764     @auth_sfa_mapper
1765     def sfa_allocate(self): pass
1766     @auth_sfa_mapper
1767     def sfa_allocate_empty(self): pass
1768     @auth_sfa_mapper
1769     def sfa_provision(self): pass
1770     @auth_sfa_mapper
1771     def sfa_provision_empty(self): pass
1772     @auth_sfa_mapper
1773     def sfa_describe(self): pass
1774     @auth_sfa_mapper
1775     def sfa_check_slice_plc(self): pass
1776     @auth_sfa_mapper
1777     def sfa_check_slice_plc_empty(self): pass
1778     @auth_sfa_mapper
1779     def sfa_update_slice(self): pass
1780     @auth_sfa_mapper
1781     def sfa_remove_user_from_slice(self): pass
1782     @auth_sfa_mapper
1783     def sfa_insert_user_in_slice(self): pass
1784     @auth_sfa_mapper
1785     def sfi_list(self): pass
1786     @auth_sfa_mapper
1787     def sfi_show_site(self): pass
1788     @auth_sfa_mapper
1789     def sfi_show_slice(self): pass
1790     @auth_sfa_mapper
1791     def sfi_show_slice_researchers(self): pass
1792     @auth_sfa_mapper
1793     def ssh_slice_sfa(self): pass
1794     @auth_sfa_mapper
1795     def sfa_delete_user(self): pass
1796     @auth_sfa_mapper
1797     def sfa_delete_slice(self): pass
1798
1799     def sfa_stop(self):
1800         "stop SFA through systemctl"
1801         return (self.start_stop_systemd('sfa-aggregate', 'stop') and
1802                 self.start_stop_systemd('sfa-registry', 'stop'))
1803
1804     def populate(self):
1805         "creates random entries in the PLCAPI"
1806         # install the stress-test in the plc image
1807         location = "/usr/share/plc_api/plcsh_stress_test.py"
1808         remote = "{}/{}".format(self.vm_root_in_host(), location)
1809         self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
1810         command = location
1811         command += " -- --preserve --short-names"
1812         local = (self.run_in_guest(command) == 0)
1813         # second run with --foreign
1814         command += ' --foreign'
1815         remote = (self.run_in_guest(command) == 0)
1816         return local and remote
1817
1818
1819     ####################
1820     @bonding_redirector
1821     def bonding_init_partial(self): pass
1822
1823     @bonding_redirector
1824     def bonding_add_yum(self): pass
1825
1826     @bonding_redirector
1827     def bonding_install_rpms(self): pass
1828
1829     ####################
1830
1831     def gather_logs(self):
1832         "gets all possible logs from plc's/qemu node's/slice's for future reference"
1833         # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1834         # (1.b) get the plc's  /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1835         # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1836         # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1837         # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
1838         # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1839         # (1.a)
1840         print("-------------------- TestPlc.gather_logs : PLC's /var/log")
1841         self.gather_var_logs()
1842         # (1.b)
1843         print("-------------------- TestPlc.gather_logs : PLC's /var/lib/pgsql/data/pg_log/")
1844         self.gather_pgsql_logs()
1845         # (1.c)
1846         print("-------------------- TestPlc.gather_logs : PLC's /root/sfi/")
1847         self.gather_root_sfi()
1848         # (2)
1849         print("-------------------- TestPlc.gather_logs : nodes' QEMU logs")
1850         for site_spec in self.plc_spec['sites']:
1851             test_site = TestSite(self,site_spec)
1852             for node_spec in site_spec['nodes']:
1853                 test_node = TestNode(self, test_site, node_spec)
1854                 test_node.gather_qemu_logs()
1855         # (3)
1856         print("-------------------- TestPlc.gather_logs : nodes' /var/log")
1857         self.gather_nodes_var_logs()
1858         # (4)
1859         print("-------------------- TestPlc.gather_logs : sample sliver's /var/log")
1860         self.gather_slivers_var_logs()
1861         return True
1862
1863     def gather_slivers_var_logs(self):
1864         for test_sliver in self.all_sliver_objs():
1865             remote = test_sliver.tar_var_logs()
1866             utils.system("mkdir -p logs/sliver.var-log.{}".format(test_sliver.name()))
1867             command = remote + " | tar -C logs/sliver.var-log.{} -xf -".format(test_sliver.name())
1868             utils.system(command)
1869         return True
1870
1871     def gather_var_logs(self):
1872         utils.system("mkdir -p logs/myplc.var-log.{}".format(self.name()))
1873         to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1874         command = to_plc + "| tar -C logs/myplc.var-log.{} -xf -".format(self.name())
1875         utils.system(command)
1876         command = "chmod a+r,a+x logs/myplc.var-log.{}/httpd".format(self.name())
1877         utils.system(command)
1878
1879     def gather_pgsql_logs(self):
1880         utils.system("mkdir -p logs/myplc.pgsql-log.{}".format(self.name()))
1881         to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1882         command = to_plc + "| tar -C logs/myplc.pgsql-log.{} -xf -".format(self.name())
1883         utils.system(command)
1884
1885     def gather_root_sfi(self):
1886         utils.system("mkdir -p logs/sfi.{}".format(self.name()))
1887         to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1888         command = to_plc + "| tar -C logs/sfi.{} -xf -".format(self.name())
1889         utils.system(command)
1890
1891     def gather_nodes_var_logs(self):
1892         for site_spec in self.plc_spec['sites']:
1893             test_site = TestSite(self, site_spec)
1894             for node_spec in site_spec['nodes']:
1895                 test_node = TestNode(self, test_site, node_spec)
1896                 test_ssh = TestSsh(test_node.name(), key="keys/key_admin.rsa")
1897                 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1898                 command = command + "| tar -C logs/node.var-log.{} -xf -".format(test_node.name())
1899                 utils.system("mkdir -p logs/node.var-log.{}".format(test_node.name()))
1900                 utils.system(command)
1901
1902
1903     # returns the filename to use for sql dump/restore, using options.dbname if set
1904     def dbfile(self, database):
1905         # uses options.dbname if it is found
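        # otherwise fall back to today's date, e.g. dbfile("planetlab5") -> /root/planetlab5-2014-05-23.sql (made-up date)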
1906         try:
1907             name = self.options.dbname
1908             if not isinstance(name, str):
1909                 raise Exception
1910         except:
1911             t = datetime.now()
1912             d = t.date()
1913             name = str(d)
1914         return "/root/{}-{}.sql".format(database, name)
1915
1916     def plc_db_dump(self):
1917         'dump the planetlab5 DB in /root in the PLC - filename has time'
1918         dump = self.dbfile("planetlab5")
1919         self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1920         utils.header('Dumped planetlab5 database in {}'.format(dump))
1921         return True
1922
1923     def plc_db_restore(self):
1924         'restore the planetlab5 DB - looks broken, but run -n might help'
1925         dump = self.dbfile("planetlab5")
1926         self.run_in_guest('systemctl stop httpd')
1927         # xxx - need another wrapper
1928         self.run_in_guest_piped('echo drop database planetlab5', 'psql --user=pgsqluser template1')
1929         self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1930         self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump)
1931         # restart the httpd service
1932         self.run_in_guest('systemctl start httpd')
1933
1934         utils.header('Database restored from ' + dump)
1935         return True

1936     @staticmethod
1937     def create_ignore_steps():
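        # e.g. for a step named 'sfa_import' this attaches TestPlc.sfa_import_ignore,
        # i.e. the ignore_result-wrapped variant of that step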
1938         for step in TestPlc.default_steps + TestPlc.other_steps:
1939             # default step can have a plc qualifier
1940             if '@' in step:
1941                 step, qualifier = step.split('@')
1942             # or be defined as forced or ignored by default
1943             for keyword in ['_ignore','_force']:
1944                 if step.endswith(keyword):
1945                     step=step.replace(keyword,'')
1946             if step == SEP or step == SEPSFA :
1947                 continue
1948             method = getattr(TestPlc,step)
1949             name = step + '_ignore'
1950             wrapped = ignore_result(method)
1951 #            wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
1952             setattr(TestPlc, name, wrapped)
1953
1954 #    @ignore_result
1955 #    def ssh_slice_again_ignore (self): pass
1956 #    @ignore_result
1957 #    def check_initscripts_ignore (self): pass
1958
1959     def standby_1_through_20(self):
1960         """convenience function to wait for a specified number of minutes"""
1961         pass
1962     @standby_generic
1963     def standby_1(): pass
1964     @standby_generic
1965     def standby_2(): pass
1966     @standby_generic
1967     def standby_3(): pass
1968     @standby_generic
1969     def standby_4(): pass
1970     @standby_generic
1971     def standby_5(): pass
1972     @standby_generic
1973     def standby_6(): pass
1974     @standby_generic
1975     def standby_7(): pass
1976     @standby_generic
1977     def standby_8(): pass
1978     @standby_generic
1979     def standby_9(): pass
1980     @standby_generic
1981     def standby_10(): pass
1982     @standby_generic
1983     def standby_11(): pass
1984     @standby_generic
1985     def standby_12(): pass
1986     @standby_generic
1987     def standby_13(): pass
1988     @standby_generic
1989     def standby_14(): pass
1990     @standby_generic
1991     def standby_15(): pass
1992     @standby_generic
1993     def standby_16(): pass
1994     @standby_generic
1995     def standby_17(): pass
1996     @standby_generic
1997     def standby_18(): pass
1998     @standby_generic
1999     def standby_19(): pass
2000     @standby_generic
2001     def standby_20(): pass
2002
2003     # convenience for debugging the test logic
2004     def yes(self): return True
2005     def no(self): return False
2006     def fail(self): return False