1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
10 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
24 from Completer import Completer, CompleterTask
26 has_sfa_cache_filename="sfa-cache"
28 # step methods must take (self) and return a boolean (options is a member of the class)
30 def standby(minutes,dry_run):
31 utils.header('Entering StandBy for %d minutes'%minutes)
35 time.sleep(60*minutes)
38 def standby_generic (func):
40 minutes=int(func.__name__.split("_")[1])
41 return standby(minutes,self.options.dry_run)
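# for illustration - not in the original file: standby_generic is the
# decorator factory behind the standby_<N> steps at the end of this class;
# assuming the elided wrapper plumbing, the intended expansion is e.g.
#
#     @standby_generic
#     def standby_5(): pass
#
# which yields a step named 'standby_5' that simply calls
# standby(5, self.options.dry_run)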
44 def node_mapper (method):
45 def map_on_nodes(self,*args, **kwds):
47 node_method = TestNode.__dict__[method.__name__]
48 for test_node in self.all_nodes():
49 if not node_method(test_node, *args, **kwds): overall=False
51 # maintain __name__ for ignore_result
52 map_on_nodes.__name__=method.__name__
53 # restore the doc text
54 map_on_nodes.__doc__=TestNode.__dict__[method.__name__].__doc__
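# for illustration - not in the original file: node_mapper lifts a TestNode
# method into a TestPlc step that loops over all nodes, presumably as in
#
#     @node_mapper
#     def qemu_start(self): pass
#
# the wrapped step invokes TestNode.qemu_start on every node from
# all_nodes() and reports False as soon as any of them fails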
57 def slice_mapper (method):
58 def map_on_slices(self):
60 slice_method = TestSlice.__dict__[method.__name__]
61 for slice_spec in self.plc_spec['slices']:
62 site_spec = self.locate_site (slice_spec['sitename'])
63 test_site = TestSite(self,site_spec)
64 test_slice=TestSlice(self,test_site,slice_spec)
65 if not slice_method(test_slice,self.options): overall=False
67 # maintain __name__ for ignore_result
68 map_on_slices.__name__=method.__name__
69 # restore the doc text
70 map_on_slices.__doc__=TestSlice.__dict__[method.__name__].__doc__
73 # run a step but return True so that we can go on
74 def ignore_result (method):
76 # ssh_slice_ignore->ssh_slice
77 ref_name=method.__name__.replace('_ignore','').replace('force_','')
78 ref_method=TestPlc.__dict__[ref_name]
79 result=ref_method(self)
80 print "Actual (but ignored) result for %(ref_name)s is %(result)s"%locals()
81 return Ignored (result)
82 name=method.__name__.replace('_ignore','').replace('force_','')
83 ignoring.__name__=name
84 ignoring.__doc__="ignored version of " + name
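# for illustration - not in the original file: ignore_result wraps a step so
# that its outcome is reported but can never fail the sequence, e.g.
#
#     @ignore_result
#     def ssh_slice_again_ignore(self): pass
#
# runs ssh_slice_again, prints the "Actual (but ignored) result" line,
# and returns an Ignored() wrapper so the run keeps going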
87 # a variant that expects the TestSlice method to return a list of CompleterTasks that
88 # are then merged into a single Completer run to avoid waiting for all the slices
89 # esp. useful when a test fails, of course
90 # because we need to pass arguments we use a class instead..
91 class slice_mapper__tasks (object):
92 # could not get this to work with named arguments
93 def __init__ (self,timeout_minutes,silent_minutes,period_seconds):
94 self.timeout=timedelta(minutes=timeout_minutes)
95 self.silent=timedelta(minutes=silent_minutes)
96 self.period=timedelta(seconds=period_seconds)
97 def __call__ (self, method):
99 # compute augmented method name
100 method_name = method.__name__ + "__tasks"
101 # locate in TestSlice
102 slice_method = TestSlice.__dict__[ method_name ]
105 for slice_spec in self.plc_spec['slices']:
106 site_spec = self.locate_site (slice_spec['sitename'])
107 test_site = TestSite(self,site_spec)
108 test_slice=TestSlice(self,test_site,slice_spec)
109 tasks += slice_method (test_slice, self.options)
110 return Completer (tasks).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
111 # restore the doc text from the TestSlice method even if a bit odd
112 wrappee.__doc__ = slice_method.__doc__
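# for illustration - not in the original file: this decorator is used further
# down as e.g.
#
#     @slice_mapper__tasks(20,10,15)
#     def ssh_slice(self): pass
#
# i.e. collect one CompleterTask list per slice (from TestSlice.ssh_slice__tasks)
# and run them all in a single Completer: 20-minute timeout, 10 silent
# minutes, 15-second polling period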
115 def auth_sfa_mapper (method):
118 auth_method = TestAuthSfa.__dict__[method.__name__]
119 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
120 test_auth=TestAuthSfa(self,auth_spec)
121 if not auth_method(test_auth,self.options): overall=False
123 # restore the doc text
124 actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
128 def __init__ (self,result):
138 'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
139 'plc_install', 'plc_configure', 'plc_start', SEP,
140 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
141 'plcapi_urls','speed_up_slices', SEP,
142 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
143 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
144 # keep this out of the way for now
145 'check_vsys_defaults_ignore', SEP,
146 # run this first off so it's easier to re-run on another qemu box
147 'qemu_kill_mine', SEP,
148 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
149 'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', SEP,
150 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
151 'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
152 'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
153 'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1',
154 'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
155 'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
156 'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
157 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
158 # but as the stress test might take a while, we sometimes missed the debug mode..
159 'probe_kvm_iptables',
160 'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
161 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts_ignore', SEP,
162 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
163 'cross_check_tcp@1', 'check_system_slice', SEP,
164 # check slices are turned off properly
165 'empty_slices', 'ssh_slice_off', 'slice_fs_deleted', SEP,
166 # check they are properly re-created with the same name
167 'fill_slices', 'ssh_slice_again_ignore', SEP,
168 'gather_logs_force', SEP,
171 'export', 'show_boxes', SEP,
172 'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
173 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
174 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
175 'delete_leases', 'list_leases', SEP,
177 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
178 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
179 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
180 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
181 'plc_db_dump' , 'plc_db_restore', SEP,
182 'check_netflow','check_drl', SEP,
183 'debug_nodemanager', 'slice_fs_present', SEP,
184 'standby_1_through_20','yes','no',SEP,
188 def printable_steps (list):
189 single_line=" ".join(list)+" "
190 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
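# for illustration - not in the original file: assuming SEP/SEPSFA are plain
# marker strings, printable_steps turns them into escaped line breaks, e.g.
#
#     printable_steps(['plc_install','plc_configure',SEP,'sfa_start'])
#
# renders as "plc_install plc_configure \" followed by "sfa_start " on the
# next line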
192 def valid_step (step):
193 return step != SEP and step != SEPSFA
195 # turn off the sfa-related steps when build has skipped SFA
196 # this was originally for centos5 but is still valid
197 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
199 def _has_sfa_cached (rpms_url):
200 if os.path.isfile(has_sfa_cache_filename):
201 cached=file(has_sfa_cache_filename).read()=="yes"
202 utils.header("build provides SFA (cached):%s"%cached)
204 # warning, we're now building 'sface' so let's be a bit more picky
205 # full builds are expected to return with 0 here
206 utils.header ("Checking if build provides SFA package...")
207 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)==0
208 encoded='yes' if retcod else 'no'
209 file(has_sfa_cache_filename,'w').write(encoded)
213 def check_whether_build_has_sfa (rpms_url):
214 has_sfa=TestPlc._has_sfa_cached(rpms_url)
216 utils.header("build does provide SFA")
218 # move all steps containing 'sfa' from default_steps to other_steps
219 utils.header("SFA package not found - removing steps with sfa or sfi")
220 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
221 TestPlc.other_steps += sfa_steps
222 for step in sfa_steps: TestPlc.default_steps.remove(step)
224 def __init__ (self,plc_spec,options):
225 self.plc_spec=plc_spec
227 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
228 self.vserverip=plc_spec['vserverip']
229 self.vservername=plc_spec['vservername']
230 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
231 self.apiserver=TestApiserver(self.url,options.dry_run)
232 (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
233 (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']
235 def has_addresses_api (self):
236 return self.apiserver.has_method('AddIpAddress')
239 name=self.plc_spec['name']
240 return "%s.%s"%(name,self.vservername)
243 return self.plc_spec['host_box']
246 return self.test_ssh.is_local()
248 # define the API methods on this object through xmlrpc
249 # would help, but not strictly necessary
253 def actual_command_in_guest (self,command, backslash=False):
254 raw1=self.host_to_guest(command)
255 raw2=self.test_ssh.actual_command(raw1,dry_run=self.options.dry_run, backslash=backslash)
258 def start_guest (self):
259 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),dry_run=self.options.dry_run))
261 def stop_guest (self):
262 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),dry_run=self.options.dry_run))
264 def run_in_guest (self,command,backslash=False):
265 raw=self.actual_command_in_guest(command,backslash)
266 return utils.system(raw)
268 def run_in_host (self,command):
269 return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
271 # backslashing turned out so awful at some point that I've turned off auto-backslashing
272 # see e.g. plc_start esp. the version for f14
273 # command gets run in the plc's vm
274 def host_to_guest(self,command):
275 # f14 still needs some extra help
276 if self.options.fcdistro == 'f14':
277 raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin %s" %(self.vservername,command)
279 raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env %s" %(self.vservername,command)
282 # this /vservers thing is legacy...
283 def vm_root_in_host(self):
284 return "/vservers/%s/"%(self.vservername)
286 def vm_timestamp_path (self):
287 return "/vservers/%s/%s.timestamp"%(self.vservername,self.vservername)
289 #start/stop the vserver
290 def start_guest_in_host(self):
291 return "virsh -c lxc:/// start %s"%(self.vservername)
293 def stop_guest_in_host(self):
294 return "virsh -c lxc:/// destroy %s"%(self.vservername)
297 def run_in_guest_piped (self,local,remote):
298 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
300 def yum_check_installed (self, rpms):
301 if isinstance (rpms, list):
303 return self.run_in_guest("rpm -q %s"%rpms)==0
305 # does a yum install in the vs, ignore yum retcod, check with rpm
306 def yum_install (self, rpms):
307 if isinstance (rpms, list):
309 self.run_in_guest("yum -y install %s"%rpms)
310 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
311 self.run_in_guest("yum-complete-transaction -y")
312 return self.yum_check_installed (rpms)
314 def auth_root (self):
315 return {'Username':self.plc_spec['PLC_ROOT_USER'],
316 'AuthMethod':'password',
317 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
318 'Role' : self.plc_spec['role']
320 def locate_site (self,sitename):
321 for site in self.plc_spec['sites']:
322 if site['site_fields']['name'] == sitename:
324 if site['site_fields']['login_base'] == sitename:
326 raise Exception,"Cannot locate site %s"%sitename
328 def locate_node (self,nodename):
329 for site in self.plc_spec['sites']:
330 for node in site['nodes']:
331 if node['name'] == nodename:
333 raise Exception,"Cannot locate node %s"%nodename
335 def locate_hostname (self,hostname):
336 for site in self.plc_spec['sites']:
337 for node in site['nodes']:
338 if node['node_fields']['hostname'] == hostname:
340 raise Exception,"Cannot locate hostname %s"%hostname
342 def locate_key (self,key_name):
343 for key in self.plc_spec['keys']:
344 if key['key_name'] == key_name:
346 raise Exception,"Cannot locate key %s"%key_name
348 def locate_private_key_from_key_names (self, key_names):
349 # locate the first avail. key
351 for key_name in key_names:
352 key_spec=self.locate_key(key_name)
353 test_key=TestKey(self,key_spec)
354 publickey=test_key.publicpath()
355 privatekey=test_key.privatepath()
356 if os.path.isfile(publickey) and os.path.isfile(privatekey):
358 if found: return privatekey
361 def locate_slice (self, slicename):
362 for slice in self.plc_spec['slices']:
363 if slice['slice_fields']['name'] == slicename:
365 raise Exception,"Cannot locate slice %s"%slicename
367 def all_sliver_objs (self):
369 for slice_spec in self.plc_spec['slices']:
370 slicename = slice_spec['slice_fields']['name']
371 for nodename in slice_spec['nodenames']:
372 result.append(self.locate_sliver_obj (nodename,slicename))
375 def locate_sliver_obj (self,nodename,slicename):
376 (site,node) = self.locate_node(nodename)
377 slice = self.locate_slice (slicename)
379 test_site = TestSite (self, site)
380 test_node = TestNode (self, test_site,node)
381 # xxx the slice site is assumed to be the node site - mhh - probably harmless
382 test_slice = TestSlice (self, test_site, slice)
383 return TestSliver (self, test_node, test_slice)
385 def locate_first_node(self):
386 nodename=self.plc_spec['slices'][0]['nodenames'][0]
387 (site,node) = self.locate_node(nodename)
388 test_site = TestSite (self, site)
389 test_node = TestNode (self, test_site,node)
392 def locate_first_sliver (self):
393 slice_spec=self.plc_spec['slices'][0]
394 slicename=slice_spec['slice_fields']['name']
395 nodename=slice_spec['nodenames'][0]
396 return self.locate_sliver_obj(nodename,slicename)
398 # all different hostboxes used in this plc
399 def get_BoxNodes(self):
400 # maps on sites and nodes, return [ (host_box,test_node) ]
402 for site_spec in self.plc_spec['sites']:
403 test_site = TestSite (self,site_spec)
404 for node_spec in site_spec['nodes']:
405 test_node = TestNode (self, test_site, node_spec)
406 if not test_node.is_real():
407 tuples.append( (test_node.host_box(),test_node) )
408 # transform into a dict { 'host_box' -> [ test_node .. ] }
410 for (box,node) in tuples:
411 if not result.has_key(box):
414 result[box].append(node)
417 # a step for checking this stuff
418 def show_boxes (self):
419 'print summary of nodes location'
420 for (box,nodes) in self.get_BoxNodes().iteritems():
421 print box,":"," + ".join( [ node.name() for node in nodes ] )
424 # make this a valid step
425 def qemu_kill_all(self):
426 'kill all qemu instances on the qemu boxes involved in this setup'
427 # this is the brute force version, kill all qemus on that host box
428 for (box,nodes) in self.get_BoxNodes().iteritems():
429 # pass the first nodename, as we don't push template-qemu on testboxes
430 nodedir=nodes[0].nodedir()
431 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
434 # make this a valid step
435 def qemu_list_all(self):
436 'list all qemu instances on the qemu boxes involved in this setup'
437 for (box,nodes) in self.get_BoxNodes().iteritems():
438 # this is the brute force version, kill all qemus on that host box
439 TestBoxQemu(box,self.options.buildname).qemu_list_all()
442 # kill only the qemus related to this test
443 def qemu_list_mine(self):
444 'list qemu instances for our nodes'
445 for (box,nodes) in self.get_BoxNodes().iteritems():
446 # the fine-grain version
451 # kill only the qemus related to this test
452 def qemu_clean_mine(self):
453 'cleanup (rm -rf) qemu instances for our nodes'
454 for (box,nodes) in self.get_BoxNodes().iteritems():
455 # the fine-grain version
460 # kill only the right qemus
461 def qemu_kill_mine(self):
462 'kill the qemu instances for our nodes'
463 for (box,nodes) in self.get_BoxNodes().iteritems():
464 # the fine-grain version
469 #################### display config
471 "show test configuration after localization"
476 # ugly hack to make sure 'run export' only reports about the 1st plc
477 # to avoid confusion - also we use 'inri_slice1' in various aliases..
480 "print cut'n paste-able stuff to export env variables to your shell"
481 # guess local domain from hostname
482 if TestPlc.exported_id>1:
483 print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
485 TestPlc.exported_id+=1
486 domain=socket.gethostname().split('.',1)[1]
487 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
488 print "export BUILD=%s"%self.options.buildname
489 print "export PLCHOSTLXC=%s"%fqdn
490 print "export GUESTNAME=%s"%self.plc_spec['vservername']
491 vplcname=self.plc_spec['vservername'].split('-')[-1]
492 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
493 # find hostname of first node
494 (hostname,qemubox) = self.all_node_infos()[0]
495 print "export KVMHOST=%s.%s"%(qemubox,domain)
496 print "export NODE=%s"%(hostname)
500 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
501 def show_pass (self,passno):
502 for (key,val) in self.plc_spec.iteritems():
503 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
507 self.display_site_spec(site)
508 for node in site['nodes']:
509 self.display_node_spec(node)
510 elif key=='initscripts':
511 for initscript in val:
512 self.display_initscript_spec (initscript)
515 self.display_slice_spec (slice)
518 self.display_key_spec (key)
520 if key not in ['sites','initscripts','slices','keys']:
521 print '+ ',key,':',val
523 def display_site_spec (self,site):
524 print '+ ======== site',site['site_fields']['name']
525 for (k,v) in site.iteritems():
526 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
529 print '+ ','nodes : ',
531 print node['node_fields']['hostname'],'',
537 print user['name'],'',
539 elif k == 'site_fields':
540 print '+ login_base',':',v['login_base']
541 elif k == 'address_fields':
547 def display_initscript_spec (self,initscript):
548 print '+ ======== initscript',initscript['initscript_fields']['name']
550 def display_key_spec (self,key):
551 print '+ ======== key',key['key_name']
553 def display_slice_spec (self,slice):
554 print '+ ======== slice',slice['slice_fields']['name']
555 for (k,v) in slice.iteritems():
568 elif k=='slice_fields':
569 print '+ fields',':',
570 print 'max_nodes=',v['max_nodes'],
575 def display_node_spec (self,node):
576 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
577 print "hostname=",node['node_fields']['hostname'],
578 print "ip=",node['interface_fields']['ip']
579 if self.options.verbose:
580 utils.pprint("node details",node,depth=3)
582 # another entry point for just showing the boxes involved
583 def display_mapping (self):
584 TestPlc.display_mapping_plc(self.plc_spec)
588 def display_mapping_plc (plc_spec):
589 print '+ MyPLC',plc_spec['name']
590 # WARNING this would not be right for lxc-based PLC's - should be harmless though
591 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
592 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
593 for site_spec in plc_spec['sites']:
594 for node_spec in site_spec['nodes']:
595 TestPlc.display_mapping_node(node_spec)
598 def display_mapping_node (node_spec):
599 print '+ NODE %s'%(node_spec['name'])
600 print '+\tqemu box %s'%node_spec['host_box']
601 print '+\thostname=%s'%node_spec['node_fields']['hostname']
603 # write a timestamp in /vservers/<>.timestamp
604 # cannot be inside the vserver, that causes vserver .. build to cough
605 def plcvm_timestamp (self):
606 "Create a timestamp to remember creation date for this plc"
608 # TODO-lxc check this one
609 # a first approx. is to store the timestamp close to the VM root like vs does
610 stamp_path=self.vm_timestamp_path ()
611 stamp_dir = os.path.dirname (stamp_path)
612 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
613 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
615 # this is called unconditionally at the beginning of the test sequence
616 # just in case this is a rerun, so if the vm is not running it's fine
617 def plcvm_delete(self):
618 "vserver delete the test myplc"
619 stamp_path=self.vm_timestamp_path()
620 self.run_in_host("rm -f %s"%stamp_path)
621 self.run_in_host("virsh -c lxc:// destroy %s"%self.vservername)
622 self.run_in_host("virsh -c lxc:// undefine %s"%self.vservername)
623 self.run_in_host("rm -fr /vservers/%s"%self.vservername)
627 # historically the build was being fetched by the tests
628 # now the build pushes itself as a subdir of the tests workdir
629 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
630 def plcvm_create (self):
631 "vserver creation (no install done)"
632 # push the local build/ dir to the testplc box
634 # a full path for the local calls
635 build_dir=os.path.dirname(sys.argv[0])
636 # sometimes this is empty - set to "." in such a case
637 if not build_dir: build_dir="."
638 build_dir += "/build"
640 # use a standard name - will be relative to remote buildname
642 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
643 self.test_ssh.rmdir(build_dir)
644 self.test_ssh.copy(build_dir,recursive=True)
645 # the repo url is taken from arch-rpms-url
646 # with the last step (i386) removed
647 repo_url = self.options.arch_rpms_url
648 for level in [ 'arch' ]:
649 repo_url = os.path.dirname(repo_url)
651 # invoke initvm (drop support for vs)
652 script="lbuild-initvm.sh"
654 # pass the vbuild-nightly options to [lv]test-initvm
655 script_options += " -p %s"%self.options.personality
656 script_options += " -d %s"%self.options.pldistro
657 script_options += " -f %s"%self.options.fcdistro
658 script_options += " -r %s"%repo_url
659 vserver_name = self.vservername
661 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
662 script_options += " -n %s"%vserver_hostname
664 print "Cannot reverse lookup %s"%self.vserverip
665 print "This is considered fatal, as this might pollute the test results"
667 create_vserver="%(build_dir)s/%(script)s %(script_options)s %(vserver_name)s"%locals()
668 return self.run_in_host(create_vserver) == 0
671 def plc_install(self):
672 "yum install myplc, noderepo, and the plain bootstrapfs"
674 # workaround for getting pgsql8.2 on centos5
675 if self.options.fcdistro == "centos5":
676 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
679 if self.options.personality == "linux32":
681 elif self.options.personality == "linux64":
684 raise Exception, "Unsupported personality %r"%self.options.personality
685 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
688 pkgs_list.append ("slicerepo-%s"%nodefamily)
689 pkgs_list.append ("myplc")
690 pkgs_list.append ("noderepo-%s"%nodefamily)
691 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
692 pkgs_string=" ".join(pkgs_list)
693 return self.yum_install (pkgs_list)
696 def mod_python(self):
697 """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
698 return self.yum_install ( [ 'mod_python' ] )
701 def plc_configure(self):
703 tmpname='%s.plc-config-tty'%(self.name())
704 fileconf=open(tmpname,'w')
705 for var in [ 'PLC_NAME',
710 'PLC_MAIL_SUPPORT_ADDRESS',
713 # Above line was added for integrating SFA Testing
719 'PLC_RESERVATION_GRANULARITY',
721 'PLC_OMF_XMPP_SERVER',
724 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
725 fileconf.write('w\n')
726 fileconf.write('q\n')
728 utils.system('cat %s'%tmpname)
729 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
730 utils.system('rm %s'%tmpname)
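# for illustration - not in the original file: the temporary file fed to
# plc-config-tty is a scripted session, one 'e <var>' plus value pair per
# setting, then write and quit; with a hypothetical PLC_NAME of 'TestLab':
#
#     e PLC_NAME
#     TestLab
#     w
#     q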
733 # f14 is a bit odd in this respect, although this worked fine in guests up to f18
734 # however using a vplc guest under f20 requires this trick
735 # the symptom is this: service plc start
736 # Starting plc (via systemctl): Failed to get D-Bus connection: \
737 # Failed to connect to socket /org/freedesktop/systemd1/private: Connection refused
738 # weird thing is the doc says f14 uses upstart by default and not systemd
739 # so this sounds kind of harmless
740 def start_service (self,service): return self.start_stop_service (service,'start')
741 def stop_service (self,service): return self.start_stop_service (service,'stop')
743 def start_stop_service (self, service,start_or_stop):
744 "utility to start/stop a service with the special trick for f14"
745 if self.options.fcdistro != 'f14':
746 return self.run_in_guest ("service %s %s"%(service,start_or_stop))==0
748 # patch /sbin/service so it does not reset environment
749 self.run_in_guest ('sed -i -e \\"s,env -i,env,\\" /sbin/service')
750 # this is because our own scripts in turn call service
751 return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service %s %s"%(service,start_or_stop))==0
755 return self.start_service ('plc')
759 return self.stop_service ('plc')
761 def plcvm_start (self):
762 "start the PLC vserver"
766 def plcvm_stop (self):
767 "stop the PLC vserver"
771 # stores the keys from the config for further use
772 def keys_store(self):
773 "stores test users ssh keys in keys/"
774 for key_spec in self.plc_spec['keys']:
775 TestKey(self,key_spec).store_key()
778 def keys_clean(self):
779 "removes keys cached in keys/"
780 utils.system("rm -rf ./keys")
783 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
784 # for later direct access to the nodes
785 def keys_fetch(self):
786 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
788 if not os.path.isdir(dir):
790 vservername=self.vservername
791 vm_root=self.vm_root_in_host()
793 prefix = 'debug_ssh_key'
794 for ext in [ 'pub', 'rsa' ] :
795 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
796 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
797 if self.test_ssh.fetch(src,dst) != 0: overall=False
801 "create sites with PLCAPI"
802 return self.do_sites()
804 def delete_sites (self):
805 "delete sites with PLCAPI"
806 return self.do_sites(action="delete")
808 def do_sites (self,action="add"):
809 for site_spec in self.plc_spec['sites']:
810 test_site = TestSite (self,site_spec)
811 if (action != "add"):
812 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
813 test_site.delete_site()
814 # deleted with the site
815 #test_site.delete_users()
818 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
819 test_site.create_site()
820 test_site.create_users()
823 def delete_all_sites (self):
824 "Delete all sites in PLC, and related objects"
825 print 'auth_root',self.auth_root()
826 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
828 # keep the automatic site - otherwise we shoot ourselves in the foot, as root_auth is not valid anymore
829 if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
830 site_id=site['site_id']
831 print 'Deleting site_id',site_id
832 self.apiserver.DeleteSite(self.auth_root(),site_id)
836 "create nodes with PLCAPI"
837 return self.do_nodes()
838 def delete_nodes (self):
839 "delete nodes with PLCAPI"
840 return self.do_nodes(action="delete")
842 def do_nodes (self,action="add"):
843 for site_spec in self.plc_spec['sites']:
844 test_site = TestSite (self,site_spec)
846 utils.header("Deleting nodes in site %s"%test_site.name())
847 for node_spec in site_spec['nodes']:
848 test_node=TestNode(self,test_site,node_spec)
849 utils.header("Deleting %s"%test_node.name())
850 test_node.delete_node()
852 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
853 for node_spec in site_spec['nodes']:
854 utils.pprint('Creating node %s'%node_spec,node_spec)
855 test_node = TestNode (self,test_site,node_spec)
856 test_node.create_node ()
859 def nodegroups (self):
860 "create nodegroups with PLCAPI"
861 return self.do_nodegroups("add")
862 def delete_nodegroups (self):
863 "delete nodegroups with PLCAPI"
864 return self.do_nodegroups("delete")
868 def translate_timestamp (start,grain,timestamp):
869 if timestamp < TestPlc.YEAR: return start+timestamp*grain
870 else: return timestamp
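# for illustration - not in the original file: timestamps below TestPlc.YEAR
# (presumably one year's worth of seconds) are read as relative, in units of
# the lease granularity; larger values pass through as absolute epoch dates:
#
#     translate_timestamp(start, grain, 2)          -> start + 2*grain
#     translate_timestamp(start, grain, 1400000000) -> 1400000000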
873 def timestamp_printable (timestamp):
874 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
877 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
879 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
880 print 'API answered grain=',grain
881 start=(now/grain)*grain
883 # find out all nodes that are reservable
884 nodes=self.all_reservable_nodenames()
886 utils.header ("No reservable node found - proceeding without leases")
889 # attach them to the leases as specified in plc_specs
890 # this is where the 'leases' field gets interpreted as relative or absolute
891 for lease_spec in self.plc_spec['leases']:
892 # skip the ones that come with a null slice id
893 if not lease_spec['slice']: continue
894 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
895 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
896 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
897 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
898 if lease_addition['errors']:
899 utils.header("Cannot create leases, %s"%lease_addition['errors'])
902 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
903 (nodes,lease_spec['slice'],
904 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
905 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
909 def delete_leases (self):
910 "remove all leases in the myplc side"
911 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
912 utils.header("Cleaning leases %r"%lease_ids)
913 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
916 def list_leases (self):
917 "list all leases known to the myplc"
918 leases = self.apiserver.GetLeases(self.auth_root())
921 current=l['t_until']>=now
922 if self.options.verbose or current:
923 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
924 TestPlc.timestamp_printable(l['t_from']),
925 TestPlc.timestamp_printable(l['t_until'])))
928 # create nodegroups if needed, and populate
929 def do_nodegroups (self, action="add"):
930 # 1st pass to scan contents
932 for site_spec in self.plc_spec['sites']:
933 test_site = TestSite (self,site_spec)
934 for node_spec in site_spec['nodes']:
935 test_node=TestNode (self,test_site,node_spec)
936 if node_spec.has_key('nodegroups'):
937 nodegroupnames=node_spec['nodegroups']
938 if isinstance(nodegroupnames,StringTypes):
939 nodegroupnames = [ nodegroupnames ]
940 for nodegroupname in nodegroupnames:
941 if not groups_dict.has_key(nodegroupname):
942 groups_dict[nodegroupname]=[]
943 groups_dict[nodegroupname].append(test_node.name())
944 auth=self.auth_root()
946 for (nodegroupname,group_nodes) in groups_dict.iteritems():
948 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
949 # first, check if the nodetagtype is here
950 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
952 tag_type_id = tag_types[0]['tag_type_id']
954 tag_type_id = self.apiserver.AddTagType(auth,
955 {'tagname':nodegroupname,
956 'description': 'for nodegroup %s'%nodegroupname,
958 print 'located tag (type)',nodegroupname,'as',tag_type_id
960 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
962 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
963 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
964 # set node tag on all nodes, value='yes'
965 for nodename in group_nodes:
967 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
969 traceback.print_exc()
970 print 'node',nodename,'seems to already have tag',nodegroupname
973 expect_yes = self.apiserver.GetNodeTags(auth,
974 {'hostname':nodename,
975 'tagname':nodegroupname},
976 ['value'])[0]['value']
977 if expect_yes != "yes":
978 print 'Mismatch node tag on node',nodename,'got',expect_yes
981 if not self.options.dry_run:
982 print 'Cannot find tag',nodegroupname,'on node',nodename
986 print 'cleaning nodegroup',nodegroupname
987 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
989 traceback.print_exc()
993 # a list of TestNode objs
994 def all_nodes (self):
996 for site_spec in self.plc_spec['sites']:
997 test_site = TestSite (self,site_spec)
998 for node_spec in site_spec['nodes']:
999 nodes.append(TestNode (self,test_site,node_spec))
1002 # return a list of tuples (nodename,qemuname)
1003 def all_node_infos (self) :
1005 for site_spec in self.plc_spec['sites']:
1006 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
1007 for node_spec in site_spec['nodes'] ]
1010 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
1011 def all_reservable_nodenames (self):
1013 for site_spec in self.plc_spec['sites']:
1014 for node_spec in site_spec['nodes']:
1015 node_fields=node_spec['node_fields']
1016 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
1017 res.append(node_fields['hostname'])
1020 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
1021 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period_seconds=15):
1022 if self.options.dry_run:
1026 class CompleterTaskBootState (CompleterTask):
1027 def __init__ (self, test_plc,hostname):
1028 self.test_plc=test_plc
1029 self.hostname=hostname
1030 self.last_boot_state='undef'
1031 def actual_run (self):
1033 node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), [ self.hostname ],
1035 self.last_boot_state = node['boot_state']
1036 return self.last_boot_state == target_boot_state
1040 return "CompleterTaskBootState with node %s"%self.hostname
1041 def failure_message (self):
1042 return "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)
1044 timeout = timedelta(minutes=timeout_minutes)
1045 graceout = timedelta(minutes=silent_minutes)
1046 period = timedelta(seconds=period_seconds)
1047 # the nodes that haven't checked yet - start with a full list and shrink over time
1048 utils.header("checking nodes boot state (expected %s)"%target_boot_state)
1049 tasks = [ CompleterTaskBootState (self,hostname) \
1050 for (hostname,_) in self.all_node_infos() ]
1051 return Completer (tasks).run (timeout, graceout, period)
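# for illustration - not in the original file: the timeout/graceout/period
# triple above is the recurring Completer pattern in this file - poll every
# 'period', stay quiet during 'graceout', give up after 'timeout'; a minimal
# sketch of a custom check along the same lines:
#
#     class CompleterTaskTrivial (CompleterTask):
#         def actual_run (self): return True
#         def failure_message (self): return "cannot happen"
#     Completer ([CompleterTaskTrivial()]).run (timedelta(minutes=1),
#                                               timedelta(0),
#                                               timedelta(seconds=5))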
1053 def nodes_booted(self):
1054 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
1056 def probe_kvm_iptables (self):
1057 (_,kvmbox) = self.all_node_infos()[0]
1058 TestSsh(kvmbox).run("iptables-save")
1062 def check_nodes_ping(self,timeout_seconds=120,period_seconds=10):
1063 class CompleterTaskPingNode (CompleterTask):
1064 def __init__ (self, hostname):
1065 self.hostname=hostname
1066 def run(self,silent):
1067 command="ping -c 1 -w 1 %s >& /dev/null"%self.hostname
1068 return utils.system (command, silent=silent)==0
1069 def failure_message (self):
1070 return "Cannot ping node with name %s"%self.hostname
1071 timeout=timedelta (seconds=timeout_seconds)
1073 period=timedelta (seconds=period_seconds)
1074 node_infos = self.all_node_infos()
1075 tasks = [ CompleterTaskPingNode (h) for (h,_) in node_infos ]
1076 return Completer (tasks).run (timeout, graceout, period)
1078 # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1079 def ping_node (self):
1081 return self.check_nodes_ping ()
1083 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period_seconds=15):
1084 class CompleterTaskNodeSsh (CompleterTask):
1085 def __init__ (self, hostname, qemuname, boot_state, local_key):
1086 self.hostname=hostname
1087 self.qemuname=qemuname
1088 self.boot_state=boot_state
1089 self.local_key=local_key
1090 def run (self, silent):
1091 command = TestSsh (self.hostname,key=self.local_key).actual_command("hostname;uname -a")
1092 return utils.system (command, silent=silent)==0
1093 def failure_message (self):
1094 return "Cannot reach %s @ %s in %s mode"%(self.hostname, self.qemuname, self.boot_state)
1097 timeout = timedelta(minutes=timeout_minutes)
1098 graceout = timedelta(minutes=silent_minutes)
1099 period = timedelta(seconds=period_seconds)
1100 vservername=self.vservername
1103 local_key = "keys/%(vservername)s-debug.rsa"%locals()
1106 local_key = "keys/key_admin.rsa"
1107 utils.header("checking ssh access to nodes (expected in %s mode)"%message)
1108 node_infos = self.all_node_infos()
1109 tasks = [ CompleterTaskNodeSsh (nodename, qemuname, message, local_key) \
1110 for (nodename,qemuname) in node_infos ]
1111 return Completer (tasks).run (timeout, graceout, period)
1113 def ssh_node_debug(self):
1114 "Tries to ssh into nodes in debug mode with the debug ssh key"
1115 return self.check_nodes_ssh(debug=True,
1116 timeout_minutes=self.ssh_node_debug_timeout,
1117 silent_minutes=self.ssh_node_debug_silent)
1119 def ssh_node_boot(self):
1120 "Tries to ssh into nodes in production mode with the root ssh key"
1121 return self.check_nodes_ssh(debug=False,
1122 timeout_minutes=self.ssh_node_boot_timeout,
1123 silent_minutes=self.ssh_node_boot_silent)
1125 def node_bmlogs(self):
1126 "Checks that there's a non-empty dir. /var/log/bm/raw"
1127 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw"))==0
1130 def qemu_local_init (self): pass
1132 def bootcd (self): pass
1134 def qemu_local_config (self): pass
1136 def nodestate_reinstall (self): pass
1138 def nodestate_safeboot (self): pass
1140 def nodestate_boot (self): pass
1142 def nodestate_show (self): pass
1144 def qemu_export (self): pass
1146 ### check hooks : invoke scripts from hooks/{node,slice}
1147 def check_hooks_node (self):
1148 return self.locate_first_node().check_hooks()
1149 def check_hooks_sliver (self) :
1150 return self.locate_first_sliver().check_hooks()
1152 def check_hooks (self):
1153 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1154 return self.check_hooks_node() and self.check_hooks_sliver()
1157 def do_check_initscripts(self):
1158 class CompleterTaskInitscript (CompleterTask):
1159 def __init__ (self, test_sliver, stamp):
1160 self.test_sliver=test_sliver
1162 def actual_run (self):
1163 return self.test_sliver.check_initscript_stamp (self.stamp)
1165 return "initscript checker for %s"%self.test_sliver.name()
1166 def failure_message (self):
1167 return "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())
1170 for slice_spec in self.plc_spec['slices']:
1171 if not slice_spec.has_key('initscriptstamp'):
1173 stamp=slice_spec['initscriptstamp']
1174 slicename=slice_spec['slice_fields']['name']
1175 for nodename in slice_spec['nodenames']:
1176 print 'nodename',nodename,'slicename',slicename,'stamp',stamp
1177 (site,node) = self.locate_node (nodename)
1178 # xxx - passing the wrong site - probably harmless
1179 test_site = TestSite (self,site)
1180 test_slice = TestSlice (self,test_site,slice_spec)
1181 test_node = TestNode (self,test_site,node)
1182 test_sliver = TestSliver (self, test_node, test_slice)
1183 tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
1184 return Completer (tasks).run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1186 def check_initscripts(self):
1187 "check that the initscripts have triggered"
1188 return self.do_check_initscripts()
1190 def initscripts (self):
1191 "create initscripts with PLCAPI"
1192 for initscript in self.plc_spec['initscripts']:
1193 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1194 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1197 def delete_initscripts (self):
1198 "delete initscripts with PLCAPI"
1199 for initscript in self.plc_spec['initscripts']:
1200 initscript_name = initscript['initscript_fields']['name']
1201 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1203 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1204 print initscript_name,'deleted'
1206 print 'deletion went wrong - probably did not exist'
1211 "create slices with PLCAPI"
1212 return self.do_slices(action="add")
1214 def delete_slices (self):
1215 "delete slices with PLCAPI"
1216 return self.do_slices(action="delete")
1218 def fill_slices (self):
1219 "add nodes in slices with PLCAPI"
1220 return self.do_slices(action="fill")
1222 def empty_slices (self):
1223 "remove nodes from slices with PLCAPI"
1224 return self.do_slices(action="empty")
1226 def do_slices (self, action="add"):
1227 for slice in self.plc_spec['slices']:
1228 site_spec = self.locate_site (slice['sitename'])
1229 test_site = TestSite(self,site_spec)
1230 test_slice=TestSlice(self,test_site,slice)
1231 if action == "delete":
1232 test_slice.delete_slice()
1233 elif action=="fill":
1234 test_slice.add_nodes()
1235 elif action=="empty":
1236 test_slice.delete_nodes()
1238 test_slice.create_slice()
1241 @slice_mapper__tasks(20,10,15)
1242 def ssh_slice(self): pass
1243 @slice_mapper__tasks(20,19,15)
1244 def ssh_slice_off (self): pass
1246 # use another name so we can exclude/ignore it from the tests on the nightly command line
1247 def ssh_slice_again(self): return self.ssh_slice()
1248 # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1249 # but for some reason the ignore-wrapping thing would not
1252 def ssh_slice_basics(self): pass
1254 def slice_fs_present(self): pass
1256 def slice_fs_deleted(self): pass
1258 def check_vsys_defaults(self): pass
1261 def keys_clear_known_hosts (self): pass
1263 def plcapi_urls (self):
1264 return PlcapiUrlScanner (self.auth_root(),ip=self.vserverip).scan()
1266 def speed_up_slices (self):
1267 "tweak nodemanager settings on all nodes using a conf file"
1268 # create the template on the server-side
1269 template="%s.nodemanager"%self.name()
1270 template_file = open (template,"w")
1271 template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
1272 template_file.close()
1273 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1274 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1275 self.test_ssh.copy_abs(template,remote)
1277 self.apiserver.AddConfFile (self.auth_root(),
1278 {'dest':'/etc/sysconfig/nodemanager',
1279 'source':'PlanetLabConf/nodemanager',
1280 'postinstall_cmd':'service nm restart',})
1283 def debug_nodemanager (self):
1284 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1285 template="%s.nodemanager"%self.name()
1286 template_file = open (template,"w")
1287 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1288 template_file.close()
1289 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1290 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1291 self.test_ssh.copy_abs(template,remote)
1295 def qemu_start (self) : pass
1298 def qemu_timestamp (self) : pass
1300 # when a spec refers to a node possibly on another plc
1301 def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
1302 for plc in [ self ] + other_plcs:
1304 return plc.locate_sliver_obj (nodename, slicename)
1307 raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)
1309 # implement this one as a cross step so that we can take advantage of different nodes
1310 # in multi-plcs mode
1311 def cross_check_tcp (self, other_plcs):
1312 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1313 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1314 utils.header ("check_tcp: no/empty config found")
1316 specs = self.plc_spec['tcp_specs']
1321 s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
1322 if not s_test_sliver.run_tcp_server(port,timeout=20):
1326 # idem for the client side
1327 c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
1328 # use nodename from the located sliver, unless 'client_connect' is set
1329 if 'client_connect' in spec:
1330 destination = spec['client_connect']
1332 destination=s_test_sliver.test_node.name()
1333 if not c_test_sliver.run_tcp_client(destination,port):
1337 # painfully enough, we need to allow for some time as netflow might show up last
1338 def check_system_slice (self):
1339 "all nodes: check that a system slice is alive"
1340 # netflow currently not working in the lxc distro
1341 # drl not built at all in the wtx distro
1342 # if we find either of them we're happy
1343 return self.check_netflow() or self.check_drl()
1346 def check_netflow (self): return self._check_system_slice ('netflow')
1347 def check_drl (self): return self._check_system_slice ('drl')
1349 # we have the slices up already here, so it should not take too long
1350 def _check_system_slice (self, slicename, timeout_minutes=5, period_seconds=15):
1351 class CompleterTaskSystemSlice (CompleterTask):
1352 def __init__ (self, test_node, dry_run):
1353 self.test_node=test_node
1354 self.dry_run=dry_run
1355 def actual_run (self):
1356 return self.test_node._check_system_slice (slicename, dry_run=self.dry_run)
1358 return "System slice %s @ %s"%(slicename, self.test_node.name())
1359 def failure_message (self):
1360 return "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
1361 timeout = timedelta(minutes=timeout_minutes)
1362 silent = timedelta (0)
1363 period = timedelta (seconds=period_seconds)
1364 tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
1365 for test_node in self.all_nodes() ]
1366 return Completer (tasks) . run (timeout, silent, period)
1368 def plcsh_stress_test (self):
1369 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1370 # install the stress-test in the plc image
1371 location = "/usr/share/plc_api/plcsh_stress_test.py"
1372 remote="%s/%s"%(self.vm_root_in_host(),location)
1373 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1375 command += " -- --check"
1376 if self.options.size == 1:
1377 command += " --tiny"
1378 return ( self.run_in_guest(command) == 0)
1380 # populate runs the same utility with slightly different options
1381 # in particular it runs with --preserve (don't cleanup) and without --check
1382 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1384 def sfa_install_all (self):
1385 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1386 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
1388 def sfa_install_core(self):
1390 return self.yum_install ("sfa")
1392 def sfa_install_plc(self):
1393 "yum install sfa-plc"
1394 return self.yum_install("sfa-plc")
1396 def sfa_install_sfatables(self):
1397 "yum install sfa-sfatables"
1398 return self.yum_install ("sfa-sfatables")
1400 # for some very odd reason, this sometimes fails with the following symptom
1401 # # yum install sfa-client
1402 # Setting up Install Process
1404 # Downloading Packages:
1405 # Running rpm_check_debug
1406 # Running Transaction Test
1407 # Transaction Test Succeeded
1408 # Running Transaction
1409 # Transaction couldn't start:
1410 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1411 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1412 # even though in the same context I have
1413 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1414 # Filesystem Size Used Avail Use% Mounted on
1415 # /dev/hdv1 806G 264G 501G 35% /
1416 # none 16M 36K 16M 1% /tmp
1418 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1419 def sfa_install_client(self):
1420 "yum install sfa-client"
1421 first_try=self.yum_install("sfa-client")
1422 if first_try: return True
1423 utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
1424 (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1425 utils.header("rpm_path=<<%s>>"%rpm_path)
1427 self.run_in_guest("rpm -i %s"%cached_rpm_path)
1428 return self.yum_check_installed ("sfa-client")
1430 def sfa_dbclean(self):
1431 "thoroughly wipes off the SFA database"
1432 return self.run_in_guest("sfaadmin reg nuke")==0 or \
1433 self.run_in_guest("sfa-nuke.py")==0 or \
1434 self.run_in_guest("sfa-nuke-plc.py")==0
1436 def sfa_fsclean(self):
1437 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1438 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1441 def sfa_plcclean(self):
1442 "cleans the PLC entries that were created as a side effect of running the script"
1444 sfa_spec=self.plc_spec['sfa']
1446 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1447 login_base=auth_sfa_spec['login_base']
1448 try: self.apiserver.DeleteSite (self.auth_root(),login_base)
1449 except: print "Site %s already absent from PLC db"%login_base
1451 for spec_name in ['pi_spec','user_spec']:
1452 user_spec=auth_sfa_spec[spec_name]
1453 username=user_spec['email']
1454 try: self.apiserver.DeletePerson(self.auth_root(),username)
1456 # this in fact is expected as sites delete their members
1457 #print "User %s already absent from PLC db"%username
1460 print "REMEMBER TO RUN sfa_import AGAIN"
1463 def sfa_uninstall(self):
1464 "uses rpm to uninstall sfa - ignore result"
1465 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1466 self.run_in_guest("rm -rf /var/lib/sfa")
1467 self.run_in_guest("rm -rf /etc/sfa")
1468 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1470 self.run_in_guest("rpm -e --noscripts sfa-plc")
1473 ### run unit tests for SFA
1474 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1475 # Running Transaction
1476 # Transaction couldn't start:
1477 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1478 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1479 # no matter how many Gbs are available on the testplc
1480 # could not figure out what's wrong, so...
1481 # if the yum install phase fails, consider the test is successful
1482 # other combinations will eventually run it hopefully
1483 def sfa_utest(self):
1484 "yum install sfa-tests and run SFA unittests"
1485 self.run_in_guest("yum -y install sfa-tests")
1486 # failed to install - forget it
1487 if self.run_in_guest("rpm -q sfa-tests")!=0:
1488 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1490 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1494 dirname="conf.%s"%self.plc_spec['name']
1495 if not os.path.isdir(dirname):
1496 utils.system("mkdir -p %s"%dirname)
1497 if not os.path.isdir(dirname):
1498 raise Exception,"Cannot create config dir for plc %s"%self.name()
1501 def conffile(self,filename):
1502 return "%s/%s"%(self.confdir(),filename)
1503 def confsubdir(self,dirname,clean,dry_run=False):
1504 subdirname="%s/%s"%(self.confdir(),dirname)
1506 utils.system("rm -rf %s"%subdirname)
1507 if not os.path.isdir(subdirname):
1508 utils.system("mkdir -p %s"%subdirname)
1509 if not dry_run and not os.path.isdir(subdirname):
1510 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1513 def conffile_clean (self,filename):
1514 filename=self.conffile(filename)
1515 return utils.system("rm -rf %s"%filename)==0
1518 def sfa_configure(self):
1519 "run sfa-config-tty"
1520 tmpname=self.conffile("sfa-config-tty")
1521 fileconf=open(tmpname,'w')
1522 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1523 'SFA_INTERFACE_HRN',
1524 'SFA_REGISTRY_LEVEL1_AUTH',
1525 'SFA_REGISTRY_HOST',
1526 'SFA_AGGREGATE_HOST',
1536 'SFA_GENERIC_FLAVOUR',
1537 'SFA_AGGREGATE_ENABLED',
1539 if self.plc_spec['sfa'].has_key(var):
1540 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1541 # the way plc_config handles booleans just sucks..
1544 if self.plc_spec['sfa'][var]: val='true'
1545 fileconf.write ('e %s\n%s\n'%(var,val))
1546 fileconf.write('w\n')
1547 fileconf.write('R\n')
1548 fileconf.write('q\n')
1550 utils.system('cat %s'%tmpname)
1551 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1554 def aggregate_xml_line(self):
1555 port=self.plc_spec['sfa']['neighbours-port']
1556 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1557 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
1559 def registry_xml_line(self):
1560 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1561 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
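# for illustration - not in the original file: with a hypothetical vserverip
# of 10.0.0.5 and root auth 'plctest', these two helpers render roughly as
#
#     <aggregate addr="10.0.0.5" hrn="plctest" port="12346"/>
#     <registry addr="10.0.0.5" hrn="plctest" port="12345"/>
#
# the aggregate port comes from the 'neighbours-port' entry in the sfa spec,
# the registry port is hardwired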
1564 # a cross step that takes all other plcs in argument
1565 def cross_sfa_configure(self, other_plcs):
1566 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1567 # of course with a single plc, other_plcs is an empty list
1570 agg_fname=self.conffile("agg.xml")
1571 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1572 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1573 utils.header ("(Over)wrote %s"%agg_fname)
1574 reg_fname=self.conffile("reg.xml")
1575 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1576 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1577 utils.header ("(Over)wrote %s"%reg_fname)
1578 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
1579 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
1581 def sfa_import(self):
1582 "use sfaadmin to import from plc"
1583 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1584 return self.run_in_guest('sfaadmin reg import_registry')==0
1586 def sfa_start(self):
1588 return self.start_service('sfa')
1591 def sfi_configure(self):
1592 "Create /root/sfi on the plc side for sfi client configuration"
1593 if self.options.dry_run:
1594 utils.header("DRY RUN - skipping step")
1596 sfa_spec=self.plc_spec['sfa']
1597 # cannot use auth_sfa_mapper to pass dir_name
1598 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1599 test_slice=TestAuthSfa(self,slice_spec)
1600 dir_basename=os.path.basename(test_slice.sfi_path())
1601 dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
1602 test_slice.sfi_configure(dir_name)
1603 # push into the remote /root/sfi area
1604 location = test_slice.sfi_path()
1605 remote="%s/%s"%(self.vm_root_in_host(),location)
1606 self.test_ssh.mkdir(remote,abs=True)
1607 # need to strip the last level of remote, otherwise we get an extra dir level
1608 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1612 def sfi_clean (self):
1613 "clean up /root/sfi on the plc side"
1614 self.run_in_guest("rm -rf /root/sfi")
1618 def sfa_register_site (self): pass
1620 def sfa_register_pi (self): pass
1622 def sfa_register_user(self): pass
1624 def sfa_update_user(self): pass
1626 def sfa_register_slice(self): pass
1628 def sfa_renew_slice(self): pass
1630 def sfa_discover(self): pass
1632 def sfa_create_slice(self): pass
1634 def sfa_check_slice_plc(self): pass
1636 def sfa_update_slice(self): pass
1638 def sfa_remove_user_from_slice(self): pass
1640 def sfa_insert_user_in_slice(self): pass
1642 def sfi_list(self): pass
1644 def sfi_show_site(self): pass
1646 def sfi_show_slice(self): pass
1648 def sfi_show_slice_researchers(self): pass
1650 def ssh_slice_sfa(self): pass
1652 def sfa_delete_user(self): pass
1654 def sfa_delete_slice(self): pass
1658 return self.stop_service ('sfa')
1660 def populate (self):
1661 "creates random entries in the PLCAPI"
1662 # install the stress-test in the plc image
1663 location = "/usr/share/plc_api/plcsh_stress_test.py"
1664 remote="%s/%s"%(self.vm_root_in_host(),location)
1665 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1667 command += " -- --preserve --short-names"
1668 local = (self.run_in_guest(command) == 0);
1669 # second run with --foreign
1670 command += ' --foreign'
1671 remote = (self.run_in_guest(command) == 0);
1672 return ( local and remote)
1674 def gather_logs (self):
1675 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1676 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1677 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1678 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1679 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1680 # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
1681 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1683 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1684 self.gather_var_logs ()
1686 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1687 self.gather_pgsql_logs ()
1689 print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
1690 self.gather_root_sfi ()
1692 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1693 for site_spec in self.plc_spec['sites']:
1694 test_site = TestSite (self,site_spec)
1695 for node_spec in site_spec['nodes']:
1696 test_node=TestNode(self,test_site,node_spec)
1697 test_node.gather_qemu_logs()
1699 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1700 self.gather_nodes_var_logs()
1702 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1703 self.gather_slivers_var_logs()
1706 def gather_slivers_var_logs(self):
1707 for test_sliver in self.all_sliver_objs():
1708 remote = test_sliver.tar_var_logs()
1709 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1710 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1711 utils.system(command)
1714 def gather_var_logs (self):
1715 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1716 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1717 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1718 utils.system(command)
1719 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1720 utils.system(command)
1722 def gather_pgsql_logs (self):
1723 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1724 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1725 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1726 utils.system(command)
1728 def gather_root_sfi (self):
1729 utils.system("mkdir -p logs/sfi.%s"%self.name())
1730 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1731 command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
1732 utils.system(command)
1734 def gather_nodes_var_logs (self):
1735 for site_spec in self.plc_spec['sites']:
1736 test_site = TestSite (self,site_spec)
1737 for node_spec in site_spec['nodes']:
1738 test_node=TestNode(self,test_site,node_spec)
1739 test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
1740 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1741 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1742 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1743 utils.system(command)
1746 # returns the filename to use for sql dump/restore, using options.dbname if set
1747 def dbfile (self, database):
1748 # uses options.dbname if it is found
1750 name=self.options.dbname
1751 if not isinstance(name,StringTypes):
1757 return "/root/%s-%s.sql"%(database,name)
1759 def plc_db_dump(self):
1760 'dump the planetlab5 DB in /root in the PLC - filename has time'
1761 dump=self.dbfile("planetlab5")
1762 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1763 utils.header('Dumped planetlab5 database in %s'%dump)
1766 def plc_db_restore(self):
1767 'restore the planetlab5 DB - looks broken, but run -n might help'
1768 dump=self.dbfile("planetlab5")
1769 ##stop httpd service
1770 self.run_in_guest('service httpd stop')
1771 # xxx - need another wrapper
1772 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1773 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1774 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1775 ##starting httpd service
1776 self.run_in_guest('service httpd start')
1778 utils.header('Database restored from ' + dump)
1781 def create_ignore_steps ():
1782 for step in TestPlc.default_steps + TestPlc.other_steps:
1783 # default step can have a plc qualifier
1784 if '@' in step: (step,qualifier)=step.split('@')
1785 # or be defined as forced or ignored by default
1786 for keyword in ['_ignore','_force']:
1787 if step.endswith (keyword): step=step.replace(keyword,'')
1788 if step == SEP or step == SEPSFA : continue
1789 method=getattr(TestPlc,step)
1791 wrapped=ignore_result(method)
1792 # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
1793 setattr(TestPlc, name, wrapped)
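# for illustration: the net effect is to synthesize an '_ignore' variant of
# every known step by wrapping it with ignore_result, equivalent to having
# written by hand the commented-out examples below: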
1796 # def ssh_slice_again_ignore (self): pass
1798 # def check_initscripts_ignore (self): pass
1800 def standby_1_through_20(self):
1801 """convenience function to wait for a specified number of minutes"""
1804 def standby_1(): pass
1806 def standby_2(): pass
1808 def standby_3(): pass
1810 def standby_4(): pass
1812 def standby_5(): pass
1814 def standby_6(): pass
1816 def standby_7(): pass
1818 def standby_8(): pass
1820 def standby_9(): pass
1822 def standby_10(): pass
1824 def standby_11(): pass
1826 def standby_12(): pass
1828 def standby_13(): pass
1830 def standby_14(): pass
1832 def standby_15(): pass
1834 def standby_16(): pass
1836 def standby_17(): pass
1838 def standby_18(): pass
1840 def standby_19(): pass
1842 def standby_20(): pass
1844 # convenience for debugging the test logic
1845 def yes (self): return True
1846 def no (self): return False