1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
10 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
24 from Completer import Completer, CompleterTask
26 has_sfa_cache_filename="sfa-cache"
28 # step methods must take (self) and return a boolean (options is a member of the class)
30 def standby(minutes,dry_run):
31 utils.header('Entering StandBy for %d mn'%minutes)
35 time.sleep(60*minutes)
38 def standby_generic (func):
40 minutes=int(func.__name__.split("_")[1])
41 return standby(minutes,self.options.dry_run)
44 def node_mapper (method):
45 def map_on_nodes(self,*args, **kwds):
47 node_method = TestNode.__dict__[method.__name__]
48 for test_node in self.all_nodes():
49 if not node_method(test_node, *args, **kwds): overall=False
51 # maintain __name__ for ignore_result
52 map_on_nodes.__name__=method.__name__
53 # restore the doc text
54 map_on_nodes.__doc__=TestNode.__dict__[method.__name__].__doc__
57 def slice_mapper (method):
58 def map_on_slices(self):
60 slice_method = TestSlice.__dict__[method.__name__]
61 for slice_spec in self.plc_spec['slices']:
62 site_spec = self.locate_site (slice_spec['sitename'])
63 test_site = TestSite(self,site_spec)
64 test_slice=TestSlice(self,test_site,slice_spec)
65 if not slice_method(test_slice,self.options): overall=False
67 # maintain __name__ for ignore_result
68 map_on_slices.__name__=method.__name__
69 # restore the doc text
70 map_on_slices.__doc__=TestSlice.__dict__[method.__name__].__doc__
73 # run a step but return True so that we can go on
74 def ignore_result (method):
76 # ssh_slice_ignore->ssh_slice
77 ref_name=method.__name__.replace('_ignore','').replace('force_','')
78 ref_method=TestPlc.__dict__[ref_name]
79 result=ref_method(self)
80 print "Actual (but ignored) result for %(ref_name)s is %(result)s"%locals()
81 return Ignored (result)
82 name=method.__name__.replace('_ignore','').replace('force_','')
83 ignoring.__name__=name
84 ignoring.__doc__="ignored version of " + name
87 # a variant that expects the TestSlice method to return a list of CompleterTasks that
88 # are then merged into a single Completer run to avoid wating for all the slices
89 # esp. useful when a test fails of course
90 # because we need to pass arguments we use a class instead..
91 class slice_mapper__tasks (object):
92 # could not get this to work with named arguments
93 def __init__ (self,timeout_minutes,silent_minutes,period_seconds):
94 self.timeout=timedelta(minutes=timeout_minutes)
95 self.silent=timedelta(minutes=silent_minutes)
96 self.period=timedelta(seconds=period_seconds)
97 def __call__ (self, method):
99 # compute augmented method name
100 method_name = method.__name__ + "__tasks"
101 # locate in TestSlice
102 slice_method = TestSlice.__dict__[ method_name ]
105 for slice_spec in self.plc_spec['slices']:
106 site_spec = self.locate_site (slice_spec['sitename'])
107 test_site = TestSite(self,site_spec)
108 test_slice=TestSlice(self,test_site,slice_spec)
109 tasks += slice_method (test_slice, self.options)
110 return Completer (tasks).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
111 # restore the doc text from the TestSlice method even if a bit odd
112 wrappee.__doc__ = slice_method.__doc__
115 def auth_sfa_mapper (method):
118 auth_method = TestAuthSfa.__dict__[method.__name__]
119 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
120 test_auth=TestAuthSfa(self,auth_spec)
121 if not auth_method(test_auth,self.options): overall=False
123 # restore the doc text
124 actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
128 def __init__ (self,result):
138 'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
139 'plc_install', 'plc_configure', 'plc_start', SEP,
140 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
141 'plcapi_urls','speed_up_slices', SEP,
142 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
143 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
144 # keep this our of the way for now
145 'check_vsys_defaults_ignore', SEP,
146 # run this first off so it's easier to re-run on another qemu box
147 'qemu_kill_mine', SEP,
148 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
149 'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', SEP,
150 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
151 'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
152 'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_renew_slice@1', SEPSFA,
153 'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
154 'sfi_list@1', 'sfi_show@1', 'sfa_utest@1', SEPSFA,
155 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
156 # but as the stress test might take a while, we sometimes missed the debug mode..
157 'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
158 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts_ignore', SEP,
159 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
160 'cross_check_tcp@1', 'check_system_slice', SEP,
161 # check slices are turned off properly
162 'empty_slices', 'ssh_slice_off', SEP,
163 # check they are properly re-created with the same name
164 'fill_slices', 'ssh_slice_again_ignore', SEP,
165 'gather_logs_force', SEP,
168 'export', 'show_boxes', SEP,
169 'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
170 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
171 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
172 'delete_leases', 'list_leases', SEP,
174 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
175 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
176 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
177 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
178 'plc_db_dump' , 'plc_db_restore', SEP,
179 'check_netflow','check_drl', SEP,
180 'debug_nodemanager', SEP,
181 'standby_1_through_20','yes','no',SEP,
185 def printable_steps (list):
186 single_line=" ".join(list)+" "
187 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
189 def valid_step (step):
190 return step != SEP and step != SEPSFA
192 # turn off the sfa-related steps when build has skipped SFA
193 # this was originally for centos5 but is still valid
194 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
196 def _has_sfa_cached (rpms_url):
197 if os.path.isfile(has_sfa_cache_filename):
198 cached=file(has_sfa_cache_filename).read()=="yes"
199 utils.header("build provides SFA (cached):%s"%cached)
201 # warning, we're now building 'sface' so let's be a bit more picky
202 # full builds are expected to return with 0 here
203 utils.header ("Checking if build provides SFA package...")
204 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)==0
205 encoded='yes' if retcod else 'no'
206 file(has_sfa_cache_filename,'w').write(encoded)
210 def check_whether_build_has_sfa (rpms_url):
211 has_sfa=TestPlc._has_sfa_cached(rpms_url)
213 utils.header("build does provide SFA")
215 # move all steps containing 'sfa' from default_steps to other_steps
216 utils.header("SFA package not found - removing steps with sfa or sfi")
217 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
218 TestPlc.other_steps += sfa_steps
219 for step in sfa_steps: TestPlc.default_steps.remove(step)
221 def __init__ (self,plc_spec,options):
222 self.plc_spec=plc_spec
224 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
225 self.vserverip=plc_spec['vserverip']
226 self.vservername=plc_spec['vservername']
227 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
228 self.apiserver=TestApiserver(self.url,options.dry_run)
229 (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
230 (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']
232 def has_addresses_api (self):
233 return self.apiserver.has_method('AddIpAddress')
236 name=self.plc_spec['name']
237 return "%s.%s"%(name,self.vservername)
240 return self.plc_spec['host_box']
243 return self.test_ssh.is_local()
245 # define the API methods on this object through xmlrpc
246 # would help, but not strictly necessary
250 def actual_command_in_guest (self,command, backslash=False):
251 raw1=self.host_to_guest(command)
252 raw2=self.test_ssh.actual_command(raw1,dry_run=self.options.dry_run, backslash=backslash)
255 def start_guest (self):
256 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),dry_run=self.options.dry_run))
258 def stop_guest (self):
259 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),dry_run=self.options.dry_run))
261 def run_in_guest (self,command,backslash=False):
262 raw=self.actual_command_in_guest(command,backslash)
263 return utils.system(raw)
265 def run_in_host (self,command):
266 return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
268 # backslashing turned out so awful at some point that I've turned off auto-backslashing
269 # see e.g. plc_start esp. the version for f14
270 #command gets run in the plc's vm
271 def host_to_guest(self,command):
272 # f14 still needs some extra help
273 if self.options.fcdistro == 'f14':
274 raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin %s" %(self.vservername,command)
276 raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env %s" %(self.vservername,command)
279 # this /vservers thing is legacy...
280 def vm_root_in_host(self):
281 return "/vservers/%s/"%(self.vservername)
283 def vm_timestamp_path (self):
284 return "/vservers/%s/%s.timestamp"%(self.vservername,self.vservername)
286 #start/stop the vserver
287 def start_guest_in_host(self):
288 return "virsh -c lxc:/// start %s"%(self.vservername)
290 def stop_guest_in_host(self):
291 return "virsh -c lxc:/// destroy %s"%(self.vservername)
294 def run_in_guest_piped (self,local,remote):
295 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
297 def yum_check_installed (self, rpms):
298 if isinstance (rpms, list):
300 return self.run_in_guest("rpm -q %s"%rpms)==0
302 # does a yum install in the vs, ignore yum retcod, check with rpm
303 def yum_install (self, rpms):
304 if isinstance (rpms, list):
306 self.run_in_guest("yum -y install %s"%rpms)
307 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
308 self.run_in_guest("yum-complete-transaction -y")
309 return self.yum_check_installed (rpms)
311 def auth_root (self):
312 return {'Username':self.plc_spec['PLC_ROOT_USER'],
313 'AuthMethod':'password',
314 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
315 'Role' : self.plc_spec['role']
317 def locate_site (self,sitename):
318 for site in self.plc_spec['sites']:
319 if site['site_fields']['name'] == sitename:
321 if site['site_fields']['login_base'] == sitename:
323 raise Exception,"Cannot locate site %s"%sitename
325 def locate_node (self,nodename):
326 for site in self.plc_spec['sites']:
327 for node in site['nodes']:
328 if node['name'] == nodename:
330 raise Exception,"Cannot locate node %s"%nodename
332 def locate_hostname (self,hostname):
333 for site in self.plc_spec['sites']:
334 for node in site['nodes']:
335 if node['node_fields']['hostname'] == hostname:
337 raise Exception,"Cannot locate hostname %s"%hostname
339 def locate_key (self,key_name):
340 for key in self.plc_spec['keys']:
341 if key['key_name'] == key_name:
343 raise Exception,"Cannot locate key %s"%key_name
345 def locate_private_key_from_key_names (self, key_names):
346 # locate the first avail. key
348 for key_name in key_names:
349 key_spec=self.locate_key(key_name)
350 test_key=TestKey(self,key_spec)
351 publickey=test_key.publicpath()
352 privatekey=test_key.privatepath()
353 if os.path.isfile(publickey) and os.path.isfile(privatekey):
355 if found: return privatekey
358 def locate_slice (self, slicename):
359 for slice in self.plc_spec['slices']:
360 if slice['slice_fields']['name'] == slicename:
362 raise Exception,"Cannot locate slice %s"%slicename
364 def all_sliver_objs (self):
366 for slice_spec in self.plc_spec['slices']:
367 slicename = slice_spec['slice_fields']['name']
368 for nodename in slice_spec['nodenames']:
369 result.append(self.locate_sliver_obj (nodename,slicename))
372 def locate_sliver_obj (self,nodename,slicename):
373 (site,node) = self.locate_node(nodename)
374 slice = self.locate_slice (slicename)
376 test_site = TestSite (self, site)
377 test_node = TestNode (self, test_site,node)
378 # xxx the slice site is assumed to be the node site - mhh - probably harmless
379 test_slice = TestSlice (self, test_site, slice)
380 return TestSliver (self, test_node, test_slice)
382 def locate_first_node(self):
383 nodename=self.plc_spec['slices'][0]['nodenames'][0]
384 (site,node) = self.locate_node(nodename)
385 test_site = TestSite (self, site)
386 test_node = TestNode (self, test_site,node)
389 def locate_first_sliver (self):
390 slice_spec=self.plc_spec['slices'][0]
391 slicename=slice_spec['slice_fields']['name']
392 nodename=slice_spec['nodenames'][0]
393 return self.locate_sliver_obj(nodename,slicename)
395 # all different hostboxes used in this plc
396 def get_BoxNodes(self):
397 # maps on sites and nodes, return [ (host_box,test_node) ]
399 for site_spec in self.plc_spec['sites']:
400 test_site = TestSite (self,site_spec)
401 for node_spec in site_spec['nodes']:
402 test_node = TestNode (self, test_site, node_spec)
403 if not test_node.is_real():
404 tuples.append( (test_node.host_box(),test_node) )
405 # transform into a dict { 'host_box' -> [ test_node .. ] }
407 for (box,node) in tuples:
408 if not result.has_key(box):
411 result[box].append(node)
414 # a step for checking this stuff
415 def show_boxes (self):
416 'print summary of nodes location'
417 for (box,nodes) in self.get_BoxNodes().iteritems():
418 print box,":"," + ".join( [ node.name() for node in nodes ] )
421 # make this a valid step
422 def qemu_kill_all(self):
423 'kill all qemu instances on the qemu boxes involved by this setup'
424 # this is the brute force version, kill all qemus on that host box
425 for (box,nodes) in self.get_BoxNodes().iteritems():
426 # pass the first nodename, as we don't push template-qemu on testboxes
427 nodedir=nodes[0].nodedir()
428 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
431 # make this a valid step
432 def qemu_list_all(self):
433 'list all qemu instances on the qemu boxes involved by this setup'
434 for (box,nodes) in self.get_BoxNodes().iteritems():
435 # this is the brute force version, kill all qemus on that host box
436 TestBoxQemu(box,self.options.buildname).qemu_list_all()
439 # kill only the qemus related to this test
440 def qemu_list_mine(self):
441 'list qemu instances for our nodes'
442 for (box,nodes) in self.get_BoxNodes().iteritems():
443 # the fine-grain version
448 # kill only the qemus related to this test
449 def qemu_clean_mine(self):
450 'cleanup (rm -rf) qemu instances for our nodes'
451 for (box,nodes) in self.get_BoxNodes().iteritems():
452 # the fine-grain version
457 # kill only the right qemus
458 def qemu_kill_mine(self):
459 'kill the qemu instances for our nodes'
460 for (box,nodes) in self.get_BoxNodes().iteritems():
461 # the fine-grain version
466 #################### display config
468 "show test configuration after localization"
473 # uggly hack to make sure 'run export' only reports about the 1st plc
474 # to avoid confusion - also we use 'inri_slice1' in various aliases..
477 "print cut'n paste-able stuff to export env variables to your shell"
478 # guess local domain from hostname
479 if TestPlc.exported_id>1:
480 print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
482 TestPlc.exported_id+=1
483 domain=socket.gethostname().split('.',1)[1]
484 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
485 print "export BUILD=%s"%self.options.buildname
486 print "export PLCHOSTLXC=%s"%fqdn
487 print "export GUESTNAME=%s"%self.plc_spec['vservername']
488 vplcname=self.plc_spec['vservername'].split('-')[-1]
489 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
490 # find hostname of first node
491 (hostname,qemubox) = self.all_node_infos()[0]
492 print "export KVMHOST=%s.%s"%(qemubox,domain)
493 print "export NODE=%s"%(hostname)
497 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
498 def show_pass (self,passno):
499 for (key,val) in self.plc_spec.iteritems():
500 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
504 self.display_site_spec(site)
505 for node in site['nodes']:
506 self.display_node_spec(node)
507 elif key=='initscripts':
508 for initscript in val:
509 self.display_initscript_spec (initscript)
512 self.display_slice_spec (slice)
515 self.display_key_spec (key)
517 if key not in ['sites','initscripts','slices','keys']:
518 print '+ ',key,':',val
520 def display_site_spec (self,site):
521 print '+ ======== site',site['site_fields']['name']
522 for (k,v) in site.iteritems():
523 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
526 print '+ ','nodes : ',
528 print node['node_fields']['hostname'],'',
534 print user['name'],'',
536 elif k == 'site_fields':
537 print '+ login_base',':',v['login_base']
538 elif k == 'address_fields':
544 def display_initscript_spec (self,initscript):
545 print '+ ======== initscript',initscript['initscript_fields']['name']
547 def display_key_spec (self,key):
548 print '+ ======== key',key['key_name']
550 def display_slice_spec (self,slice):
551 print '+ ======== slice',slice['slice_fields']['name']
552 for (k,v) in slice.iteritems():
565 elif k=='slice_fields':
566 print '+ fields',':',
567 print 'max_nodes=',v['max_nodes'],
572 def display_node_spec (self,node):
573 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
574 print "hostname=",node['node_fields']['hostname'],
575 print "ip=",node['interface_fields']['ip']
576 if self.options.verbose:
577 utils.pprint("node details",node,depth=3)
579 # another entry point for just showing the boxes involved
580 def display_mapping (self):
581 TestPlc.display_mapping_plc(self.plc_spec)
585 def display_mapping_plc (plc_spec):
586 print '+ MyPLC',plc_spec['name']
587 # WARNING this would not be right for lxc-based PLC's - should be harmless though
588 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
589 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
590 for site_spec in plc_spec['sites']:
591 for node_spec in site_spec['nodes']:
592 TestPlc.display_mapping_node(node_spec)
595 def display_mapping_node (node_spec):
596 print '+ NODE %s'%(node_spec['name'])
597 print '+\tqemu box %s'%node_spec['host_box']
598 print '+\thostname=%s'%node_spec['node_fields']['hostname']
600 # write a timestamp in /vservers/<>.timestamp
601 # cannot be inside the vserver, that causes vserver .. build to cough
602 def plcvm_timestamp (self):
603 "Create a timestamp to remember creation date for this plc"
605 # TODO-lxc check this one
606 # a first approx. is to store the timestamp close to the VM root like vs does
607 stamp_path=self.vm_timestamp_path ()
608 stamp_dir = os.path.dirname (stamp_path)
609 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
610 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
612 # this is called inconditionnally at the beginning of the test sequence
613 # just in case this is a rerun, so if the vm is not running it's fine
614 def plcvm_delete(self):
615 "vserver delete the test myplc"
616 stamp_path=self.vm_timestamp_path()
617 self.run_in_host("rm -f %s"%stamp_path)
618 self.run_in_host("virsh -c lxc:// destroy %s"%self.vservername)
619 self.run_in_host("virsh -c lxc:// undefine %s"%self.vservername)
620 self.run_in_host("rm -fr /vservers/%s"%self.vservername)
624 # historically the build was being fetched by the tests
625 # now the build pushes itself as a subdir of the tests workdir
626 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
627 def plcvm_create (self):
628 "vserver creation (no install done)"
629 # push the local build/ dir to the testplc box
631 # a full path for the local calls
632 build_dir=os.path.dirname(sys.argv[0])
633 # sometimes this is empty - set to "." in such a case
634 if not build_dir: build_dir="."
635 build_dir += "/build"
637 # use a standard name - will be relative to remote buildname
639 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
640 self.test_ssh.rmdir(build_dir)
641 self.test_ssh.copy(build_dir,recursive=True)
642 # the repo url is taken from arch-rpms-url
643 # with the last step (i386) removed
644 repo_url = self.options.arch_rpms_url
645 for level in [ 'arch' ]:
646 repo_url = os.path.dirname(repo_url)
648 # invoke initvm (drop support for vs)
649 script="lbuild-initvm.sh"
651 # pass the vbuild-nightly options to [lv]test-initvm
652 script_options += " -p %s"%self.options.personality
653 script_options += " -d %s"%self.options.pldistro
654 script_options += " -f %s"%self.options.fcdistro
655 script_options += " -r %s"%repo_url
656 vserver_name = self.vservername
658 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
659 script_options += " -n %s"%vserver_hostname
661 print "Cannot reverse lookup %s"%self.vserverip
662 print "This is considered fatal, as this might pollute the test results"
664 create_vserver="%(build_dir)s/%(script)s %(script_options)s %(vserver_name)s"%locals()
665 return self.run_in_host(create_vserver) == 0
668 def plc_install(self):
669 "yum install myplc, noderepo, and the plain bootstrapfs"
671 # workaround for getting pgsql8.2 on centos5
672 if self.options.fcdistro == "centos5":
673 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
676 if self.options.personality == "linux32":
678 elif self.options.personality == "linux64":
681 raise Exception, "Unsupported personality %r"%self.options.personality
682 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
685 pkgs_list.append ("slicerepo-%s"%nodefamily)
686 pkgs_list.append ("myplc")
687 pkgs_list.append ("noderepo-%s"%nodefamily)
688 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
689 pkgs_string=" ".join(pkgs_list)
690 return self.yum_install (pkgs_list)
693 def mod_python(self):
694 """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
695 return self.yum_install ( [ 'mod_python' ] )
698 def plc_configure(self):
700 tmpname='%s.plc-config-tty'%(self.name())
701 fileconf=open(tmpname,'w')
702 for var in [ 'PLC_NAME',
707 'PLC_MAIL_SUPPORT_ADDRESS',
710 # Above line was added for integrating SFA Testing
716 'PLC_RESERVATION_GRANULARITY',
718 'PLC_OMF_XMPP_SERVER',
721 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
722 fileconf.write('w\n')
723 fileconf.write('q\n')
725 utils.system('cat %s'%tmpname)
726 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
727 utils.system('rm %s'%tmpname)
730 # f14 is a bit odd in this respect, although this worked fine in guests up to f18
731 # however using a vplc guest under f20 requires this trick
732 # the symptom is this: service plc start
733 # Starting plc (via systemctl): Failed to get D-Bus connection: \
734 # Failed to connect to socket /org/freedesktop/systemd1/private: Connection refused
735 # weird thing is the doc says f14 uses upstart by default and not systemd
736 # so this sounds kind of harmless
737 def start_service (self,service): return self.start_stop_service (service,'start')
738 def stop_service (self,service): return self.start_stop_service (service,'stop')
740 def start_stop_service (self, service,start_or_stop):
741 "utility to start/stop a service with the special trick for f14"
742 if self.options.fcdistro != 'f14':
743 return self.run_in_guest ("service %s %s"%(service,start_or_stop))==0
745 # patch /sbin/service so it does not reset environment
746 self.run_in_guest ('sed -i -e \\"s,env -i,env,\\" /sbin/service')
747 # this is because our own scripts in turn call service
748 return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service %s %s"%(service,start_or_stop))==0
752 return self.start_service ('plc')
756 return self.stop_service ('plc')
758 def plcvm_start (self):
759 "start the PLC vserver"
763 def plcvm_stop (self):
764 "stop the PLC vserver"
768 # stores the keys from the config for further use
769 def keys_store(self):
770 "stores test users ssh keys in keys/"
771 for key_spec in self.plc_spec['keys']:
772 TestKey(self,key_spec).store_key()
775 def keys_clean(self):
776 "removes keys cached in keys/"
777 utils.system("rm -rf ./keys")
780 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
781 # for later direct access to the nodes
782 def keys_fetch(self):
783 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
785 if not os.path.isdir(dir):
787 vservername=self.vservername
788 vm_root=self.vm_root_in_host()
790 prefix = 'debug_ssh_key'
791 for ext in [ 'pub', 'rsa' ] :
792 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
793 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
794 if self.test_ssh.fetch(src,dst) != 0: overall=False
798 "create sites with PLCAPI"
799 return self.do_sites()
801 def delete_sites (self):
802 "delete sites with PLCAPI"
803 return self.do_sites(action="delete")
805 def do_sites (self,action="add"):
806 for site_spec in self.plc_spec['sites']:
807 test_site = TestSite (self,site_spec)
808 if (action != "add"):
809 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
810 test_site.delete_site()
811 # deleted with the site
812 #test_site.delete_users()
815 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
816 test_site.create_site()
817 test_site.create_users()
820 def delete_all_sites (self):
821 "Delete all sites in PLC, and related objects"
822 print 'auth_root',self.auth_root()
823 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
825 # keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
826 if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
827 site_id=site['site_id']
828 print 'Deleting site_id',site_id
829 self.apiserver.DeleteSite(self.auth_root(),site_id)
833 "create nodes with PLCAPI"
834 return self.do_nodes()
835 def delete_nodes (self):
836 "delete nodes with PLCAPI"
837 return self.do_nodes(action="delete")
839 def do_nodes (self,action="add"):
840 for site_spec in self.plc_spec['sites']:
841 test_site = TestSite (self,site_spec)
843 utils.header("Deleting nodes in site %s"%test_site.name())
844 for node_spec in site_spec['nodes']:
845 test_node=TestNode(self,test_site,node_spec)
846 utils.header("Deleting %s"%test_node.name())
847 test_node.delete_node()
849 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
850 for node_spec in site_spec['nodes']:
851 utils.pprint('Creating node %s'%node_spec,node_spec)
852 test_node = TestNode (self,test_site,node_spec)
853 test_node.create_node ()
856 def nodegroups (self):
857 "create nodegroups with PLCAPI"
858 return self.do_nodegroups("add")
859 def delete_nodegroups (self):
860 "delete nodegroups with PLCAPI"
861 return self.do_nodegroups("delete")
865 def translate_timestamp (start,grain,timestamp):
866 if timestamp < TestPlc.YEAR: return start+timestamp*grain
867 else: return timestamp
870 def timestamp_printable (timestamp):
871 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
874 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
876 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
877 print 'API answered grain=',grain
878 start=(now/grain)*grain
880 # find out all nodes that are reservable
881 nodes=self.all_reservable_nodenames()
883 utils.header ("No reservable node found - proceeding without leases")
886 # attach them to the leases as specified in plc_specs
887 # this is where the 'leases' field gets interpreted as relative of absolute
888 for lease_spec in self.plc_spec['leases']:
889 # skip the ones that come with a null slice id
890 if not lease_spec['slice']: continue
891 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
892 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
893 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
894 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
895 if lease_addition['errors']:
896 utils.header("Cannot create leases, %s"%lease_addition['errors'])
899 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
900 (nodes,lease_spec['slice'],
901 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
902 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
906 def delete_leases (self):
907 "remove all leases in the myplc side"
908 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
909 utils.header("Cleaning leases %r"%lease_ids)
910 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
913 def list_leases (self):
914 "list all leases known to the myplc"
915 leases = self.apiserver.GetLeases(self.auth_root())
918 current=l['t_until']>=now
919 if self.options.verbose or current:
920 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
921 TestPlc.timestamp_printable(l['t_from']),
922 TestPlc.timestamp_printable(l['t_until'])))
925 # create nodegroups if needed, and populate
926 def do_nodegroups (self, action="add"):
927 # 1st pass to scan contents
929 for site_spec in self.plc_spec['sites']:
930 test_site = TestSite (self,site_spec)
931 for node_spec in site_spec['nodes']:
932 test_node=TestNode (self,test_site,node_spec)
933 if node_spec.has_key('nodegroups'):
934 nodegroupnames=node_spec['nodegroups']
935 if isinstance(nodegroupnames,StringTypes):
936 nodegroupnames = [ nodegroupnames ]
937 for nodegroupname in nodegroupnames:
938 if not groups_dict.has_key(nodegroupname):
939 groups_dict[nodegroupname]=[]
940 groups_dict[nodegroupname].append(test_node.name())
941 auth=self.auth_root()
943 for (nodegroupname,group_nodes) in groups_dict.iteritems():
945 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
946 # first, check if the nodetagtype is here
947 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
949 tag_type_id = tag_types[0]['tag_type_id']
951 tag_type_id = self.apiserver.AddTagType(auth,
952 {'tagname':nodegroupname,
953 'description': 'for nodegroup %s'%nodegroupname,
955 print 'located tag (type)',nodegroupname,'as',tag_type_id
957 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
959 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
960 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
961 # set node tag on all nodes, value='yes'
962 for nodename in group_nodes:
964 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
966 traceback.print_exc()
967 print 'node',nodename,'seems to already have tag',nodegroupname
970 expect_yes = self.apiserver.GetNodeTags(auth,
971 {'hostname':nodename,
972 'tagname':nodegroupname},
973 ['value'])[0]['value']
974 if expect_yes != "yes":
975 print 'Mismatch node tag on node',nodename,'got',expect_yes
978 if not self.options.dry_run:
979 print 'Cannot find tag',nodegroupname,'on node',nodename
983 print 'cleaning nodegroup',nodegroupname
984 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
986 traceback.print_exc()
990 # a list of TestNode objs
991 def all_nodes (self):
993 for site_spec in self.plc_spec['sites']:
994 test_site = TestSite (self,site_spec)
995 for node_spec in site_spec['nodes']:
996 nodes.append(TestNode (self,test_site,node_spec))
999 # return a list of tuples (nodename,qemuname)
1000 def all_node_infos (self) :
1002 for site_spec in self.plc_spec['sites']:
1003 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
1004 for node_spec in site_spec['nodes'] ]
1007 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
1008 def all_reservable_nodenames (self):
1010 for site_spec in self.plc_spec['sites']:
1011 for node_spec in site_spec['nodes']:
1012 node_fields=node_spec['node_fields']
1013 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
1014 res.append(node_fields['hostname'])
1017 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
1018 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period_seconds=15):
1019 if self.options.dry_run:
1023 class CompleterTaskBootState (CompleterTask):
1024 def __init__ (self, test_plc,hostname):
1025 self.test_plc=test_plc
1026 self.hostname=hostname
1027 self.last_boot_state='undef'
1028 def actual_run (self):
1030 node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), [ self.hostname ],
1032 self.last_boot_state = node['boot_state']
1033 return self.last_boot_state == target_boot_state
1037 return "CompleterTaskBootState with node %s"%self.hostname
1038 def failure_message (self):
1039 return "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)
1041 timeout = timedelta(minutes=timeout_minutes)
1042 graceout = timedelta(minutes=silent_minutes)
1043 period = timedelta(seconds=period_seconds)
1044 # the nodes that haven't checked yet - start with a full list and shrink over time
1045 utils.header("checking nodes boot state (expected %s)"%target_boot_state)
1046 tasks = [ CompleterTaskBootState (self,hostname) \
1047 for (hostname,_) in self.all_node_infos() ]
1048 return Completer (tasks).run (timeout, graceout, period)
1050 def nodes_booted(self):
1051 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
1054 def check_nodes_ping(self,timeout_seconds=120,period_seconds=10):
1055 class CompleterTaskPingNode (CompleterTask):
1056 def __init__ (self, hostname):
1057 self.hostname=hostname
1058 def run(self,silent):
1059 command="ping -c 1 -w 1 %s >& /dev/null"%self.hostname
1060 return utils.system (command, silent=silent)==0
1061 def failure_message (self):
1062 return "Cannot ping node with name %s"%self.hostname
1063 timeout=timedelta (seconds=timeout_seconds)
1065 period=timedelta (seconds=period_seconds)
1066 node_infos = self.all_node_infos()
1067 tasks = [ CompleterTaskPingNode (h) for (h,_) in node_infos ]
1068 return Completer (tasks).run (timeout, graceout, period)
1070 # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1071 def ping_node (self):
1073 return self.check_nodes_ping ()
1075 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period_seconds=15):
1076 class CompleterTaskNodeSsh (CompleterTask):
1077 def __init__ (self, hostname, qemuname, boot_state, local_key):
1078 self.hostname=hostname
1079 self.qemuname=qemuname
1080 self.boot_state=boot_state
1081 self.local_key=local_key
1082 def run (self, silent):
1083 command = TestSsh (self.hostname,key=self.local_key).actual_command("hostname;uname -a")
1084 return utils.system (command, silent=silent)==0
1085 def failure_message (self):
1086 return "Cannot reach %s @ %s in %s mode"%(self.hostname, self.qemuname, self.boot_state)
1089 timeout = timedelta(minutes=timeout_minutes)
1090 graceout = timedelta(minutes=silent_minutes)
1091 period = timedelta(seconds=period_seconds)
1092 vservername=self.vservername
1095 local_key = "keys/%(vservername)s-debug.rsa"%locals()
1098 local_key = "keys/key_admin.rsa"
1099 utils.header("checking ssh access to nodes (expected in %s mode)"%message)
1100 node_infos = self.all_node_infos()
1101 tasks = [ CompleterTaskNodeSsh (nodename, qemuname, message, local_key) \
1102 for (nodename,qemuname) in node_infos ]
1103 return Completer (tasks).run (timeout, graceout, period)
1105 def ssh_node_debug(self):
1106 "Tries to ssh into nodes in debug mode with the debug ssh key"
1107 return self.check_nodes_ssh(debug=True,
1108 timeout_minutes=self.ssh_node_debug_timeout,
1109 silent_minutes=self.ssh_node_debug_silent)
1111 def ssh_node_boot(self):
1112 "Tries to ssh into nodes in production mode with the root ssh key"
1113 return self.check_nodes_ssh(debug=False,
1114 timeout_minutes=self.ssh_node_boot_timeout,
1115 silent_minutes=self.ssh_node_boot_silent)
1117 def node_bmlogs(self):
1118 "Checks that there's a non-empty dir. /var/log/bm/raw"
1119 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw"))==0
1122 def qemu_local_init (self): pass
1124 def bootcd (self): pass
1126 def qemu_local_config (self): pass
1128 def nodestate_reinstall (self): pass
1130 def nodestate_safeboot (self): pass
1132 def nodestate_boot (self): pass
1134 def nodestate_show (self): pass
1136 def qemu_export (self): pass
1138 ### check hooks : invoke scripts from hooks/{node,slice}
1139 def check_hooks_node (self):
1140 return self.locate_first_node().check_hooks()
1141 def check_hooks_sliver (self) :
1142 return self.locate_first_sliver().check_hooks()
1144 def check_hooks (self):
1145 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1146 return self.check_hooks_node() and self.check_hooks_sliver()
1149 def do_check_initscripts(self):
1150 class CompleterTaskInitscript (CompleterTask):
1151 def __init__ (self, test_sliver, stamp):
1152 self.test_sliver=test_sliver
1154 def actual_run (self):
1155 return self.test_sliver.check_initscript_stamp (self.stamp)
1157 return "initscript checker for %s"%self.test_sliver.name()
1158 def failure_message (self):
1159 return "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())
1162 for slice_spec in self.plc_spec['slices']:
1163 if not slice_spec.has_key('initscriptstamp'):
1165 stamp=slice_spec['initscriptstamp']
1166 slicename=slice_spec['slice_fields']['name']
1167 for nodename in slice_spec['nodenames']:
1168 print 'nodename',nodename,'slicename',slicename,'stamp',stamp
1169 (site,node) = self.locate_node (nodename)
1170 # xxx - passing the wrong site - probably harmless
1171 test_site = TestSite (self,site)
1172 test_slice = TestSlice (self,test_site,slice_spec)
1173 test_node = TestNode (self,test_site,node)
1174 test_sliver = TestSliver (self, test_node, test_slice)
1175 tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
1176 return Completer (tasks).run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1178 def check_initscripts(self):
1179 "check that the initscripts have triggered"
1180 return self.do_check_initscripts()
1182 def initscripts (self):
1183 "create initscripts with PLCAPI"
1184 for initscript in self.plc_spec['initscripts']:
1185 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1186 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1189 def delete_initscripts (self):
1190 "delete initscripts with PLCAPI"
1191 for initscript in self.plc_spec['initscripts']:
1192 initscript_name = initscript['initscript_fields']['name']
1193 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1195 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1196 print initscript_name,'deleted'
1198 print 'deletion went wrong - probably did not exist'
1203 "create slices with PLCAPI"
1204 return self.do_slices(action="add")
1206 def delete_slices (self):
1207 "delete slices with PLCAPI"
1208 return self.do_slices(action="delete")
1210 def fill_slices (self):
1211 "add nodes in slices with PLCAPI"
1212 return self.do_slices(action="fill")
1214 def empty_slices (self):
1215 "remove nodes from slices with PLCAPI"
1216 return self.do_slices(action="empty")
1218 def do_slices (self, action="add"):
1219 for slice in self.plc_spec['slices']:
1220 site_spec = self.locate_site (slice['sitename'])
1221 test_site = TestSite(self,site_spec)
1222 test_slice=TestSlice(self,test_site,slice)
1223 if action == "delete":
1224 test_slice.delete_slice()
1225 elif action=="fill":
1226 test_slice.add_nodes()
1227 elif action=="empty":
1228 test_slice.delete_nodes()
1230 test_slice.create_slice()
1233 @slice_mapper__tasks(20,10,15)
1234 def ssh_slice(self): pass
1235 @slice_mapper__tasks(20,19,15)
1236 def ssh_slice_off (self): pass
1238 # use another name so we can exclude/ignore it from the tests on the nightly command line
1239 def ssh_slice_again(self): return self.ssh_slice()
1240 # note that simply doing ssh_slice_again=ssh_slice would kind od work too
1241 # but for some reason the ignore-wrapping thing would not
1244 def ssh_slice_basics(self): pass
1247 def check_vsys_defaults(self): pass
1250 def keys_clear_known_hosts (self): pass
1252 def plcapi_urls (self):
1253 return PlcapiUrlScanner (self.auth_root(),ip=self.vserverip).scan()
1255 def speed_up_slices (self):
1256 "tweak nodemanager settings on all nodes using a conf file"
1257 # create the template on the server-side
1258 template="%s.nodemanager"%self.name()
1259 template_file = open (template,"w")
1260 template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
1261 template_file.close()
1262 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1263 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1264 self.test_ssh.copy_abs(template,remote)
1266 self.apiserver.AddConfFile (self.auth_root(),
1267 {'dest':'/etc/sysconfig/nodemanager',
1268 'source':'PlanetLabConf/nodemanager',
1269 'postinstall_cmd':'service nm restart',})
1272 def debug_nodemanager (self):
1273 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1274 template="%s.nodemanager"%self.name()
1275 template_file = open (template,"w")
1276 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1277 template_file.close()
1278 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1279 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1280 self.test_ssh.copy_abs(template,remote)
1284 def qemu_start (self) : pass
1287 def qemu_timestamp (self) : pass
1289 # when a spec refers to a node possibly on another plc
1290 def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
1291 for plc in [ self ] + other_plcs:
1293 return plc.locate_sliver_obj (nodename, slicename)
1296 raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)
1298 # implement this one as a cross step so that we can take advantage of different nodes
1299 # in multi-plcs mode
1300 def cross_check_tcp (self, other_plcs):
1301 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1302 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1303 utils.header ("check_tcp: no/empty config found")
1305 specs = self.plc_spec['tcp_specs']
1310 s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
1311 if not s_test_sliver.run_tcp_server(port,timeout=20):
1315 # idem for the client side
1316 c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
1317 # use nodename from locatesd sliver, unless 'client_connect' is set
1318 if 'client_connect' in spec:
1319 destination = spec['client_connect']
1321 destination=s_test_sliver.test_node.name()
1322 if not c_test_sliver.run_tcp_client(destination,port):
1326 # painfully enough, we need to allow for some time as netflow might show up last
1327 def check_system_slice (self):
1328 "all nodes: check that a system slice is alive"
1329 # netflow currently not working in the lxc distro
1330 # drl not built at all in the wtx distro
1331 # if we find either of them we're happy
1332 return self.check_netflow() or self.check_drl()
1335 def check_netflow (self): return self._check_system_slice ('netflow')
1336 def check_drl (self): return self._check_system_slice ('drl')
1338 # we have the slices up already here, so it should not take too long
1339 def _check_system_slice (self, slicename, timeout_minutes=5, period_seconds=15):
1340 class CompleterTaskSystemSlice (CompleterTask):
1341 def __init__ (self, test_node, dry_run):
1342 self.test_node=test_node
1343 self.dry_run=dry_run
1344 def actual_run (self):
1345 return self.test_node._check_system_slice (slicename, dry_run=self.dry_run)
1347 return "System slice %s @ %s"%(slicename, self.test_node.name())
1348 def failure_message (self):
1349 return "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
1350 timeout = timedelta(minutes=timeout_minutes)
1351 silent = timedelta (0)
1352 period = timedelta (seconds=period_seconds)
1353 tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
1354 for test_node in self.all_nodes() ]
1355 return Completer (tasks) . run (timeout, silent, period)
1357 def plcsh_stress_test (self):
1358 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1359 # install the stress-test in the plc image
1360 location = "/usr/share/plc_api/plcsh_stress_test.py"
1361 remote="%s/%s"%(self.vm_root_in_host(),location)
1362 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1364 command += " -- --check"
1365 if self.options.size == 1:
1366 command += " --tiny"
1367 return ( self.run_in_guest(command) == 0)
1369 # populate runs the same utility without slightly different options
1370 # in particular runs with --preserve (dont cleanup) and without --check
1371 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1373 def sfa_install_all (self):
1374 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1375 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
1377 def sfa_install_core(self):
1379 return self.yum_install ("sfa")
1381 def sfa_install_plc(self):
1382 "yum install sfa-plc"
1383 return self.yum_install("sfa-plc")
1385 def sfa_install_sfatables(self):
1386 "yum install sfa-sfatables"
1387 return self.yum_install ("sfa-sfatables")
1389 # for some very odd reason, this sometimes fails with the following symptom
1390 # # yum install sfa-client
1391 # Setting up Install Process
1393 # Downloading Packages:
1394 # Running rpm_check_debug
1395 # Running Transaction Test
1396 # Transaction Test Succeeded
1397 # Running Transaction
1398 # Transaction couldn't start:
1399 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1400 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1401 # even though in the same context I have
1402 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1403 # Filesystem Size Used Avail Use% Mounted on
1404 # /dev/hdv1 806G 264G 501G 35% /
1405 # none 16M 36K 16M 1% /tmp
1407 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1408 def sfa_install_client(self):
1409 "yum install sfa-client"
1410 first_try=self.yum_install("sfa-client")
1411 if first_try: return True
1412 utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
1413 (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1414 utils.header("rpm_path=<<%s>>"%rpm_path)
1416 self.run_in_guest("rpm -i %s"%cached_rpm_path)
1417 return self.yum_check_installed ("sfa-client")
1419 def sfa_dbclean(self):
1420 "thoroughly wipes off the SFA database"
1421 return self.run_in_guest("sfaadmin reg nuke")==0 or \
1422 self.run_in_guest("sfa-nuke.py")==0 or \
1423 self.run_in_guest("sfa-nuke-plc.py")==0
1425 def sfa_fsclean(self):
1426 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1427 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1430 def sfa_plcclean(self):
1431 "cleans the PLC entries that were created as a side effect of running the script"
1433 sfa_spec=self.plc_spec['sfa']
1435 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1436 login_base=auth_sfa_spec['login_base']
1437 try: self.apiserver.DeleteSite (self.auth_root(),login_base)
1438 except: print "Site %s already absent from PLC db"%login_base
1440 for spec_name in ['pi_spec','user_spec']:
1441 user_spec=auth_sfa_spec[spec_name]
1442 username=user_spec['email']
1443 try: self.apiserver.DeletePerson(self.auth_root(),username)
1445 # this in fact is expected as sites delete their members
1446 #print "User %s already absent from PLC db"%username
1449 print "REMEMBER TO RUN sfa_import AGAIN"
1452 def sfa_uninstall(self):
1453 "uses rpm to uninstall sfa - ignore result"
1454 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1455 self.run_in_guest("rm -rf /var/lib/sfa")
1456 self.run_in_guest("rm -rf /etc/sfa")
1457 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1459 self.run_in_guest("rpm -e --noscripts sfa-plc")
1462 ### run unit tests for SFA
1463 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1464 # Running Transaction
1465 # Transaction couldn't start:
1466 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1467 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1468 # no matter how many Gbs are available on the testplc
1469 # could not figure out what's wrong, so...
1470 # if the yum install phase fails, consider the test is successful
1471 # other combinations will eventually run it hopefully
1472 def sfa_utest(self):
1473 "yum install sfa-tests and run SFA unittests"
1474 self.run_in_guest("yum -y install sfa-tests")
1475 # failed to install - forget it
1476 if self.run_in_guest("rpm -q sfa-tests")!=0:
1477 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1479 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1483 dirname="conf.%s"%self.plc_spec['name']
1484 if not os.path.isdir(dirname):
1485 utils.system("mkdir -p %s"%dirname)
1486 if not os.path.isdir(dirname):
1487 raise Exception,"Cannot create config dir for plc %s"%self.name()
1490 def conffile(self,filename):
1491 return "%s/%s"%(self.confdir(),filename)
1492 def confsubdir(self,dirname,clean,dry_run=False):
1493 subdirname="%s/%s"%(self.confdir(),dirname)
1495 utils.system("rm -rf %s"%subdirname)
1496 if not os.path.isdir(subdirname):
1497 utils.system("mkdir -p %s"%subdirname)
1498 if not dry_run and not os.path.isdir(subdirname):
1499 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1502 def conffile_clean (self,filename):
1503 filename=self.conffile(filename)
1504 return utils.system("rm -rf %s"%filename)==0
1507 def sfa_configure(self):
1508 "run sfa-config-tty"
1509 tmpname=self.conffile("sfa-config-tty")
1510 fileconf=open(tmpname,'w')
1511 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1512 'SFA_INTERFACE_HRN',
1513 'SFA_REGISTRY_LEVEL1_AUTH',
1514 'SFA_REGISTRY_HOST',
1515 'SFA_AGGREGATE_HOST',
1525 'SFA_GENERIC_FLAVOUR',
1526 'SFA_AGGREGATE_ENABLED',
1528 if self.plc_spec['sfa'].has_key(var):
1529 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1530 # the way plc_config handles booleans just sucks..
1533 if self.plc_spec['sfa'][var]: val='true'
1534 fileconf.write ('e %s\n%s\n'%(var,val))
1535 fileconf.write('w\n')
1536 fileconf.write('R\n')
1537 fileconf.write('q\n')
1539 utils.system('cat %s'%tmpname)
1540 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1543 def aggregate_xml_line(self):
1544 port=self.plc_spec['sfa']['neighbours-port']
1545 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1546 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
1548 def registry_xml_line(self):
1549 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1550 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
1553 # a cross step that takes all other plcs in argument
1554 def cross_sfa_configure(self, other_plcs):
1555 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1556 # of course with a single plc, other_plcs is an empty list
1559 agg_fname=self.conffile("agg.xml")
1560 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1561 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1562 utils.header ("(Over)wrote %s"%agg_fname)
1563 reg_fname=self.conffile("reg.xml")
1564 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1565 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1566 utils.header ("(Over)wrote %s"%reg_fname)
1567 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
1568 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
1570 def sfa_import(self):
1571 "use sfaadmin to import from plc"
1572 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1573 return self.run_in_guest('sfaadmin reg import_registry')==0
1575 def sfa_start(self):
1577 return self.start_service('sfa')
1580 def sfi_configure(self):
1581 "Create /root/sfi on the plc side for sfi client configuration"
1582 if self.options.dry_run:
1583 utils.header("DRY RUN - skipping step")
1585 sfa_spec=self.plc_spec['sfa']
1586 # cannot use auth_sfa_mapper to pass dir_name
1587 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1588 test_slice=TestAuthSfa(self,slice_spec)
1589 dir_basename=os.path.basename(test_slice.sfi_path())
1590 dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
1591 test_slice.sfi_configure(dir_name)
1592 # push into the remote /root/sfi area
1593 location = test_slice.sfi_path()
1594 remote="%s/%s"%(self.vm_root_in_host(),location)
1595 self.test_ssh.mkdir(remote,abs=True)
1596 # need to strip last level or remote otherwise we get an extra dir level
1597 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1601 def sfi_clean (self):
1602 "clean up /root/sfi on the plc side"
1603 self.run_in_guest("rm -rf /root/sfi")
1607 def sfa_add_site (self): pass
1609 def sfa_add_pi (self): pass
1611 def sfa_add_user(self): pass
1613 def sfa_update_user(self): pass
1615 def sfa_add_slice(self): pass
1617 def sfa_renew_slice(self): pass
1619 def sfa_discover(self): pass
1621 def sfa_create_slice(self): pass
1623 def sfa_check_slice_plc(self): pass
1625 def sfa_update_slice(self): pass
1627 def sfi_list(self): pass
1629 def sfi_show(self): pass
1631 def ssh_slice_sfa(self): pass
1633 def sfa_delete_user(self): pass
1635 def sfa_delete_slice(self): pass
1639 return self.stop_service ('sfa')
1641 def populate (self):
1642 "creates random entries in the PLCAPI"
1643 # install the stress-test in the plc image
1644 location = "/usr/share/plc_api/plcsh_stress_test.py"
1645 remote="%s/%s"%(self.vm_root_in_host(),location)
1646 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1648 command += " -- --preserve --short-names"
1649 local = (self.run_in_guest(command) == 0);
1650 # second run with --foreign
1651 command += ' --foreign'
1652 remote = (self.run_in_guest(command) == 0);
1653 return ( local and remote)
1655 def gather_logs (self):
1656 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1657 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1658 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1659 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1660 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1661 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1662 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1664 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1665 self.gather_var_logs ()
1667 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1668 self.gather_pgsql_logs ()
1670 print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
1671 self.gather_root_sfi ()
1673 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1674 for site_spec in self.plc_spec['sites']:
1675 test_site = TestSite (self,site_spec)
1676 for node_spec in site_spec['nodes']:
1677 test_node=TestNode(self,test_site,node_spec)
1678 test_node.gather_qemu_logs()
1680 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1681 self.gather_nodes_var_logs()
1683 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1684 self.gather_slivers_var_logs()
1687 def gather_slivers_var_logs(self):
1688 for test_sliver in self.all_sliver_objs():
1689 remote = test_sliver.tar_var_logs()
1690 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1691 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1692 utils.system(command)
1695 def gather_var_logs (self):
1696 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1697 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1698 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1699 utils.system(command)
1700 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1701 utils.system(command)
1703 def gather_pgsql_logs (self):
1704 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1705 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1706 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1707 utils.system(command)
1709 def gather_root_sfi (self):
1710 utils.system("mkdir -p logs/sfi.%s"%self.name())
1711 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1712 command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
1713 utils.system(command)
1715 def gather_nodes_var_logs (self):
1716 for site_spec in self.plc_spec['sites']:
1717 test_site = TestSite (self,site_spec)
1718 for node_spec in site_spec['nodes']:
1719 test_node=TestNode(self,test_site,node_spec)
1720 test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
1721 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1722 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1723 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1724 utils.system(command)
1727 # returns the filename to use for sql dump/restore, using options.dbname if set
1728 def dbfile (self, database):
1729 # uses options.dbname if it is found
1731 name=self.options.dbname
1732 if not isinstance(name,StringTypes):
1738 return "/root/%s-%s.sql"%(database,name)
1740 def plc_db_dump(self):
1741 'dump the planetlab5 DB in /root in the PLC - filename has time'
1742 dump=self.dbfile("planetab5")
1743 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1744 utils.header('Dumped planetlab5 database in %s'%dump)
1747 def plc_db_restore(self):
1748 'restore the planetlab5 DB - looks broken, but run -n might help'
1749 dump=self.dbfile("planetab5")
1750 ##stop httpd service
1751 self.run_in_guest('service httpd stop')
1752 # xxx - need another wrapper
1753 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1754 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1755 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1756 ##starting httpd service
1757 self.run_in_guest('service httpd start')
1759 utils.header('Database restored from ' + dump)
1762 def create_ignore_steps ():
1763 for step in TestPlc.default_steps + TestPlc.other_steps:
1764 # default step can have a plc qualifier
1765 if '@' in step: (step,qualifier)=step.split('@')
1766 # or be defined as forced or ignored by default
1767 for keyword in ['_ignore','_force']:
1768 if step.endswith (keyword): step=step.replace(keyword,'')
1769 if step == SEP or step == SEPSFA : continue
1770 method=getattr(TestPlc,step)
1772 wrapped=ignore_result(method)
1773 # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
1774 setattr(TestPlc, name, wrapped)
1777 # def ssh_slice_again_ignore (self): pass
1779 # def check_initscripts_ignore (self): pass
1781 def standby_1_through_20(self):
1782 """convenience function to wait for a specified number of minutes"""
1785 def standby_1(): pass
1787 def standby_2(): pass
1789 def standby_3(): pass
1791 def standby_4(): pass
1793 def standby_5(): pass
1795 def standby_6(): pass
1797 def standby_7(): pass
1799 def standby_8(): pass
1801 def standby_9(): pass
1803 def standby_10(): pass
1805 def standby_11(): pass
1807 def standby_12(): pass
1809 def standby_13(): pass
1811 def standby_14(): pass
1813 def standby_15(): pass
1815 def standby_16(): pass
1817 def standby_17(): pass
1819 def standby_18(): pass
1821 def standby_19(): pass
1823 def standby_20(): pass
1825 # convenience for debugging the test logic
1826 def yes (self): return True
1827 def no (self): return False