1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
10 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
24 from Completer import Completer, CompleterTask
26 has_sfa_cache_filename="sfa-cache"
28 # step methods must take (self) and return a boolean (options is a member of the class)
30 def standby(minutes,dry_run):
31 utils.header('Entering StandBy for %d mn'%minutes)
35 time.sleep(60*minutes)
38 def standby_generic (func):
40 minutes=int(func.__name__.split("_")[1])
41 return standby(minutes,self.options.dry_run)
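# illustrative sketch (not in the original file): standby_generic is meant to wrap
# the standby_<N> placeholders defined at the end of this module, deriving the
# duration from the method name, e.g.
#   @standby_generic
#   def standby_5(): pass
# so that running the 'standby_5' step sleeps for 5 minutes (unless --dry-run is set)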
44 def node_mapper (method):
45 def map_on_nodes(self,*args, **kwds):
47 node_method = TestNode.__dict__[method.__name__]
48 for test_node in self.all_nodes():
49 if not node_method(test_node, *args, **kwds): overall=False
51 # maintain __name__ for ignore_result
52 map_on_nodes.__name__=method.__name__
53 # restore the doc text
54 map_on_nodes.__doc__=TestNode.__dict__[method.__name__].__doc__
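# illustrative usage (assumption, mirroring the decorated steps further down):
# node_mapper turns a TestNode method into a TestPlc step that loops over all nodes, e.g.
#   @node_mapper
#   def qemu_local_init(self): pass
# runs TestNode.qemu_local_init on every node and ANDs the results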
57 def slice_mapper (method):
58 def map_on_slices(self):
60 slice_method = TestSlice.__dict__[method.__name__]
61 for slice_spec in self.plc_spec['slices']:
62 site_spec = self.locate_site (slice_spec['sitename'])
63 test_site = TestSite(self,site_spec)
64 test_slice=TestSlice(self,test_site,slice_spec)
65 if not slice_method(test_slice,self.options): overall=False
67 # maintain __name__ for ignore_result
68 map_on_slices.__name__=method.__name__
69 # restore the doc text
70 map_on_slices.__doc__=TestSlice.__dict__[method.__name__].__doc__
73 # run a step but return True so that we can go on
74 def ignore_result (method):
76 # ssh_slice_ignore->ssh_slice
77 ref_name=method.__name__.replace('_ignore','').replace('force_','')
78 ref_method=TestPlc.__dict__[ref_name]
79 result=ref_method(self)
80 print "Actual (but ignored) result for %(ref_name)s is %(result)s"%locals()
81 return Ignored (result)
82 name=method.__name__.replace('_ignore','').replace('force_','')
83 ignoring.__name__=name
84 ignoring.__doc__="ignored version of " + name
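# illustrative sketch (assumption): ignore_result is applied by create_ignore_steps
# at the bottom of this module, roughly equivalent to
#   TestPlc.ssh_slice_again_ignore = ignore_result(TestPlc.ssh_slice_again)
# so that the '_ignore' step runs the real step, prints its outcome, and always
# reports success through the Ignored marker class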
87 # a variant that expects the TestSlice method to return a list of CompleterTasks that
88 # are then merged into a single Completer run to avoid waiting for all the slices
89 # esp. useful when a test fails of course
90 # because we need to pass arguments we use a class instead..
91 class slice_mapper__tasks (object):
92 # could not get this to work with named arguments
93 def __init__ (self,timeout_minutes,silent_minutes,period_seconds):
94 self.timeout=timedelta(minutes=timeout_minutes)
95 self.silent=timedelta(minutes=silent_minutes)
96 self.period=timedelta(seconds=period_seconds)
97 def __call__ (self, method):
99 # compute augmented method name
100 method_name = method.__name__ + "__tasks"
101 # locate in TestSlice
102 slice_method = TestSlice.__dict__[ method_name ]
105 for slice_spec in self.plc_spec['slices']:
106 site_spec = self.locate_site (slice_spec['sitename'])
107 test_site = TestSite(self,site_spec)
108 test_slice=TestSlice(self,test_site,slice_spec)
109 tasks += slice_method (test_slice, self.options)
110 return Completer (tasks).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
111 # restore the doc text from the TestSlice method even if a bit odd
112 wrappee.__doc__ = slice_method.__doc__
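# illustrative usage (taken from the ssh_slice steps below): decorating with
#   @slice_mapper__tasks(20,10,15)
#   def ssh_slice(self): pass
# collects the CompleterTasks returned by TestSlice.ssh_slice__tasks for every slice
# and runs them in a single Completer (20 min timeout, 10 min silent, 15 s period)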
115 def auth_sfa_mapper (method):
118 auth_method = TestAuthSfa.__dict__[method.__name__]
119 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
120 test_auth=TestAuthSfa(self,auth_spec)
121 if not auth_method(test_auth,self.options): overall=False
123 # restore the doc text
124 actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
128 def __init__ (self,result):
138 'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
139 'plc_install', 'plc_configure', 'plc_start', SEP,
140 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
141 'plcapi_urls','speed_up_slices', SEP,
142 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
143 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
144 # keep this out of the way for now
145 'check_vsys_defaults_ignore', SEP,
146 # run this first off so it's easier to re-run on another qemu box
147 'qemu_kill_mine', SEP,
148 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
149 'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', SEP,
150 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
151 'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
152 'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_renew_slice@1', SEPSFA,
153 'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
154 'sfi_list@1', 'sfi_show@1', 'sfa_utest@1', SEPSFA,
155 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
156 # but as the stress test might take a while, we sometimes missed the debug mode..
157 'probe_kvm_iptables',
158 'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
159 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts_ignore', SEP,
160 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
161 'cross_check_tcp@1', 'check_system_slice', SEP,
162 # check slices are turned off properly
163 'empty_slices', 'ssh_slice_off', SEP,
164 # check they are properly re-created with the same name
165 'fill_slices', 'ssh_slice_again_ignore', SEP,
166 'gather_logs_force', SEP,
169 'export', 'show_boxes', SEP,
170 'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
171 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
172 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
173 'delete_leases', 'list_leases', SEP,
175 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
176 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
177 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
178 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
179 'plc_db_dump' , 'plc_db_restore', SEP,
180 'check_netflow','check_drl', SEP,
181 'debug_nodemanager', SEP,
182 'standby_1_through_20','yes','no',SEP,
186 def printable_steps (list):
187 single_line=" ".join(list)+" "
188 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
190 def valid_step (step):
191 return step != SEP and step != SEPSFA
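# illustrative example (assumption): printable_steps renders the step list for display,
# breaking the line at each SEP/SEPSFA marker, e.g.
#   printable_steps(['plc_install','plc_configure',SEP,'sfa_install_all'])
# returns roughly
#   plc_install plc_configure \
#   sfa_install_all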
193 # turn off the sfa-related steps when build has skipped SFA
194 # this was originally for centos5 but is still valid
195 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
197 def _has_sfa_cached (rpms_url):
198 if os.path.isfile(has_sfa_cache_filename):
199 cached=file(has_sfa_cache_filename).read()=="yes"
200 utils.header("build provides SFA (cached):%s"%cached)
202 # warning, we're now building 'sface' so let's be a bit more picky
203 # full builds are expected to return with 0 here
204 utils.header ("Checking if build provides SFA package...")
205 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)==0
206 encoded='yes' if retcod else 'no'
207 file(has_sfa_cache_filename,'w').write(encoded)
211 def check_whether_build_has_sfa (rpms_url):
212 has_sfa=TestPlc._has_sfa_cached(rpms_url)
214 utils.header("build does provide SFA")
216 # move all steps containing 'sfa' from default_steps to other_steps
217 utils.header("SFA package not found - removing steps with sfa or sfi")
218 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
219 TestPlc.other_steps += sfa_steps
220 for step in sfa_steps: TestPlc.default_steps.remove(step)
222 def __init__ (self,plc_spec,options):
223 self.plc_spec=plc_spec
225 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
226 self.vserverip=plc_spec['vserverip']
227 self.vservername=plc_spec['vservername']
228 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
229 self.apiserver=TestApiserver(self.url,options.dry_run)
230 (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
231 (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']
233 def has_addresses_api (self):
234 return self.apiserver.has_method('AddIpAddress')
237 name=self.plc_spec['name']
238 return "%s.%s"%(name,self.vservername)
241 return self.plc_spec['host_box']
244 return self.test_ssh.is_local()
246 # define the API methods on this object through xmlrpc
247 # would help, but not strictly necessary
251 def actual_command_in_guest (self,command, backslash=False):
252 raw1=self.host_to_guest(command)
253 raw2=self.test_ssh.actual_command(raw1,dry_run=self.options.dry_run, backslash=backslash)
256 def start_guest (self):
257 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),dry_run=self.options.dry_run))
259 def stop_guest (self):
260 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),dry_run=self.options.dry_run))
262 def run_in_guest (self,command,backslash=False):
263 raw=self.actual_command_in_guest(command,backslash)
264 return utils.system(raw)
266 def run_in_host (self,command):
267 return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
269 # backslashing turned out so awful at some point that I've turned off auto-backslashing
270 # see e.g. plc_start esp. the version for f14
271 # command gets run in the plc's vm
272 def host_to_guest(self,command):
273 # f14 still needs some extra help
274 if self.options.fcdistro == 'f14':
275 raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin %s" %(self.vservername,command)
277 raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env %s" %(self.vservername,command)
280 # this /vservers thing is legacy...
281 def vm_root_in_host(self):
282 return "/vservers/%s/"%(self.vservername)
284 def vm_timestamp_path (self):
285 return "/vservers/%s/%s.timestamp"%(self.vservername,self.vservername)
287 #start/stop the vserver
288 def start_guest_in_host(self):
289 return "virsh -c lxc:/// start %s"%(self.vservername)
291 def stop_guest_in_host(self):
292 return "virsh -c lxc:/// destroy %s"%(self.vservername)
295 def run_in_guest_piped (self,local,remote):
296 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
298 def yum_check_installed (self, rpms):
299 if isinstance (rpms, list):
301 return self.run_in_guest("rpm -q %s"%rpms)==0
303 # does a yum install in the vs, ignore yum retcod, check with rpm
304 def yum_install (self, rpms):
305 if isinstance (rpms, list):
307 self.run_in_guest("yum -y install %s"%rpms)
308 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
309 self.run_in_guest("yum-complete-transaction -y")
310 return self.yum_check_installed (rpms)
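# illustrative usage (assumption): yum_install(['myplc']) runs
#   yum -y install myplc
# in the guest, then yum-complete-transaction, and finally trusts 'rpm -q' rather
# than the yum return code to decide success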
312 def auth_root (self):
313 return {'Username':self.plc_spec['PLC_ROOT_USER'],
314 'AuthMethod':'password',
315 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
316 'Role' : self.plc_spec['role']
318 def locate_site (self,sitename):
319 for site in self.plc_spec['sites']:
320 if site['site_fields']['name'] == sitename:
322 if site['site_fields']['login_base'] == sitename:
324 raise Exception,"Cannot locate site %s"%sitename
326 def locate_node (self,nodename):
327 for site in self.plc_spec['sites']:
328 for node in site['nodes']:
329 if node['name'] == nodename:
331 raise Exception,"Cannot locate node %s"%nodename
333 def locate_hostname (self,hostname):
334 for site in self.plc_spec['sites']:
335 for node in site['nodes']:
336 if node['node_fields']['hostname'] == hostname:
338 raise Exception,"Cannot locate hostname %s"%hostname
340 def locate_key (self,key_name):
341 for key in self.plc_spec['keys']:
342 if key['key_name'] == key_name:
344 raise Exception,"Cannot locate key %s"%key_name
346 def locate_private_key_from_key_names (self, key_names):
347 # locate the first avail. key
349 for key_name in key_names:
350 key_spec=self.locate_key(key_name)
351 test_key=TestKey(self,key_spec)
352 publickey=test_key.publicpath()
353 privatekey=test_key.privatepath()
354 if os.path.isfile(publickey) and os.path.isfile(privatekey):
356 if found: return privatekey
359 def locate_slice (self, slicename):
360 for slice in self.plc_spec['slices']:
361 if slice['slice_fields']['name'] == slicename:
363 raise Exception,"Cannot locate slice %s"%slicename
365 def all_sliver_objs (self):
367 for slice_spec in self.plc_spec['slices']:
368 slicename = slice_spec['slice_fields']['name']
369 for nodename in slice_spec['nodenames']:
370 result.append(self.locate_sliver_obj (nodename,slicename))
373 def locate_sliver_obj (self,nodename,slicename):
374 (site,node) = self.locate_node(nodename)
375 slice = self.locate_slice (slicename)
377 test_site = TestSite (self, site)
378 test_node = TestNode (self, test_site,node)
379 # xxx the slice site is assumed to be the node site - mhh - probably harmless
380 test_slice = TestSlice (self, test_site, slice)
381 return TestSliver (self, test_node, test_slice)
383 def locate_first_node(self):
384 nodename=self.plc_spec['slices'][0]['nodenames'][0]
385 (site,node) = self.locate_node(nodename)
386 test_site = TestSite (self, site)
387 test_node = TestNode (self, test_site,node)
390 def locate_first_sliver (self):
391 slice_spec=self.plc_spec['slices'][0]
392 slicename=slice_spec['slice_fields']['name']
393 nodename=slice_spec['nodenames'][0]
394 return self.locate_sliver_obj(nodename,slicename)
396 # all different hostboxes used in this plc
397 def get_BoxNodes(self):
398 # maps on sites and nodes, return [ (host_box,test_node) ]
400 for site_spec in self.plc_spec['sites']:
401 test_site = TestSite (self,site_spec)
402 for node_spec in site_spec['nodes']:
403 test_node = TestNode (self, test_site, node_spec)
404 if not test_node.is_real():
405 tuples.append( (test_node.host_box(),test_node) )
406 # transform into a dict { 'host_box' -> [ test_node .. ] }
408 for (box,node) in tuples:
409 if not result.has_key(box):
412 result[box].append(node)
415 # a step for checking this stuff
416 def show_boxes (self):
417 'print summary of nodes location'
418 for (box,nodes) in self.get_BoxNodes().iteritems():
419 print box,":"," + ".join( [ node.name() for node in nodes ] )
422 # make this a valid step
423 def qemu_kill_all(self):
424 'kill all qemu instances on the qemu boxes involved by this setup'
425 # this is the brute force version, kill all qemus on that host box
426 for (box,nodes) in self.get_BoxNodes().iteritems():
427 # pass the first nodename, as we don't push template-qemu on testboxes
428 nodedir=nodes[0].nodedir()
429 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
432 # make this a valid step
433 def qemu_list_all(self):
434 'list all qemu instances on the qemu boxes involved by this setup'
435 for (box,nodes) in self.get_BoxNodes().iteritems():
436 # this is the brute force version, kill all qemus on that host box
437 TestBoxQemu(box,self.options.buildname).qemu_list_all()
440 # kill only the qemus related to this test
441 def qemu_list_mine(self):
442 'list qemu instances for our nodes'
443 for (box,nodes) in self.get_BoxNodes().iteritems():
444 # the fine-grain version
449 # kill only the qemus related to this test
450 def qemu_clean_mine(self):
451 'cleanup (rm -rf) qemu instances for our nodes'
452 for (box,nodes) in self.get_BoxNodes().iteritems():
453 # the fine-grain version
458 # kill only the right qemus
459 def qemu_kill_mine(self):
460 'kill the qemu instances for our nodes'
461 for (box,nodes) in self.get_BoxNodes().iteritems():
462 # the fine-grain version
467 #################### display config
469 "show test configuration after localization"
474 # ugly hack to make sure 'run export' only reports about the 1st plc
475 # to avoid confusion - also we use 'inri_slice1' in various aliases..
478 "print cut'n paste-able stuff to export env variables to your shell"
479 # guess local domain from hostname
480 if TestPlc.exported_id>1:
481 print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
483 TestPlc.exported_id+=1
484 domain=socket.gethostname().split('.',1)[1]
485 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
486 print "export BUILD=%s"%self.options.buildname
487 print "export PLCHOSTLXC=%s"%fqdn
488 print "export GUESTNAME=%s"%self.plc_spec['vservername']
489 vplcname=self.plc_spec['vservername'].split('-')[-1]
490 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
491 # find hostname of first node
492 (hostname,qemubox) = self.all_node_infos()[0]
493 print "export KVMHOST=%s.%s"%(qemubox,domain)
494 print "export NODE=%s"%(hostname)
498 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
499 def show_pass (self,passno):
500 for (key,val) in self.plc_spec.iteritems():
501 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
505 self.display_site_spec(site)
506 for node in site['nodes']:
507 self.display_node_spec(node)
508 elif key=='initscripts':
509 for initscript in val:
510 self.display_initscript_spec (initscript)
513 self.display_slice_spec (slice)
516 self.display_key_spec (key)
518 if key not in ['sites','initscripts','slices','keys']:
519 print '+ ',key,':',val
521 def display_site_spec (self,site):
522 print '+ ======== site',site['site_fields']['name']
523 for (k,v) in site.iteritems():
524 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
527 print '+ ','nodes : ',
529 print node['node_fields']['hostname'],'',
535 print user['name'],'',
537 elif k == 'site_fields':
538 print '+ login_base',':',v['login_base']
539 elif k == 'address_fields':
545 def display_initscript_spec (self,initscript):
546 print '+ ======== initscript',initscript['initscript_fields']['name']
548 def display_key_spec (self,key):
549 print '+ ======== key',key['key_name']
551 def display_slice_spec (self,slice):
552 print '+ ======== slice',slice['slice_fields']['name']
553 for (k,v) in slice.iteritems():
566 elif k=='slice_fields':
567 print '+ fields',':',
568 print 'max_nodes=',v['max_nodes'],
573 def display_node_spec (self,node):
574 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
575 print "hostname=",node['node_fields']['hostname'],
576 print "ip=",node['interface_fields']['ip']
577 if self.options.verbose:
578 utils.pprint("node details",node,depth=3)
580 # another entry point for just showing the boxes involved
581 def display_mapping (self):
582 TestPlc.display_mapping_plc(self.plc_spec)
586 def display_mapping_plc (plc_spec):
587 print '+ MyPLC',plc_spec['name']
588 # WARNING this would not be right for lxc-based PLC's - should be harmless though
589 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
590 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
591 for site_spec in plc_spec['sites']:
592 for node_spec in site_spec['nodes']:
593 TestPlc.display_mapping_node(node_spec)
596 def display_mapping_node (node_spec):
597 print '+ NODE %s'%(node_spec['name'])
598 print '+\tqemu box %s'%node_spec['host_box']
599 print '+\thostname=%s'%node_spec['node_fields']['hostname']
601 # write a timestamp in /vservers/<>.timestamp
602 # cannot be inside the vserver, that causes vserver .. build to cough
603 def plcvm_timestamp (self):
604 "Create a timestamp to remember creation date for this plc"
606 # TODO-lxc check this one
607 # a first approx. is to store the timestamp close to the VM root like vs does
608 stamp_path=self.vm_timestamp_path ()
609 stamp_dir = os.path.dirname (stamp_path)
610 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
611 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
613 # this is called unconditionally at the beginning of the test sequence
614 # just in case this is a rerun, so if the vm is not running it's fine
615 def plcvm_delete(self):
616 "vserver delete the test myplc"
617 stamp_path=self.vm_timestamp_path()
618 self.run_in_host("rm -f %s"%stamp_path)
619 self.run_in_host("virsh -c lxc:// destroy %s"%self.vservername)
620 self.run_in_host("virsh -c lxc:// undefine %s"%self.vservername)
621 self.run_in_host("rm -fr /vservers/%s"%self.vservername)
625 # historically the build was being fetched by the tests
626 # now the build pushes itself as a subdir of the tests workdir
627 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
628 def plcvm_create (self):
629 "vserver creation (no install done)"
630 # push the local build/ dir to the testplc box
632 # a full path for the local calls
633 build_dir=os.path.dirname(sys.argv[0])
634 # sometimes this is empty - set to "." in such a case
635 if not build_dir: build_dir="."
636 build_dir += "/build"
638 # use a standard name - will be relative to remote buildname
640 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
641 self.test_ssh.rmdir(build_dir)
642 self.test_ssh.copy(build_dir,recursive=True)
643 # the repo url is taken from arch-rpms-url
644 # with the last step (i386) removed
645 repo_url = self.options.arch_rpms_url
646 for level in [ 'arch' ]:
647 repo_url = os.path.dirname(repo_url)
649 # invoke initvm (drop support for vs)
650 script="lbuild-initvm.sh"
652 # pass the vbuild-nightly options to [lv]test-initvm
653 script_options += " -p %s"%self.options.personality
654 script_options += " -d %s"%self.options.pldistro
655 script_options += " -f %s"%self.options.fcdistro
656 script_options += " -r %s"%repo_url
657 vserver_name = self.vservername
659 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
660 script_options += " -n %s"%vserver_hostname
662 print "Cannot reverse lookup %s"%self.vserverip
663 print "This is considered fatal, as this might pollute the test results"
665 create_vserver="%(build_dir)s/%(script)s %(script_options)s %(vserver_name)s"%locals()
666 return self.run_in_host(create_vserver) == 0
669 def plc_install(self):
670 "yum install myplc, noderepo, and the plain bootstrapfs"
672 # workaround for getting pgsql8.2 on centos5
673 if self.options.fcdistro == "centos5":
674 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
677 if self.options.personality == "linux32":
679 elif self.options.personality == "linux64":
682 raise Exception, "Unsupported personality %r"%self.options.personality
683 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
686 pkgs_list.append ("slicerepo-%s"%nodefamily)
687 pkgs_list.append ("myplc")
688 pkgs_list.append ("noderepo-%s"%nodefamily)
689 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
690 pkgs_string=" ".join(pkgs_list)
691 return self.yum_install (pkgs_list)
694 def mod_python(self):
695 """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
696 return self.yum_install ( [ 'mod_python' ] )
699 def plc_configure(self):
701 tmpname='%s.plc-config-tty'%(self.name())
702 fileconf=open(tmpname,'w')
703 for var in [ 'PLC_NAME',
708 'PLC_MAIL_SUPPORT_ADDRESS',
711 # Above line was added for integrating SFA Testing
717 'PLC_RESERVATION_GRANULARITY',
719 'PLC_OMF_XMPP_SERVER',
722 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
723 fileconf.write('w\n')
724 fileconf.write('q\n')
726 utils.system('cat %s'%tmpname)
727 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
728 utils.system('rm %s'%tmpname)
731 # f14 is a bit odd in this respect, although this worked fine in guests up to f18
732 # however using a vplc guest under f20 requires this trick
733 # the symptom is this: service plc start
734 # Starting plc (via systemctl): Failed to get D-Bus connection: \
735 # Failed to connect to socket /org/freedesktop/systemd1/private: Connection refused
736 # weird thing is the doc says f14 uses upstart by default and not systemd
737 # so this sounds kind of harmless
738 def start_service (self,service): return self.start_stop_service (service,'start')
739 def stop_service (self,service): return self.start_stop_service (service,'stop')
741 def start_stop_service (self, service,start_or_stop):
742 "utility to start/stop a service with the special trick for f14"
743 if self.options.fcdistro != 'f14':
744 return self.run_in_guest ("service %s %s"%(service,start_or_stop))==0
746 # patch /sbin/service so it does not reset environment
747 self.run_in_guest ('sed -i -e \\"s,env -i,env,\\" /sbin/service')
748 # this is because our own scripts in turn call service
749 return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service %s %s"%(service,start_or_stop))==0
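# illustrative example (assumption): on an f14 guest, start_service('plc') thus boils
# down to running, inside the vm,
#   SYSTEMCTL_SKIP_REDIRECT=true service plc start
# after /sbin/service has been patched to stop resetting the environment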
753 return self.start_service ('plc')
757 return self.stop_service ('plc')
759 def plcvm_start (self):
760 "start the PLC vserver"
764 def plcvm_stop (self):
765 "stop the PLC vserver"
769 # stores the keys from the config for further use
770 def keys_store(self):
771 "stores test users ssh keys in keys/"
772 for key_spec in self.plc_spec['keys']:
773 TestKey(self,key_spec).store_key()
776 def keys_clean(self):
777 "removes keys cached in keys/"
778 utils.system("rm -rf ./keys")
781 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
782 # for later direct access to the nodes
783 def keys_fetch(self):
784 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
786 if not os.path.isdir(dir):
788 vservername=self.vservername
789 vm_root=self.vm_root_in_host()
791 prefix = 'debug_ssh_key'
792 for ext in [ 'pub', 'rsa' ] :
793 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
794 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
795 if self.test_ssh.fetch(src,dst) != 0: overall=False
799 "create sites with PLCAPI"
800 return self.do_sites()
802 def delete_sites (self):
803 "delete sites with PLCAPI"
804 return self.do_sites(action="delete")
806 def do_sites (self,action="add"):
807 for site_spec in self.plc_spec['sites']:
808 test_site = TestSite (self,site_spec)
809 if (action != "add"):
810 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
811 test_site.delete_site()
812 # deleted with the site
813 #test_site.delete_users()
816 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
817 test_site.create_site()
818 test_site.create_users()
821 def delete_all_sites (self):
822 "Delete all sites in PLC, and related objects"
823 print 'auth_root',self.auth_root()
824 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
826 # keep the automatic site - otherwise we shoot ourselves in the foot, root_auth is not valid anymore
827 if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
828 site_id=site['site_id']
829 print 'Deleting site_id',site_id
830 self.apiserver.DeleteSite(self.auth_root(),site_id)
834 "create nodes with PLCAPI"
835 return self.do_nodes()
836 def delete_nodes (self):
837 "delete nodes with PLCAPI"
838 return self.do_nodes(action="delete")
840 def do_nodes (self,action="add"):
841 for site_spec in self.plc_spec['sites']:
842 test_site = TestSite (self,site_spec)
844 utils.header("Deleting nodes in site %s"%test_site.name())
845 for node_spec in site_spec['nodes']:
846 test_node=TestNode(self,test_site,node_spec)
847 utils.header("Deleting %s"%test_node.name())
848 test_node.delete_node()
850 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
851 for node_spec in site_spec['nodes']:
852 utils.pprint('Creating node %s'%node_spec,node_spec)
853 test_node = TestNode (self,test_site,node_spec)
854 test_node.create_node ()
857 def nodegroups (self):
858 "create nodegroups with PLCAPI"
859 return self.do_nodegroups("add")
860 def delete_nodegroups (self):
861 "delete nodegroups with PLCAPI"
862 return self.do_nodegroups("delete")
866 def translate_timestamp (start,grain,timestamp):
867 if timestamp < TestPlc.YEAR: return start+timestamp*grain
868 else: return timestamp
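# worked example (assumption): with grain=1800 as returned by the API and start aligned
# on that grain, a lease spec using small relative values like
#   { 't_from' : 0, 't_until' : 2 }
# translates into the absolute window [start, start+3600], whereas a value already
# expressed as a full epoch timestamp (>= TestPlc.YEAR) is passed through unchanged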
871 def timestamp_printable (timestamp):
872 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
875 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
877 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
878 print 'API answered grain=',grain
879 start=(now/grain)*grain
881 # find out all nodes that are reservable
882 nodes=self.all_reservable_nodenames()
884 utils.header ("No reservable node found - proceeding without leases")
887 # attach them to the leases as specified in plc_specs
889 # this is where the 'leases' field gets interpreted as relative or absolute
889 for lease_spec in self.plc_spec['leases']:
890 # skip the ones that come with a null slice id
891 if not lease_spec['slice']: continue
892 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
893 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
894 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
895 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
896 if lease_addition['errors']:
897 utils.header("Cannot create leases, %s"%lease_addition['errors'])
900 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
901 (nodes,lease_spec['slice'],
902 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
903 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
907 def delete_leases (self):
908 "remove all leases in the myplc side"
909 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
910 utils.header("Cleaning leases %r"%lease_ids)
911 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
914 def list_leases (self):
915 "list all leases known to the myplc"
916 leases = self.apiserver.GetLeases(self.auth_root())
919 current=l['t_until']>=now
920 if self.options.verbose or current:
921 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
922 TestPlc.timestamp_printable(l['t_from']),
923 TestPlc.timestamp_printable(l['t_until'])))
926 # create nodegroups if needed, and populate
927 def do_nodegroups (self, action="add"):
928 # 1st pass to scan contents
930 for site_spec in self.plc_spec['sites']:
931 test_site = TestSite (self,site_spec)
932 for node_spec in site_spec['nodes']:
933 test_node=TestNode (self,test_site,node_spec)
934 if node_spec.has_key('nodegroups'):
935 nodegroupnames=node_spec['nodegroups']
936 if isinstance(nodegroupnames,StringTypes):
937 nodegroupnames = [ nodegroupnames ]
938 for nodegroupname in nodegroupnames:
939 if not groups_dict.has_key(nodegroupname):
940 groups_dict[nodegroupname]=[]
941 groups_dict[nodegroupname].append(test_node.name())
942 auth=self.auth_root()
944 for (nodegroupname,group_nodes) in groups_dict.iteritems():
946 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
947 # first, check if the nodetagtype is here
948 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
950 tag_type_id = tag_types[0]['tag_type_id']
952 tag_type_id = self.apiserver.AddTagType(auth,
953 {'tagname':nodegroupname,
954 'description': 'for nodegroup %s'%nodegroupname,
956 print 'located tag (type)',nodegroupname,'as',tag_type_id
958 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
960 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
961 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
962 # set node tag on all nodes, value='yes'
963 for nodename in group_nodes:
965 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
967 traceback.print_exc()
968 print 'node',nodename,'seems to already have tag',nodegroupname
971 expect_yes = self.apiserver.GetNodeTags(auth,
972 {'hostname':nodename,
973 'tagname':nodegroupname},
974 ['value'])[0]['value']
975 if expect_yes != "yes":
976 print 'Mismatch node tag on node',nodename,'got',expect_yes
979 if not self.options.dry_run:
980 print 'Cannot find tag',nodegroupname,'on node',nodename
984 print 'cleaning nodegroup',nodegroupname
985 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
987 traceback.print_exc()
991 # a list of TestNode objs
992 def all_nodes (self):
994 for site_spec in self.plc_spec['sites']:
995 test_site = TestSite (self,site_spec)
996 for node_spec in site_spec['nodes']:
997 nodes.append(TestNode (self,test_site,node_spec))
1000 # return a list of tuples (nodename,qemuname)
1001 def all_node_infos (self) :
1003 for site_spec in self.plc_spec['sites']:
1004 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
1005 for node_spec in site_spec['nodes'] ]
1008 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
1009 def all_reservable_nodenames (self):
1011 for site_spec in self.plc_spec['sites']:
1012 for node_spec in site_spec['nodes']:
1013 node_fields=node_spec['node_fields']
1014 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
1015 res.append(node_fields['hostname'])
1018 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
1019 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period_seconds=15):
1020 if self.options.dry_run:
1024 class CompleterTaskBootState (CompleterTask):
1025 def __init__ (self, test_plc,hostname):
1026 self.test_plc=test_plc
1027 self.hostname=hostname
1028 self.last_boot_state='undef'
1029 def actual_run (self):
1031 node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), [ self.hostname ],
1033 self.last_boot_state = node['boot_state']
1034 return self.last_boot_state == target_boot_state
1038 return "CompleterTaskBootState with node %s"%self.hostname
1039 def failure_message (self):
1040 return "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)
1042 timeout = timedelta(minutes=timeout_minutes)
1043 graceout = timedelta(minutes=silent_minutes)
1044 period = timedelta(seconds=period_seconds)
1045 # the nodes that haven't checked yet - start with a full list and shrink over time
1046 utils.header("checking nodes boot state (expected %s)"%target_boot_state)
1047 tasks = [ CompleterTaskBootState (self,hostname) \
1048 for (hostname,_) in self.all_node_infos() ]
1049 return Completer (tasks).run (timeout, graceout, period)
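# illustrative usage (mirroring nodes_booted right below):
#   self.nodes_check_boot_state('boot', timeout_minutes=30, silent_minutes=28)
# polls GetNodes for each node every period_seconds and succeeds once they all report
# the expected boot_state, staying silent for the first 28 minutes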
1051 def nodes_booted(self):
1052 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
1054 def probe_kvm_iptables (self):
1055 (_,kvmbox) = self.all_node_infos()[0]
1056 TestSsh(kvmbox).run("iptables-save")
1060 def check_nodes_ping(self,timeout_seconds=120,period_seconds=10):
1061 class CompleterTaskPingNode (CompleterTask):
1062 def __init__ (self, hostname):
1063 self.hostname=hostname
1064 def run(self,silent):
1065 command="ping -c 1 -w 1 %s >& /dev/null"%self.hostname
1066 return utils.system (command, silent=silent)==0
1067 def failure_message (self):
1068 return "Cannot ping node with name %s"%self.hostname
1069 timeout=timedelta (seconds=timeout_seconds)
1071 period=timedelta (seconds=period_seconds)
1072 node_infos = self.all_node_infos()
1073 tasks = [ CompleterTaskPingNode (h) for (h,_) in node_infos ]
1074 return Completer (tasks).run (timeout, graceout, period)
1076 # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1077 def ping_node (self):
1079 return self.check_nodes_ping ()
1081 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period_seconds=15):
1082 class CompleterTaskNodeSsh (CompleterTask):
1083 def __init__ (self, hostname, qemuname, boot_state, local_key):
1084 self.hostname=hostname
1085 self.qemuname=qemuname
1086 self.boot_state=boot_state
1087 self.local_key=local_key
1088 def run (self, silent):
1089 command = TestSsh (self.hostname,key=self.local_key).actual_command("hostname;uname -a")
1090 return utils.system (command, silent=silent)==0
1091 def failure_message (self):
1092 return "Cannot reach %s @ %s in %s mode"%(self.hostname, self.qemuname, self.boot_state)
1095 timeout = timedelta(minutes=timeout_minutes)
1096 graceout = timedelta(minutes=silent_minutes)
1097 period = timedelta(seconds=period_seconds)
1098 vservername=self.vservername
1101 local_key = "keys/%(vservername)s-debug.rsa"%locals()
1104 local_key = "keys/key_admin.rsa"
1105 utils.header("checking ssh access to nodes (expected in %s mode)"%message)
1106 node_infos = self.all_node_infos()
1107 tasks = [ CompleterTaskNodeSsh (nodename, qemuname, message, local_key) \
1108 for (nodename,qemuname) in node_infos ]
1109 return Completer (tasks).run (timeout, graceout, period)
1111 def ssh_node_debug(self):
1112 "Tries to ssh into nodes in debug mode with the debug ssh key"
1113 return self.check_nodes_ssh(debug=True,
1114 timeout_minutes=self.ssh_node_debug_timeout,
1115 silent_minutes=self.ssh_node_debug_silent)
1117 def ssh_node_boot(self):
1118 "Tries to ssh into nodes in production mode with the root ssh key"
1119 return self.check_nodes_ssh(debug=False,
1120 timeout_minutes=self.ssh_node_boot_timeout,
1121 silent_minutes=self.ssh_node_boot_silent)
1123 def node_bmlogs(self):
1124 "Checks that there's a non-empty dir. /var/log/bm/raw"
1125 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw"))==0
1128 def qemu_local_init (self): pass
1130 def bootcd (self): pass
1132 def qemu_local_config (self): pass
1134 def nodestate_reinstall (self): pass
1136 def nodestate_safeboot (self): pass
1138 def nodestate_boot (self): pass
1140 def nodestate_show (self): pass
1142 def qemu_export (self): pass
1144 ### check hooks : invoke scripts from hooks/{node,slice}
1145 def check_hooks_node (self):
1146 return self.locate_first_node().check_hooks()
1147 def check_hooks_sliver (self) :
1148 return self.locate_first_sliver().check_hooks()
1150 def check_hooks (self):
1151 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1152 return self.check_hooks_node() and self.check_hooks_sliver()
1155 def do_check_initscripts(self):
1156 class CompleterTaskInitscript (CompleterTask):
1157 def __init__ (self, test_sliver, stamp):
1158 self.test_sliver=test_sliver
1160 def actual_run (self):
1161 return self.test_sliver.check_initscript_stamp (self.stamp)
1163 return "initscript checker for %s"%self.test_sliver.name()
1164 def failure_message (self):
1165 return "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())
1168 for slice_spec in self.plc_spec['slices']:
1169 if not slice_spec.has_key('initscriptstamp'):
1171 stamp=slice_spec['initscriptstamp']
1172 slicename=slice_spec['slice_fields']['name']
1173 for nodename in slice_spec['nodenames']:
1174 print 'nodename',nodename,'slicename',slicename,'stamp',stamp
1175 (site,node) = self.locate_node (nodename)
1176 # xxx - passing the wrong site - probably harmless
1177 test_site = TestSite (self,site)
1178 test_slice = TestSlice (self,test_site,slice_spec)
1179 test_node = TestNode (self,test_site,node)
1180 test_sliver = TestSliver (self, test_node, test_slice)
1181 tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
1182 return Completer (tasks).run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1184 def check_initscripts(self):
1185 "check that the initscripts have triggered"
1186 return self.do_check_initscripts()
1188 def initscripts (self):
1189 "create initscripts with PLCAPI"
1190 for initscript in self.plc_spec['initscripts']:
1191 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1192 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1195 def delete_initscripts (self):
1196 "delete initscripts with PLCAPI"
1197 for initscript in self.plc_spec['initscripts']:
1198 initscript_name = initscript['initscript_fields']['name']
1199 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1201 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1202 print initscript_name,'deleted'
1204 print 'deletion went wrong - probably did not exist'
1209 "create slices with PLCAPI"
1210 return self.do_slices(action="add")
1212 def delete_slices (self):
1213 "delete slices with PLCAPI"
1214 return self.do_slices(action="delete")
1216 def fill_slices (self):
1217 "add nodes in slices with PLCAPI"
1218 return self.do_slices(action="fill")
1220 def empty_slices (self):
1221 "remove nodes from slices with PLCAPI"
1222 return self.do_slices(action="empty")
1224 def do_slices (self, action="add"):
1225 for slice in self.plc_spec['slices']:
1226 site_spec = self.locate_site (slice['sitename'])
1227 test_site = TestSite(self,site_spec)
1228 test_slice=TestSlice(self,test_site,slice)
1229 if action == "delete":
1230 test_slice.delete_slice()
1231 elif action=="fill":
1232 test_slice.add_nodes()
1233 elif action=="empty":
1234 test_slice.delete_nodes()
1236 test_slice.create_slice()
1239 @slice_mapper__tasks(20,10,15)
1240 def ssh_slice(self): pass
1241 @slice_mapper__tasks(20,19,15)
1242 def ssh_slice_off (self): pass
1244 # use another name so we can exclude/ignore it from the tests on the nightly command line
1245 def ssh_slice_again(self): return self.ssh_slice()
1246 # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1247 # but for some reason the ignore-wrapping thing would not
1250 def ssh_slice_basics(self): pass
1253 def check_vsys_defaults(self): pass
1256 def keys_clear_known_hosts (self): pass
1258 def plcapi_urls (self):
1259 return PlcapiUrlScanner (self.auth_root(),ip=self.vserverip).scan()
1261 def speed_up_slices (self):
1262 "tweak nodemanager settings on all nodes using a conf file"
1263 # create the template on the server-side
1264 template="%s.nodemanager"%self.name()
1265 template_file = open (template,"w")
1266 template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
1267 template_file.close()
1268 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1269 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1270 self.test_ssh.copy_abs(template,remote)
1272 self.apiserver.AddConfFile (self.auth_root(),
1273 {'dest':'/etc/sysconfig/nodemanager',
1274 'source':'PlanetLabConf/nodemanager',
1275 'postinstall_cmd':'service nm restart',})
1278 def debug_nodemanager (self):
1279 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1280 template="%s.nodemanager"%self.name()
1281 template_file = open (template,"w")
1282 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1283 template_file.close()
1284 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1285 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1286 self.test_ssh.copy_abs(template,remote)
1290 def qemu_start (self) : pass
1293 def qemu_timestamp (self) : pass
1295 # when a spec refers to a node possibly on another plc
1296 def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
1297 for plc in [ self ] + other_plcs:
1299 return plc.locate_sliver_obj (nodename, slicename)
1302 raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)
1304 # implement this one as a cross step so that we can take advantage of different nodes
1305 # in multi-plcs mode
1306 def cross_check_tcp (self, other_plcs):
1307 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1308 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1309 utils.header ("check_tcp: no/empty config found")
1311 specs = self.plc_spec['tcp_specs']
1316 s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
1317 if not s_test_sliver.run_tcp_server(port,timeout=20):
1321 # idem for the client side
1322 c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
1323 # use nodename from located sliver, unless 'client_connect' is set
1324 if 'client_connect' in spec:
1325 destination = spec['client_connect']
1327 destination=s_test_sliver.test_node.name()
1328 if not c_test_sliver.run_tcp_client(destination,port):
1332 # painfully enough, we need to allow for some time as netflow might show up last
1333 def check_system_slice (self):
1334 "all nodes: check that a system slice is alive"
1335 # netflow currently not working in the lxc distro
1336 # drl not built at all in the wtx distro
1337 # if we find either of them we're happy
1338 return self.check_netflow() or self.check_drl()
1341 def check_netflow (self): return self._check_system_slice ('netflow')
1342 def check_drl (self): return self._check_system_slice ('drl')
1344 # we have the slices up already here, so it should not take too long
1345 def _check_system_slice (self, slicename, timeout_minutes=5, period_seconds=15):
1346 class CompleterTaskSystemSlice (CompleterTask):
1347 def __init__ (self, test_node, dry_run):
1348 self.test_node=test_node
1349 self.dry_run=dry_run
1350 def actual_run (self):
1351 return self.test_node._check_system_slice (slicename, dry_run=self.dry_run)
1353 return "System slice %s @ %s"%(slicename, self.test_node.name())
1354 def failure_message (self):
1355 return "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
1356 timeout = timedelta(minutes=timeout_minutes)
1357 silent = timedelta (0)
1358 period = timedelta (seconds=period_seconds)
1359 tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
1360 for test_node in self.all_nodes() ]
1361 return Completer (tasks) . run (timeout, silent, period)
1363 def plcsh_stress_test (self):
1364 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1365 # install the stress-test in the plc image
1366 location = "/usr/share/plc_api/plcsh_stress_test.py"
1367 remote="%s/%s"%(self.vm_root_in_host(),location)
1368 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1370 command += " -- --check"
1371 if self.options.size == 1:
1372 command += " --tiny"
1373 return ( self.run_in_guest(command) == 0)
1375 # populate runs the same utility with slightly different options
1376 # in particular runs with --preserve (don't cleanup) and without --check
1377 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1379 def sfa_install_all (self):
1380 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1381 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
1383 def sfa_install_core(self):
1385 return self.yum_install ("sfa")
1387 def sfa_install_plc(self):
1388 "yum install sfa-plc"
1389 return self.yum_install("sfa-plc")
1391 def sfa_install_sfatables(self):
1392 "yum install sfa-sfatables"
1393 return self.yum_install ("sfa-sfatables")
1395 # for some very odd reason, this sometimes fails with the following symptom
1396 # # yum install sfa-client
1397 # Setting up Install Process
1399 # Downloading Packages:
1400 # Running rpm_check_debug
1401 # Running Transaction Test
1402 # Transaction Test Succeeded
1403 # Running Transaction
1404 # Transaction couldn't start:
1405 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1406 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1407 # even though in the same context I have
1408 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1409 # Filesystem Size Used Avail Use% Mounted on
1410 # /dev/hdv1 806G 264G 501G 35% /
1411 # none 16M 36K 16M 1% /tmp
1413 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1414 def sfa_install_client(self):
1415 "yum install sfa-client"
1416 first_try=self.yum_install("sfa-client")
1417 if first_try: return True
1418 utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
1419 (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1420 utils.header("rpm_path=<<%s>>"%rpm_path)
1422 self.run_in_guest("rpm -i %s"%cached_rpm_path)
1423 return self.yum_check_installed ("sfa-client")
1425 def sfa_dbclean(self):
1426 "thoroughly wipes off the SFA database"
1427 return self.run_in_guest("sfaadmin reg nuke")==0 or \
1428 self.run_in_guest("sfa-nuke.py")==0 or \
1429 self.run_in_guest("sfa-nuke-plc.py")==0
1431 def sfa_fsclean(self):
1432 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1433 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1436 def sfa_plcclean(self):
1437 "cleans the PLC entries that were created as a side effect of running the script"
1439 sfa_spec=self.plc_spec['sfa']
1441 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1442 login_base=auth_sfa_spec['login_base']
1443 try: self.apiserver.DeleteSite (self.auth_root(),login_base)
1444 except: print "Site %s already absent from PLC db"%login_base
1446 for spec_name in ['pi_spec','user_spec']:
1447 user_spec=auth_sfa_spec[spec_name]
1448 username=user_spec['email']
1449 try: self.apiserver.DeletePerson(self.auth_root(),username)
1451 # this in fact is expected as sites delete their members
1452 #print "User %s already absent from PLC db"%username
1455 print "REMEMBER TO RUN sfa_import AGAIN"
1458 def sfa_uninstall(self):
1459 "uses rpm to uninstall sfa - ignore result"
1460 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1461 self.run_in_guest("rm -rf /var/lib/sfa")
1462 self.run_in_guest("rm -rf /etc/sfa")
1463 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1465 self.run_in_guest("rpm -e --noscripts sfa-plc")
1468 ### run unit tests for SFA
1469 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1470 # Running Transaction
1471 # Transaction couldn't start:
1472 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1473 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1474 # no matter how many Gbs are available on the testplc
1475 # could not figure out what's wrong, so...
1476 # if the yum install phase fails, consider the test is successful
1477 # other combinations will eventually run it hopefully
1478 def sfa_utest(self):
1479 "yum install sfa-tests and run SFA unittests"
1480 self.run_in_guest("yum -y install sfa-tests")
1481 # failed to install - forget it
1482 if self.run_in_guest("rpm -q sfa-tests")!=0:
1483 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1485 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1489 dirname="conf.%s"%self.plc_spec['name']
1490 if not os.path.isdir(dirname):
1491 utils.system("mkdir -p %s"%dirname)
1492 if not os.path.isdir(dirname):
1493 raise Exception,"Cannot create config dir for plc %s"%self.name()
1496 def conffile(self,filename):
1497 return "%s/%s"%(self.confdir(),filename)
1498 def confsubdir(self,dirname,clean,dry_run=False):
1499 subdirname="%s/%s"%(self.confdir(),dirname)
1501 utils.system("rm -rf %s"%subdirname)
1502 if not os.path.isdir(subdirname):
1503 utils.system("mkdir -p %s"%subdirname)
1504 if not dry_run and not os.path.isdir(subdirname):
1505 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1508 def conffile_clean (self,filename):
1509 filename=self.conffile(filename)
1510 return utils.system("rm -rf %s"%filename)==0
1513 def sfa_configure(self):
1514 "run sfa-config-tty"
1515 tmpname=self.conffile("sfa-config-tty")
1516 fileconf=open(tmpname,'w')
1517 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1518 'SFA_INTERFACE_HRN',
1519 'SFA_REGISTRY_LEVEL1_AUTH',
1520 'SFA_REGISTRY_HOST',
1521 'SFA_AGGREGATE_HOST',
1531 'SFA_GENERIC_FLAVOUR',
1532 'SFA_AGGREGATE_ENABLED',
1534 if self.plc_spec['sfa'].has_key(var):
1535 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1536 # the way plc_config handles booleans just sucks..
1539 if self.plc_spec['sfa'][var]: val='true'
1540 fileconf.write ('e %s\n%s\n'%(var,val))
1541 fileconf.write('w\n')
1542 fileconf.write('R\n')
1543 fileconf.write('q\n')
1545 utils.system('cat %s'%tmpname)
1546 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1549 def aggregate_xml_line(self):
1550 port=self.plc_spec['sfa']['neighbours-port']
1551 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1552 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
1554 def registry_xml_line(self):
1555 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1556 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
1559 # a cross step that takes all other plcs in argument
1560 def cross_sfa_configure(self, other_plcs):
1561 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1562 # of course with a single plc, other_plcs is an empty list
1565 agg_fname=self.conffile("agg.xml")
1566 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1567 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1568 utils.header ("(Over)wrote %s"%agg_fname)
1569 reg_fname=self.conffile("reg.xml")
1570 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1571 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1572 utils.header ("(Over)wrote %s"%reg_fname)
1573 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
1574 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
1576 def sfa_import(self):
1577 "use sfaadmin to import from plc"
1578 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1579 return self.run_in_guest('sfaadmin reg import_registry')==0
1581 def sfa_start(self):
1583 return self.start_service('sfa')
1586 def sfi_configure(self):
1587 "Create /root/sfi on the plc side for sfi client configuration"
1588 if self.options.dry_run:
1589 utils.header("DRY RUN - skipping step")
1591 sfa_spec=self.plc_spec['sfa']
1592 # cannot use auth_sfa_mapper to pass dir_name
1593 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1594 test_slice=TestAuthSfa(self,slice_spec)
1595 dir_basename=os.path.basename(test_slice.sfi_path())
1596 dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
1597 test_slice.sfi_configure(dir_name)
1598 # push into the remote /root/sfi area
1599 location = test_slice.sfi_path()
1600 remote="%s/%s"%(self.vm_root_in_host(),location)
1601 self.test_ssh.mkdir(remote,abs=True)
1602 # need to strip last level of remote otherwise we get an extra dir level
1603 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1607 def sfi_clean (self):
1608 "clean up /root/sfi on the plc side"
1609 self.run_in_guest("rm -rf /root/sfi")
1613 def sfa_add_site (self): pass
1615 def sfa_add_pi (self): pass
1617 def sfa_add_user(self): pass
1619 def sfa_update_user(self): pass
1621 def sfa_add_slice(self): pass
1623 def sfa_renew_slice(self): pass
1625 def sfa_discover(self): pass
1627 def sfa_create_slice(self): pass
1629 def sfa_check_slice_plc(self): pass
1631 def sfa_update_slice(self): pass
1633 def sfi_list(self): pass
1635 def sfi_show(self): pass
1637 def ssh_slice_sfa(self): pass
1639 def sfa_delete_user(self): pass
1641 def sfa_delete_slice(self): pass
1645 return self.stop_service ('sfa')
1647 def populate (self):
1648 "creates random entries in the PLCAPI"
1649 # install the stress-test in the plc image
1650 location = "/usr/share/plc_api/plcsh_stress_test.py"
1651 remote="%s/%s"%(self.vm_root_in_host(),location)
1652 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1654 command += " -- --preserve --short-names"
1655 local = (self.run_in_guest(command) == 0);
1656 # second run with --foreign
1657 command += ' --foreign'
1658 remote = (self.run_in_guest(command) == 0);
1659 return ( local and remote)
1661 def gather_logs (self):
1662 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1663 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1664 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1665 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1666 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1667 # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
1668 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1670 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1671 self.gather_var_logs ()
1673 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1674 self.gather_pgsql_logs ()
1676 print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
1677 self.gather_root_sfi ()
1679 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1680 for site_spec in self.plc_spec['sites']:
1681 test_site = TestSite (self,site_spec)
1682 for node_spec in site_spec['nodes']:
1683 test_node=TestNode(self,test_site,node_spec)
1684 test_node.gather_qemu_logs()
1686 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1687 self.gather_nodes_var_logs()
1689 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1690 self.gather_slivers_var_logs()
1693 def gather_slivers_var_logs(self):
1694 for test_sliver in self.all_sliver_objs():
1695 remote = test_sliver.tar_var_logs()
1696 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1697 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1698 utils.system(command)
1701 def gather_var_logs (self):
1702 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1703 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1704 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1705 utils.system(command)
1706 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1707 utils.system(command)
1709 def gather_pgsql_logs (self):
1710 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1711 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1712 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1713 utils.system(command)
1715 def gather_root_sfi (self):
1716 utils.system("mkdir -p logs/sfi.%s"%self.name())
1717 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1718 command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
1719 utils.system(command)
1721 def gather_nodes_var_logs (self):
1722 for site_spec in self.plc_spec['sites']:
1723 test_site = TestSite (self,site_spec)
1724 for node_spec in site_spec['nodes']:
1725 test_node=TestNode(self,test_site,node_spec)
1726 test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
1727 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1728 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1729 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1730 utils.system(command)
1733 # returns the filename to use for sql dump/restore, using options.dbname if set
1734 def dbfile (self, database):
1735 # uses options.dbname if it is found
1737 name=self.options.dbname
1738 if not isinstance(name,StringTypes):
1744 return "/root/%s-%s.sql"%(database,name)
1746 def plc_db_dump(self):
1747 'dump the planetlab5 DB in /root in the PLC - filename has time'
1748 dump=self.dbfile("planetlab5")
1749 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1750 utils.header('Dumped planetlab5 database in %s'%dump)
1753 def plc_db_restore(self):
1754 'restore the planetlab5 DB - looks broken, but run -n might help'
1755 dump=self.dbfile("planetlab5")
1756 ##stop httpd service
1757 self.run_in_guest('service httpd stop')
1758 # xxx - need another wrapper
1759 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1760 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1761 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1762 ##starting httpd service
1763 self.run_in_guest('service httpd start')
1765 utils.header('Database restored from ' + dump)
1768 def create_ignore_steps ():
1769 for step in TestPlc.default_steps + TestPlc.other_steps:
1770 # default step can have a plc qualifier
1771 if '@' in step: (step,qualifier)=step.split('@')
1772 # or be defined as forced or ignored by default
1773 for keyword in ['_ignore','_force']:
1774 if step.endswith (keyword): step=step.replace(keyword,'')
1775 if step == SEP or step == SEPSFA : continue
1776 method=getattr(TestPlc,step)
1778 wrapped=ignore_result(method)
1779 # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
1780 setattr(TestPlc, name, wrapped)
1783 # def ssh_slice_again_ignore (self): pass
1785 # def check_initscripts_ignore (self): pass
1787 def standby_1_through_20(self):
1788 """convenience function to wait for a specified number of minutes"""
1791 def standby_1(): pass
1793 def standby_2(): pass
1795 def standby_3(): pass
1797 def standby_4(): pass
1799 def standby_5(): pass
1801 def standby_6(): pass
1803 def standby_7(): pass
1805 def standby_8(): pass
1807 def standby_9(): pass
1809 def standby_10(): pass
1811 def standby_11(): pass
1813 def standby_12(): pass
1815 def standby_13(): pass
1817 def standby_14(): pass
1819 def standby_15(): pass
1821 def standby_16(): pass
1823 def standby_17(): pass
1825 def standby_18(): pass
1827 def standby_19(): pass
1829 def standby_20(): pass
1831 # convenience for debugging the test logic
1832 def yes (self): return True
1833 def no (self): return False