1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
10 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
24 from Completer import Completer, CompleterTask
26 # step methods must take (self) and return a boolean (options is a member of the class)
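# as an illustration only (this is not an actual step of this class), a minimal step method
# following that convention could look like:
#     def plc_noop (self):
#         "one-line doc string, picked up by the steps listing"
#         return self.run_in_host("true") == 0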
28 def standby(minutes,dry_run):
29 utils.header('Entering StandBy for %d minutes'%minutes)
33 time.sleep(60*minutes)
36 def standby_generic (func):
38 minutes=int(func.__name__.split("_")[1])
39 return standby(minutes,self.options.dry_run)
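# e.g. the standby_4 step defined at the bottom of this class (presumably decorated with
# standby_generic) ends up calling standby(4, self.options.dry_run), the number of minutes
# being parsed out of the method name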
42 def node_mapper (method):
43 def map_on_nodes(self,*args, **kwds):
45 node_method = TestNode.__dict__[method.__name__]
46 for test_node in self.all_nodes():
47 if not node_method(test_node, *args, **kwds): overall=False
49 # maintain __name__ for ignore_result
50 map_on_nodes.__name__=method.__name__
51 # restore the doc text
52 map_on_nodes.__doc__=TestNode.__dict__[method.__name__].__doc__
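# typical use, as presumably done further down for the qemu_* and nodestate_* steps:
#     @node_mapper
#     def qemu_start (self) : pass
# the 'pass' body never runs; the wrapper dispatches to TestNode.qemu_start on every node
# and reports False if any of them failed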
55 def slice_mapper (method):
56 def map_on_slices(self):
58 slice_method = TestSlice.__dict__[method.__name__]
59 for slice_spec in self.plc_spec['slices']:
60 site_spec = self.locate_site (slice_spec['sitename'])
61 test_site = TestSite(self,site_spec)
62 test_slice=TestSlice(self,test_site,slice_spec)
63 if not slice_method(test_slice,self.options): overall=False
65 # maintain __name__ for ignore_result
66 map_on_slices.__name__=method.__name__
67 # restore the doc text
68 map_on_slices.__doc__=TestSlice.__dict__[method.__name__].__doc__
71 # run a step but return True so that we can go on
72 def ignore_result (method):
74 # ssh_slice_ignore->ssh_slice
75 ref_name=method.__name__.replace('_ignore','').replace('force_','')
76 ref_method=TestPlc.__dict__[ref_name]
77 result=ref_method(self)
78 print "Actual (but ignored) result for %(ref_name)s is %(result)s"%locals()
79 return Ignored (result)
80 name=method.__name__.replace('_ignore','').replace('force_','')
81 ignoring.__name__=name
82 ignoring.__doc__="ignored version of " + name
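# sketch of the intended effect, assuming create_ignore_steps below has wired things up:
# 'ssh_slice_again_ignore' runs the regular ssh_slice_again step, prints its outcome,
# and returns an Ignored result so that the overall run does not count it as a failure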
85 # a variant that expects the TestSlice method to return a list of CompleterTasks that
86 # are then merged into a single Completer run, to avoid waiting for all the slices
87 # one after the other - especially useful when a test fails, of course
88 # because we need to pass arguments, we use a class instead of a plain function decorator..
89 class slice_mapper__tasks (object):
90 # could not get this to work with named arguments
91 def __init__ (self,timeout_minutes,silent_minutes,period_seconds):
92 self.timeout=timedelta(minutes=timeout_minutes)
93 self.silent=timedelta(minutes=silent_minutes)
94 self.period=timedelta(seconds=period_seconds)
95 def __call__ (self, method):
97 # compute augmented method name
98 method_name = method.__name__ + "__tasks"
100 slice_method = TestSlice.__dict__[ method_name ]
103 for slice_spec in self.plc_spec['slices']:
104 site_spec = self.locate_site (slice_spec['sitename'])
105 test_site = TestSite(self,site_spec)
106 test_slice=TestSlice(self,test_site,slice_spec)
107 tasks += slice_method (test_slice, self.options)
108 return Completer (tasks).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
109 # restore the doc text from the TestSlice method even if a bit odd
110 wrappee.__doc__ = slice_method.__doc__
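# used further down for the slice-reachability steps, e.g.:
#     @slice_mapper__tasks(20,10,15)
#     def ssh_slice(self): pass
# i.e. a 20-minute overall timeout, 10 minutes of silence, one poll every 15 seconds,
# with all slices checked within a single Completer run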
113 def auth_sfa_mapper (method):
116 auth_method = TestAuthSfa.__dict__[method.__name__]
117 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
118 test_auth=TestAuthSfa(self,auth_spec)
119 if not auth_method(test_auth,self.options): overall=False
121 # restore the doc text
122 actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
126 def __init__ (self,result):
136 'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
137 'plc_install', 'plc_configure', 'plc_start', SEP,
138 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
139 'plcapi_urls','speed_up_slices', SEP,
140 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
141 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
142 # keep this out of the way for now
143 'check_vsys_defaults_ignore', SEP,
144 # run this first off so it's easier to re-run on another qemu box
145 'qemu_kill_mine', SEP,
146 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
147 'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', SEP,
148 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
149 'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
150 'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_renew_slice@1', SEPSFA,
151 'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
152 'sfi_list@1', 'sfi_show@1', 'sfa_utest@1', SEPSFA,
153 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
154 # but as the stress test might take a while, we sometimes missed the debug mode..
155 'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
156 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts_ignore', SEP,
157 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
158 'cross_check_tcp@1', 'check_system_slice', SEP,
159 # check slices are turned off properly
160 'empty_slices', 'ssh_slice_off', SEP,
161 # check they are properly re-created with the same name
162 'fill_slices', 'ssh_slice_again_ignore', SEP,
163 'gather_logs_force', SEP,
166 'export', 'show_boxes', SEP,
167 'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
168 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
169 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
170 'delete_leases', 'list_leases', SEP,
172 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
173 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
174 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
175 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
176 'plc_db_dump' , 'plc_db_restore', SEP,
177 'check_netflow','check_drl', SEP,
178 'debug_nodemanager', SEP,
179 'standby_1_through_20','yes','no',SEP,
183 def printable_steps (list):
184 single_line=" ".join(list)+" "
185 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
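# in other words, the step names are joined on a single line and every SEP/SEPSFA marker
# becomes a ' \' + newline, so that a long list of steps prints as shell-style continuation lines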
187 def valid_step (step):
188 return step != SEP and step != SEPSFA
190 # turn off the sfa-related steps when build has skipped SFA
191 # this was originally for centos5 but is still valid
192 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
194 def check_whether_build_has_sfa (rpms_url):
195 utils.header ("Checking if build provides SFA package...")
196 # warning, we're now building 'sface' so let's be a bit more picky
197 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
198 # full builds are expected to return with 0 here
200 utils.header("build does provide SFA")
202 # move all steps containing 'sfa' from default_steps to other_steps
203 utils.header("SFA package not found - removing steps with sfa or sfi")
204 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
205 TestPlc.other_steps += sfa_steps
206 for step in sfa_steps: TestPlc.default_steps.remove(step)
208 def __init__ (self,plc_spec,options):
209 self.plc_spec=plc_spec
211 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
212 self.vserverip=plc_spec['vserverip']
213 self.vservername=plc_spec['vservername']
214 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
215 self.apiserver=TestApiserver(self.url,options.dry_run)
216 (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
217 (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']
219 def has_addresses_api (self):
220 return self.apiserver.has_method('AddIpAddress')
223 name=self.plc_spec['name']
224 return "%s.%s"%(name,self.vservername)
227 return self.plc_spec['host_box']
230 return self.test_ssh.is_local()
232 # define the API methods on this object through xmlrpc
233 # would help, but not strictly necessary
237 def actual_command_in_guest (self,command, backslash=False):
238 raw1=self.host_to_guest(command)
239 raw2=self.test_ssh.actual_command(raw1,dry_run=self.options.dry_run, backslash=backslash)
242 def start_guest (self):
243 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),dry_run=self.options.dry_run))
245 def stop_guest (self):
246 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),dry_run=self.options.dry_run))
248 def run_in_guest (self,command,backslash=False):
249 raw=self.actual_command_in_guest(command,backslash)
250 return utils.system(raw)
252 def run_in_host (self,command):
253 return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
255 # backslashing turned out so awful at some point that I've turned off auto-backslashing
256 # see e.g. plc_start esp. the version for f14
257 #command gets run in the plc's vm
258 def host_to_guest(self,command):
259 # f14 still needs some extra help
260 if self.options.fcdistro == 'f14':
261 raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin %s" %(self.vservername,command)
263 raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env %s" %(self.vservername,command)
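# so, for a hypothetical command 'ls /etc/planetlab' and a guest named 'myvplc', this produces
# something like: virsh -c lxc:/// lxc-enter-namespace myvplc -- /usr/bin/env ls /etc/planetlab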
266 # this /vservers thing is legacy...
267 def vm_root_in_host(self):
268 return "/vservers/%s/"%(self.vservername)
270 def vm_timestamp_path (self):
271 return "/vservers/%s/%s.timestamp"%(self.vservername,self.vservername)
273 #start/stop the vserver
274 def start_guest_in_host(self):
275 return "virsh -c lxc:/// start %s"%(self.vservername)
277 def stop_guest_in_host(self):
278 return "virsh -c lxc:/// destroy %s"%(self.vservername)
281 def run_in_guest_piped (self,local,remote):
282 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
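# e.g. plc_configure below uses run_in_guest_piped('cat <tmpname>','plc-config-tty')
# to feed a locally generated answer file into the interactive configurator running in the guest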
284 def yum_check_installed (self, rpms):
285 if isinstance (rpms, list):
287 return self.run_in_guest("rpm -q %s"%rpms)==0
289 # does a yum install in the vs, ignore yum retcod, check with rpm
290 def yum_install (self, rpms):
291 if isinstance (rpms, list):
293 self.run_in_guest("yum -y install %s"%rpms)
294 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
295 self.run_in_guest("yum-complete-transaction -y")
296 return self.yum_check_installed (rpms)
298 def auth_root (self):
299 return {'Username':self.plc_spec['PLC_ROOT_USER'],
300 'AuthMethod':'password',
301 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
302 'Role' : self.plc_spec['role']
304 def locate_site (self,sitename):
305 for site in self.plc_spec['sites']:
306 if site['site_fields']['name'] == sitename:
308 if site['site_fields']['login_base'] == sitename:
310 raise Exception,"Cannot locate site %s"%sitename
312 def locate_node (self,nodename):
313 for site in self.plc_spec['sites']:
314 for node in site['nodes']:
315 if node['name'] == nodename:
317 raise Exception,"Cannot locate node %s"%nodename
319 def locate_hostname (self,hostname):
320 for site in self.plc_spec['sites']:
321 for node in site['nodes']:
322 if node['node_fields']['hostname'] == hostname:
324 raise Exception,"Cannot locate hostname %s"%hostname
326 def locate_key (self,key_name):
327 for key in self.plc_spec['keys']:
328 if key['key_name'] == key_name:
330 raise Exception,"Cannot locate key %s"%key_name
332 def locate_private_key_from_key_names (self, key_names):
333 # locate the first avail. key
335 for key_name in key_names:
336 key_spec=self.locate_key(key_name)
337 test_key=TestKey(self,key_spec)
338 publickey=test_key.publicpath()
339 privatekey=test_key.privatepath()
340 if os.path.isfile(publickey) and os.path.isfile(privatekey):
342 if found: return privatekey
345 def locate_slice (self, slicename):
346 for slice in self.plc_spec['slices']:
347 if slice['slice_fields']['name'] == slicename:
349 raise Exception,"Cannot locate slice %s"%slicename
351 def all_sliver_objs (self):
353 for slice_spec in self.plc_spec['slices']:
354 slicename = slice_spec['slice_fields']['name']
355 for nodename in slice_spec['nodenames']:
356 result.append(self.locate_sliver_obj (nodename,slicename))
359 def locate_sliver_obj (self,nodename,slicename):
360 (site,node) = self.locate_node(nodename)
361 slice = self.locate_slice (slicename)
363 test_site = TestSite (self, site)
364 test_node = TestNode (self, test_site,node)
365 # xxx the slice site is assumed to be the node site - mhh - probably harmless
366 test_slice = TestSlice (self, test_site, slice)
367 return TestSliver (self, test_node, test_slice)
369 def locate_first_node(self):
370 nodename=self.plc_spec['slices'][0]['nodenames'][0]
371 (site,node) = self.locate_node(nodename)
372 test_site = TestSite (self, site)
373 test_node = TestNode (self, test_site,node)
376 def locate_first_sliver (self):
377 slice_spec=self.plc_spec['slices'][0]
378 slicename=slice_spec['slice_fields']['name']
379 nodename=slice_spec['nodenames'][0]
380 return self.locate_sliver_obj(nodename,slicename)
382 # all different hostboxes used in this plc
383 def get_BoxNodes(self):
384 # maps on sites and nodes, return [ (host_box,test_node) ]
386 for site_spec in self.plc_spec['sites']:
387 test_site = TestSite (self,site_spec)
388 for node_spec in site_spec['nodes']:
389 test_node = TestNode (self, test_site, node_spec)
390 if not test_node.is_real():
391 tuples.append( (test_node.host_box(),test_node) )
392 # transform into a dict { 'host_box' -> [ test_node .. ] }
394 for (box,node) in tuples:
395 if not result.has_key(box):
398 result[box].append(node)
401 # a step for checking this stuff
402 def show_boxes (self):
403 'print summary of nodes location'
404 for (box,nodes) in self.get_BoxNodes().iteritems():
405 print box,":"," + ".join( [ node.name() for node in nodes ] )
408 # make this a valid step
409 def qemu_kill_all(self):
410 'kill all qemu instances on the qemu boxes involved in this setup'
411 # this is the brute force version, kill all qemus on that host box
412 for (box,nodes) in self.get_BoxNodes().iteritems():
413 # pass the first nodename, as we don't push template-qemu on testboxes
414 nodedir=nodes[0].nodedir()
415 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
418 # make this a valid step
419 def qemu_list_all(self):
420 'list all qemu instances on the qemu boxes involved in this setup'
421 for (box,nodes) in self.get_BoxNodes().iteritems():
422 # this is the brute force version, list all qemus on that host box
423 TestBoxQemu(box,self.options.buildname).qemu_list_all()
426 # list only the qemus related to this test
427 def qemu_list_mine(self):
428 'list qemu instances for our nodes'
429 for (box,nodes) in self.get_BoxNodes().iteritems():
430 # the fine-grain version
435 # clean up only the qemus related to this test
436 def qemu_clean_mine(self):
437 'cleanup (rm -rf) qemu instances for our nodes'
438 for (box,nodes) in self.get_BoxNodes().iteritems():
439 # the fine-grain version
444 # kill only the right qemus
445 def qemu_kill_mine(self):
446 'kill the qemu instances for our nodes'
447 for (box,nodes) in self.get_BoxNodes().iteritems():
448 # the fine-grain version
453 #################### display config
455 "show test configuration after localization"
461 # ugly hack to make sure 'run export' only reports about the 1st plc
461 # to avoid confusion - also we use 'inri_slice1' in various aliases..
464 "print cut'n paste-able stuff to export env variables to your shell"
465 # guess local domain from hostname
466 if TestPlc.exported_id>1:
467 print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
469 TestPlc.exported_id+=1
470 domain=socket.gethostname().split('.',1)[1]
471 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
472 print "export BUILD=%s"%self.options.buildname
473 print "export PLCHOSTLXC=%s"%fqdn
474 print "export GUESTNAME=%s"%self.plc_spec['vservername']
475 vplcname=self.plc_spec['vservername'].split('-')[-1]
476 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
477 # find hostname of first node
478 (hostname,qemubox) = self.all_node_infos()[0]
479 print "export KVMHOST=%s.%s"%(qemubox,domain)
480 print "export NODE=%s"%(hostname)
484 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
485 def show_pass (self,passno):
486 for (key,val) in self.plc_spec.iteritems():
487 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
491 self.display_site_spec(site)
492 for node in site['nodes']:
493 self.display_node_spec(node)
494 elif key=='initscripts':
495 for initscript in val:
496 self.display_initscript_spec (initscript)
499 self.display_slice_spec (slice)
502 self.display_key_spec (key)
504 if key not in ['sites','initscripts','slices','keys', 'sfa']:
505 print '+ ',key,':',val
507 def display_site_spec (self,site):
508 print '+ ======== site',site['site_fields']['name']
509 for (k,v) in site.iteritems():
510 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
513 print '+ ','nodes : ',
515 print node['node_fields']['hostname'],'',
521 print user['name'],'',
523 elif k == 'site_fields':
524 print '+ login_base',':',v['login_base']
525 elif k == 'address_fields':
531 def display_initscript_spec (self,initscript):
532 print '+ ======== initscript',initscript['initscript_fields']['name']
534 def display_key_spec (self,key):
535 print '+ ======== key',key['key_name']
537 def display_slice_spec (self,slice):
538 print '+ ======== slice',slice['slice_fields']['name']
539 for (k,v) in slice.iteritems():
552 elif k=='slice_fields':
553 print '+ fields',':',
554 print 'max_nodes=',v['max_nodes'],
559 def display_node_spec (self,node):
560 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
561 print "hostname=",node['node_fields']['hostname'],
562 print "ip=",node['interface_fields']['ip']
563 if self.options.verbose:
564 utils.pprint("node details",node,depth=3)
566 # another entry point for just showing the boxes involved
567 def display_mapping (self):
568 TestPlc.display_mapping_plc(self.plc_spec)
572 def display_mapping_plc (plc_spec):
573 print '+ MyPLC',plc_spec['name']
574 # WARNING this would not be right for lxc-based PLC's - should be harmless though
575 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
576 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
577 for site_spec in plc_spec['sites']:
578 for node_spec in site_spec['nodes']:
579 TestPlc.display_mapping_node(node_spec)
582 def display_mapping_node (node_spec):
583 print '+ NODE %s'%(node_spec['name'])
584 print '+\tqemu box %s'%node_spec['host_box']
585 print '+\thostname=%s'%node_spec['node_fields']['hostname']
587 # write a timestamp in /vservers/<>.timestamp
588 # cannot be inside the vserver, that causes vserver .. build to cough
589 def plcvm_timestamp (self):
590 "Create a timestamp to remember creation date for this plc"
592 # TODO-lxc check this one
593 # a first approx. is to store the timestamp close to the VM root like vs does
594 stamp_path=self.vm_timestamp_path ()
595 stamp_dir = os.path.dirname (stamp_path)
596 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
597 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
599 # this is called unconditionally at the beginning of the test sequence
600 # just in case this is a rerun, so if the vm is not running it's fine
601 def plcvm_delete(self):
602 "vserver delete the test myplc"
603 stamp_path=self.vm_timestamp_path()
604 self.run_in_host("rm -f %s"%stamp_path)
605 self.run_in_host("virsh -c lxc:// destroy %s"%self.vservername)
606 self.run_in_host("virsh -c lxc:// undefine %s"%self.vservername)
607 self.run_in_host("rm -fr /vservers/%s"%self.vservername)
611 # historically the build was being fetched by the tests
612 # now the build pushes itself as a subdir of the tests workdir
613 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
614 def plcvm_create (self):
615 "vserver creation (no install done)"
616 # push the local build/ dir to the testplc box
618 # a full path for the local calls
619 build_dir=os.path.dirname(sys.argv[0])
620 # sometimes this is empty - set to "." in such a case
621 if not build_dir: build_dir="."
622 build_dir += "/build"
624 # use a standard name - will be relative to remote buildname
626 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
627 self.test_ssh.rmdir(build_dir)
628 self.test_ssh.copy(build_dir,recursive=True)
629 # the repo url is taken from arch-rpms-url
630 # with the last step (i386) removed
631 repo_url = self.options.arch_rpms_url
632 for level in [ 'arch' ]:
633 repo_url = os.path.dirname(repo_url)
635 # invoke initvm (drop support for vs)
636 script="lbuild-initvm.sh"
638 # pass the vbuild-nightly options to [lv]test-initvm
639 script_options += " -p %s"%self.options.personality
640 script_options += " -d %s"%self.options.pldistro
641 script_options += " -f %s"%self.options.fcdistro
642 script_options += " -r %s"%repo_url
643 vserver_name = self.vservername
645 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
646 script_options += " -n %s"%vserver_hostname
648 print "Cannot reverse lookup %s"%self.vserverip
649 print "This is considered fatal, as this might pollute the test results"
651 create_vserver="%(build_dir)s/%(script)s %(script_options)s %(vserver_name)s"%locals()
652 return self.run_in_host(create_vserver) == 0
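# for illustration, with hypothetical option values the assembled command line resembles:
#     ./build/lbuild-initvm.sh -p linux64 -d onelab -f f20 -r http://mirror.example.org/build -n vplc01.example.org <vservername>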
655 def plc_install(self):
656 "yum install myplc, noderepo, and the plain bootstrapfs"
658 # workaround for getting pgsql8.2 on centos5
659 if self.options.fcdistro == "centos5":
660 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
663 if self.options.personality == "linux32":
665 elif self.options.personality == "linux64":
668 raise Exception, "Unsupported personality %r"%self.options.personality
669 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
672 pkgs_list.append ("slicerepo-%s"%nodefamily)
673 pkgs_list.append ("myplc")
674 pkgs_list.append ("noderepo-%s"%nodefamily)
675 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
676 pkgs_string=" ".join(pkgs_list)
677 return self.yum_install (pkgs_list)
680 def mod_python(self):
681 """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
682 return self.yum_install ( [ 'mod_python' ] )
685 def plc_configure(self):
687 tmpname='%s.plc-config-tty'%(self.name())
688 fileconf=open(tmpname,'w')
689 for var in [ 'PLC_NAME',
694 'PLC_MAIL_SUPPORT_ADDRESS',
697 # Above line was added for integrating SFA Testing
703 'PLC_RESERVATION_GRANULARITY',
705 'PLC_OMF_XMPP_SERVER',
708 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
709 fileconf.write('w\n')
710 fileconf.write('q\n')
712 utils.system('cat %s'%tmpname)
713 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
714 utils.system('rm %s'%tmpname)
717 # f14 is a bit odd in this respect, although this worked fine in guests up to f18
718 # however using a vplc guest under f20 requires this trick
719 # the symptom is this: service plc start
720 # Starting plc (via systemctl): Failed to get D-Bus connection: \
721 # Failed to connect to socket /org/freedesktop/systemd1/private: Connection refused
722 # weird thing is the doc says f14 uses upstart by default and not systemd
723 # so this sounds kind of harmless
724 def start_service (self,service): return self.start_stop_service (service,'start')
725 def stop_service (self,service): return self.start_stop_service (service,'stop')
727 def start_stop_service (self, service,start_or_stop):
728 "utility to start/stop a service with the special trick for f14"
729 if self.options.fcdistro != 'f14':
730 return self.run_in_guest ("service %s %s"%(service,start_or_stop))==0
732 # patch /sbin/service so it does not reset environment
733 self.run_in_guest ('sed -i -e \\"s,env -i,env,\\" /sbin/service')
734 # this is because our own scripts in turn call service
735 return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service %s %s"%(service,start_or_stop))==0
739 return self.start_service ('plc')
743 return self.stop_service ('plc')
745 def plcvm_start (self):
746 "start the PLC vserver"
750 def plcvm_stop (self):
751 "stop the PLC vserver"
755 # stores the keys from the config for further use
756 def keys_store(self):
757 "stores test users ssh keys in keys/"
758 for key_spec in self.plc_spec['keys']:
759 TestKey(self,key_spec).store_key()
762 def keys_clean(self):
763 "removes keys cached in keys/"
764 utils.system("rm -rf ./keys")
767 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
768 # for later direct access to the nodes
769 def keys_fetch(self):
770 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
772 if not os.path.isdir(dir):
774 vservername=self.vservername
775 vm_root=self.vm_root_in_host()
777 prefix = 'debug_ssh_key'
778 for ext in [ 'pub', 'rsa' ] :
779 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
780 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
781 if self.test_ssh.fetch(src,dst) != 0: overall=False
785 "create sites with PLCAPI"
786 return self.do_sites()
788 def delete_sites (self):
789 "delete sites with PLCAPI"
790 return self.do_sites(action="delete")
792 def do_sites (self,action="add"):
793 for site_spec in self.plc_spec['sites']:
794 test_site = TestSite (self,site_spec)
795 if (action != "add"):
796 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
797 test_site.delete_site()
798 # deleted with the site
799 #test_site.delete_users()
802 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
803 test_site.create_site()
804 test_site.create_users()
807 def delete_all_sites (self):
808 "Delete all sites in PLC, and related objects"
809 print 'auth_root',self.auth_root()
810 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
812 # keep the automatic site - otherwise we'd shoot ourselves in the foot, as root_auth would not be valid anymore
813 if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
814 site_id=site['site_id']
815 print 'Deleting site_id',site_id
816 self.apiserver.DeleteSite(self.auth_root(),site_id)
820 "create nodes with PLCAPI"
821 return self.do_nodes()
822 def delete_nodes (self):
823 "delete nodes with PLCAPI"
824 return self.do_nodes(action="delete")
826 def do_nodes (self,action="add"):
827 for site_spec in self.plc_spec['sites']:
828 test_site = TestSite (self,site_spec)
830 utils.header("Deleting nodes in site %s"%test_site.name())
831 for node_spec in site_spec['nodes']:
832 test_node=TestNode(self,test_site,node_spec)
833 utils.header("Deleting %s"%test_node.name())
834 test_node.delete_node()
836 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
837 for node_spec in site_spec['nodes']:
838 utils.pprint('Creating node %s'%node_spec,node_spec)
839 test_node = TestNode (self,test_site,node_spec)
840 test_node.create_node ()
843 def nodegroups (self):
844 "create nodegroups with PLCAPI"
845 return self.do_nodegroups("add")
846 def delete_nodegroups (self):
847 "delete nodegroups with PLCAPI"
848 return self.do_nodegroups("delete")
852 def translate_timestamp (start,grain,timestamp):
853 if timestamp < TestPlc.YEAR: return start+timestamp*grain
854 else: return timestamp
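# i.e. small values in the 'leases' spec are taken as offsets, in grains, from the rounded-down
# current time, whereas values larger than TestPlc.YEAR are assumed to already be absolute epoch
# timestamps; e.g. with grain=1800, a spec value of 2 maps to start + 2*1800 seconds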
857 def timestamp_printable (timestamp):
858 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
861 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
863 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
864 print 'API answered grain=',grain
865 start=(now/grain)*grain
867 # find out all nodes that are reservable
868 nodes=self.all_reservable_nodenames()
870 utils.header ("No reservable node found - proceeding without leases")
873 # attach them to the leases as specified in plc_specs
874 # this is where the 'leases' field gets interpreted as relative or absolute
875 for lease_spec in self.plc_spec['leases']:
876 # skip the ones that come with a null slice id
877 if not lease_spec['slice']: continue
878 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
879 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
880 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
881 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
882 if lease_addition['errors']:
883 utils.header("Cannot create leases, %s"%lease_addition['errors'])
886 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
887 (nodes,lease_spec['slice'],
888 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
889 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
893 def delete_leases (self):
894 "remove all leases in the myplc side"
895 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
896 utils.header("Cleaning leases %r"%lease_ids)
897 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
900 def list_leases (self):
901 "list all leases known to the myplc"
902 leases = self.apiserver.GetLeases(self.auth_root())
905 current=l['t_until']>=now
906 if self.options.verbose or current:
907 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
908 TestPlc.timestamp_printable(l['t_from']),
909 TestPlc.timestamp_printable(l['t_until'])))
912 # create nodegroups if needed, and populate
913 def do_nodegroups (self, action="add"):
914 # 1st pass to scan contents
916 for site_spec in self.plc_spec['sites']:
917 test_site = TestSite (self,site_spec)
918 for node_spec in site_spec['nodes']:
919 test_node=TestNode (self,test_site,node_spec)
920 if node_spec.has_key('nodegroups'):
921 nodegroupnames=node_spec['nodegroups']
922 if isinstance(nodegroupnames,StringTypes):
923 nodegroupnames = [ nodegroupnames ]
924 for nodegroupname in nodegroupnames:
925 if not groups_dict.has_key(nodegroupname):
926 groups_dict[nodegroupname]=[]
927 groups_dict[nodegroupname].append(test_node.name())
928 auth=self.auth_root()
930 for (nodegroupname,group_nodes) in groups_dict.iteritems():
932 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
933 # first, check if the nodetagtype is here
934 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
936 tag_type_id = tag_types[0]['tag_type_id']
938 tag_type_id = self.apiserver.AddTagType(auth,
939 {'tagname':nodegroupname,
940 'description': 'for nodegroup %s'%nodegroupname,
942 print 'located tag (type)',nodegroupname,'as',tag_type_id
944 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
946 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
947 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
948 # set node tag on all nodes, value='yes'
949 for nodename in group_nodes:
951 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
953 traceback.print_exc()
954 print 'node',nodename,'seems to already have tag',nodegroupname
957 expect_yes = self.apiserver.GetNodeTags(auth,
958 {'hostname':nodename,
959 'tagname':nodegroupname},
960 ['value'])[0]['value']
961 if expect_yes != "yes":
962 print 'Mismatch node tag on node',nodename,'got',expect_yes
965 if not self.options.dry_run:
966 print 'Cannot find tag',nodegroupname,'on node',nodename
970 print 'cleaning nodegroup',nodegroupname
971 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
973 traceback.print_exc()
977 # a list of TestNode objs
978 def all_nodes (self):
980 for site_spec in self.plc_spec['sites']:
981 test_site = TestSite (self,site_spec)
982 for node_spec in site_spec['nodes']:
983 nodes.append(TestNode (self,test_site,node_spec))
986 # return a list of tuples (nodename,qemuname)
987 def all_node_infos (self) :
989 for site_spec in self.plc_spec['sites']:
990 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
991 for node_spec in site_spec['nodes'] ]
994 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
995 def all_reservable_nodenames (self):
997 for site_spec in self.plc_spec['sites']:
998 for node_spec in site_spec['nodes']:
999 node_fields=node_spec['node_fields']
1000 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
1001 res.append(node_fields['hostname'])
1004 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
1005 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period_seconds=15):
1006 if self.options.dry_run:
1010 class CompleterTaskBootState (CompleterTask):
1011 def __init__ (self, test_plc,hostname):
1012 self.test_plc=test_plc
1013 self.hostname=hostname
1014 self.last_boot_state='undef'
1015 def actual_run (self):
1017 node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), [ self.hostname ],
1019 self.last_boot_state = node['boot_state']
1020 return self.last_boot_state == target_boot_state
1024 return "CompleterTaskBootState with node %s"%self.hostname
1025 def failure_message (self):
1026 return "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)
1028 timeout = timedelta(minutes=timeout_minutes)
1029 graceout = timedelta(minutes=silent_minutes)
1030 period = timedelta(seconds=period_seconds)
1031 # the nodes that haven't checked yet - start with a full list and shrink over time
1032 utils.header("checking nodes boot state (expected %s)"%target_boot_state)
1033 tasks = [ CompleterTaskBootState (self,hostname) \
1034 for (hostname,_) in self.all_node_infos() ]
1035 return Completer (tasks).run (timeout, graceout, period)
1037 def nodes_booted(self):
1038 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
1041 def check_nodes_ping(self,timeout_seconds=120,period_seconds=10):
1042 class CompleterTaskPingNode (CompleterTask):
1043 def __init__ (self, hostname):
1044 self.hostname=hostname
1045 def run(self,silent):
1046 command="ping -c 1 -w 1 %s >& /dev/null"%self.hostname
1047 return utils.system (command, silent=silent)==0
1048 def failure_message (self):
1049 return "Cannot ping node with name %s"%self.hostname
1050 timeout=timedelta (seconds=timeout_seconds)
1052 period=timedelta (seconds=period_seconds)
1053 node_infos = self.all_node_infos()
1054 tasks = [ CompleterTaskPingNode (h) for (h,_) in node_infos ]
1055 return Completer (tasks).run (timeout, graceout, period)
1057 # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1058 def ping_node (self):
1060 return self.check_nodes_ping ()
1062 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period_seconds=15):
1063 class CompleterTaskNodeSsh (CompleterTask):
1064 def __init__ (self, hostname, qemuname, boot_state, local_key):
1065 self.hostname=hostname
1066 self.qemuname=qemuname
1067 self.boot_state=boot_state
1068 self.local_key=local_key
1069 def run (self, silent):
1070 command = TestSsh (self.hostname,key=self.local_key).actual_command("hostname;uname -a")
1071 return utils.system (command, silent=silent)==0
1072 def failure_message (self):
1073 return "Cannot reach %s @ %s in %s mode"%(self.hostname, self.qemuname, self.boot_state)
1076 timeout = timedelta(minutes=timeout_minutes)
1077 graceout = timedelta(minutes=silent_minutes)
1078 period = timedelta(seconds=period_seconds)
1079 vservername=self.vservername
1082 local_key = "keys/%(vservername)s-debug.rsa"%locals()
1085 local_key = "keys/key_admin.rsa"
1086 utils.header("checking ssh access to nodes (expected in %s mode)"%message)
1087 node_infos = self.all_node_infos()
1088 tasks = [ CompleterTaskNodeSsh (nodename, qemuname, message, local_key) \
1089 for (nodename,qemuname) in node_infos ]
1090 return Completer (tasks).run (timeout, graceout, period)
1092 def ssh_node_debug(self):
1093 "Tries to ssh into nodes in debug mode with the debug ssh key"
1094 return self.check_nodes_ssh(debug=True,
1095 timeout_minutes=self.ssh_node_debug_timeout,
1096 silent_minutes=self.ssh_node_debug_silent)
1098 def ssh_node_boot(self):
1099 "Tries to ssh into nodes in production mode with the root ssh key"
1100 return self.check_nodes_ssh(debug=False,
1101 timeout_minutes=self.ssh_node_boot_timeout,
1102 silent_minutes=self.ssh_node_boot_silent)
1104 def node_bmlogs(self):
1105 "Checks that there's a non-empty dir. /var/log/bm/raw"
1106 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw"))==0
1109 def qemu_local_init (self): pass
1111 def bootcd (self): pass
1113 def qemu_local_config (self): pass
1115 def nodestate_reinstall (self): pass
1117 def nodestate_safeboot (self): pass
1119 def nodestate_boot (self): pass
1121 def nodestate_show (self): pass
1123 def qemu_export (self): pass
1125 ### check hooks : invoke scripts from hooks/{node,slice}
1126 def check_hooks_node (self):
1127 return self.locate_first_node().check_hooks()
1128 def check_hooks_sliver (self) :
1129 return self.locate_first_sliver().check_hooks()
1131 def check_hooks (self):
1132 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1133 return self.check_hooks_node() and self.check_hooks_sliver()
1136 def do_check_initscripts(self):
1137 class CompleterTaskInitscript (CompleterTask):
1138 def __init__ (self, test_sliver, stamp):
1139 self.test_sliver=test_sliver
1141 def actual_run (self):
1142 return self.test_sliver.check_initscript_stamp (self.stamp)
1144 return "initscript checker for %s"%self.test_sliver.name()
1145 def failure_message (self):
1146 return "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())
1149 for slice_spec in self.plc_spec['slices']:
1150 if not slice_spec.has_key('initscriptstamp'):
1152 stamp=slice_spec['initscriptstamp']
1153 slicename=slice_spec['slice_fields']['name']
1154 for nodename in slice_spec['nodenames']:
1155 print 'nodename',nodename,'slicename',slicename,'stamp',stamp
1156 (site,node) = self.locate_node (nodename)
1157 # xxx - passing the wrong site - probably harmless
1158 test_site = TestSite (self,site)
1159 test_slice = TestSlice (self,test_site,slice_spec)
1160 test_node = TestNode (self,test_site,node)
1161 test_sliver = TestSliver (self, test_node, test_slice)
1162 tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
1163 return Completer (tasks).run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1165 def check_initscripts(self):
1166 "check that the initscripts have triggered"
1167 return self.do_check_initscripts()
1169 def initscripts (self):
1170 "create initscripts with PLCAPI"
1171 for initscript in self.plc_spec['initscripts']:
1172 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1173 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1176 def delete_initscripts (self):
1177 "delete initscripts with PLCAPI"
1178 for initscript in self.plc_spec['initscripts']:
1179 initscript_name = initscript['initscript_fields']['name']
1180 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1182 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1183 print initscript_name,'deleted'
1185 print 'deletion went wrong - probably did not exist'
1190 "create slices with PLCAPI"
1191 return self.do_slices(action="add")
1193 def delete_slices (self):
1194 "delete slices with PLCAPI"
1195 return self.do_slices(action="delete")
1197 def fill_slices (self):
1198 "add nodes in slices with PLCAPI"
1199 return self.do_slices(action="fill")
1201 def empty_slices (self):
1202 "remove nodes from slices with PLCAPI"
1203 return self.do_slices(action="empty")
1205 def do_slices (self, action="add"):
1206 for slice in self.plc_spec['slices']:
1207 site_spec = self.locate_site (slice['sitename'])
1208 test_site = TestSite(self,site_spec)
1209 test_slice=TestSlice(self,test_site,slice)
1210 if action == "delete":
1211 test_slice.delete_slice()
1212 elif action=="fill":
1213 test_slice.add_nodes()
1214 elif action=="empty":
1215 test_slice.delete_nodes()
1217 test_slice.create_slice()
1220 @slice_mapper__tasks(20,10,15)
1221 def ssh_slice(self): pass
1222 @slice_mapper__tasks(20,19,15)
1223 def ssh_slice_off (self): pass
1225 # use another name so we can exclude/ignore it from the tests on the nightly command line
1226 def ssh_slice_again(self): return self.ssh_slice()
1227 # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1228 # but for some reason the ignore-wrapping thing would not
1231 def ssh_slice_basics(self): pass
1234 def check_vsys_defaults(self): pass
1237 def keys_clear_known_hosts (self): pass
1239 def plcapi_urls (self):
1240 return PlcapiUrlScanner (self.auth_root(),ip=self.vserverip).scan()
1242 def speed_up_slices (self):
1243 "tweak nodemanager settings on all nodes using a conf file"
1244 # create the template on the server-side
1245 template="%s.nodemanager"%self.name()
1246 template_file = open (template,"w")
1247 template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
1248 template_file.close()
1249 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1250 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1251 self.test_ssh.copy_abs(template,remote)
1253 self.apiserver.AddConfFile (self.auth_root(),
1254 {'dest':'/etc/sysconfig/nodemanager',
1255 'source':'PlanetLabConf/nodemanager',
1256 'postinstall_cmd':'service nm restart',})
1259 def debug_nodemanager (self):
1260 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1261 template="%s.nodemanager"%self.name()
1262 template_file = open (template,"w")
1263 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1264 template_file.close()
1265 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1266 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1267 self.test_ssh.copy_abs(template,remote)
1271 def qemu_start (self) : pass
1274 def qemu_timestamp (self) : pass
1276 # when a spec refers to a node possibly on another plc
1277 def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
1278 for plc in [ self ] + other_plcs:
1280 return plc.locate_sliver_obj (nodename, slicename)
1283 raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)
1285 # implement this one as a cross step so that we can take advantage of different nodes
1286 # in multi-plcs mode
1287 def cross_check_tcp (self, other_plcs):
1288 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1289 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1290 utils.header ("check_tcp: no/empty config found")
1292 specs = self.plc_spec['tcp_specs']
1297 s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
1298 if not s_test_sliver.run_tcp_server(port,timeout=20):
1302 # idem for the client side
1303 c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
1304 # use the nodename from the located sliver, unless 'client_connect' is set
1305 if 'client_connect' in spec:
1306 destination = spec['client_connect']
1308 destination=s_test_sliver.test_node.name()
1309 if not c_test_sliver.run_tcp_client(destination,port):
1313 # painfully enough, we need to allow for some time as netflow might show up last
1314 def check_system_slice (self):
1315 "all nodes: check that a system slice is alive"
1316 # netflow currently not working in the lxc distro
1317 # drl not built at all in the wtx distro
1318 # if we find either of them we're happy
1319 return self.check_netflow() or self.check_drl()
1322 def check_netflow (self): return self._check_system_slice ('netflow')
1323 def check_drl (self): return self._check_system_slice ('drl')
1325 # we have the slices up already here, so it should not take too long
1326 def _check_system_slice (self, slicename, timeout_minutes=5, period_seconds=15):
1327 class CompleterTaskSystemSlice (CompleterTask):
1328 def __init__ (self, test_node, dry_run):
1329 self.test_node=test_node
1330 self.dry_run=dry_run
1331 def actual_run (self):
1332 return self.test_node._check_system_slice (slicename, dry_run=self.dry_run)
1334 return "System slice %s @ %s"%(slicename, self.test_node.name())
1335 def failure_message (self):
1336 return "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
1337 timeout = timedelta(minutes=timeout_minutes)
1338 silent = timedelta (0)
1339 period = timedelta (seconds=period_seconds)
1340 tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
1341 for test_node in self.all_nodes() ]
1342 return Completer (tasks) . run (timeout, silent, period)
1344 def plcsh_stress_test (self):
1345 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1346 # install the stress-test in the plc image
1347 location = "/usr/share/plc_api/plcsh_stress_test.py"
1348 remote="%s/%s"%(self.vm_root_in_host(),location)
1349 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1351 command += " -- --check"
1352 if self.options.size == 1:
1353 command += " --tiny"
1354 return ( self.run_in_guest(command) == 0)
1356 # populate runs the same utility with slightly different options
1357 # in particular it runs with --preserve (don't cleanup) and without --check
1358 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1360 def sfa_install_all (self):
1361 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1362 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
1364 def sfa_install_core(self):
1366 return self.yum_install ("sfa")
1368 def sfa_install_plc(self):
1369 "yum install sfa-plc"
1370 return self.yum_install("sfa-plc")
1372 def sfa_install_sfatables(self):
1373 "yum install sfa-sfatables"
1374 return self.yum_install ("sfa-sfatables")
1376 # for some very odd reason, this sometimes fails with the following symptom
1377 # # yum install sfa-client
1378 # Setting up Install Process
1380 # Downloading Packages:
1381 # Running rpm_check_debug
1382 # Running Transaction Test
1383 # Transaction Test Succeeded
1384 # Running Transaction
1385 # Transaction couldn't start:
1386 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1387 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1388 # even though in the same context I have
1389 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1390 # Filesystem Size Used Avail Use% Mounted on
1391 # /dev/hdv1 806G 264G 501G 35% /
1392 # none 16M 36K 16M 1% /tmp
1394 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1395 def sfa_install_client(self):
1396 "yum install sfa-client"
1397 first_try=self.yum_install("sfa-client")
1398 if first_try: return True
1399 utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
1400 (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1401 utils.header("rpm_path=<<%s>>"%cached_rpm_path)
1403 self.run_in_guest("rpm -i %s"%cached_rpm_path)
1404 return self.yum_check_installed ("sfa-client")
1406 def sfa_dbclean(self):
1407 "thoroughly wipes off the SFA database"
1408 return self.run_in_guest("sfaadmin reg nuke")==0 or \
1409 self.run_in_guest("sfa-nuke.py")==0 or \
1410 self.run_in_guest("sfa-nuke-plc.py")==0
1412 def sfa_fsclean(self):
1413 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1414 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1417 def sfa_plcclean(self):
1418 "cleans the PLC entries that were created as a side effect of running the script"
1420 sfa_spec=self.plc_spec['sfa']
1422 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1423 login_base=auth_sfa_spec['login_base']
1424 try: self.apiserver.DeleteSite (self.auth_root(),login_base)
1425 except: print "Site %s already absent from PLC db"%login_base
1427 for spec_name in ['pi_spec','user_spec']:
1428 user_spec=auth_sfa_spec[spec_name]
1429 username=user_spec['email']
1430 try: self.apiserver.DeletePerson(self.auth_root(),username)
1432 # this in fact is expected as sites delete their members
1433 #print "User %s already absent from PLC db"%username
1436 print "REMEMBER TO RUN sfa_import AGAIN"
1439 def sfa_uninstall(self):
1440 "uses rpm to uninstall sfa - ignore result"
1441 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1442 self.run_in_guest("rm -rf /var/lib/sfa")
1443 self.run_in_guest("rm -rf /etc/sfa")
1444 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1446 self.run_in_guest("rpm -e --noscripts sfa-plc")
1449 ### run unit tests for SFA
1450 # NOTE: on f14/i386, yum install sfa-tests fails for no apparent reason
1451 # Running Transaction
1452 # Transaction couldn't start:
1453 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1454 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1455 # no matter how many Gbs are available on the testplc
1456 # could not figure out what's wrong, so...
1457 # if the yum install phase fails, consider the test successful
1458 # other combinations will eventually run it hopefully
1459 def sfa_utest(self):
1460 "yum install sfa-tests and run SFA unittests"
1461 self.run_in_guest("yum -y install sfa-tests")
1462 # failed to install - forget it
1463 if self.run_in_guest("rpm -q sfa-tests")!=0:
1464 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1466 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1470 dirname="conf.%s"%self.plc_spec['name']
1471 if not os.path.isdir(dirname):
1472 utils.system("mkdir -p %s"%dirname)
1473 if not os.path.isdir(dirname):
1474 raise Exception,"Cannot create config dir for plc %s"%self.name()
1477 def conffile(self,filename):
1478 return "%s/%s"%(self.confdir(),filename)
1479 def confsubdir(self,dirname,clean,dry_run=False):
1480 subdirname="%s/%s"%(self.confdir(),dirname)
1482 utils.system("rm -rf %s"%subdirname)
1483 if not os.path.isdir(subdirname):
1484 utils.system("mkdir -p %s"%subdirname)
1485 if not dry_run and not os.path.isdir(subdirname):
1486 raise Exception,"Cannot create config subdir %s for plc %s"%(dirname,self.name())
1489 def conffile_clean (self,filename):
1490 filename=self.conffile(filename)
1491 return utils.system("rm -rf %s"%filename)==0
1494 def sfa_configure(self):
1495 "run sfa-config-tty"
1496 tmpname=self.conffile("sfa-config-tty")
1497 fileconf=open(tmpname,'w')
1498 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1499 'SFA_INTERFACE_HRN',
1500 'SFA_REGISTRY_LEVEL1_AUTH',
1501 'SFA_REGISTRY_HOST',
1502 'SFA_AGGREGATE_HOST',
1512 'SFA_GENERIC_FLAVOUR',
1513 'SFA_AGGREGATE_ENABLED',
1515 if self.plc_spec['sfa'].has_key(var):
1516 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1517 # the way plc_config handles booleans just sucks..
1520 if self.plc_spec['sfa'][var]: val='true'
1521 fileconf.write ('e %s\n%s\n'%(var,val))
1522 fileconf.write('w\n')
1523 fileconf.write('R\n')
1524 fileconf.write('q\n')
1526 utils.system('cat %s'%tmpname)
1527 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1530 def aggregate_xml_line(self):
1531 port=self.plc_spec['sfa']['neighbours-port']
1532 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1533 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
1535 def registry_xml_line(self):
1536 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1537 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
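# for illustration with hypothetical values, this emits lines such as
#     <registry addr="10.0.0.1" hrn="onelab" port="12345"/>
# that cross_sfa_configure below gathers into registries.xml and aggregates.xml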
1540 # a cross step that takes all other plcs as argument
1541 def cross_sfa_configure(self, other_plcs):
1542 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1543 # of course with a single plc, other_plcs is an empty list
1546 agg_fname=self.conffile("agg.xml")
1547 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1548 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1549 utils.header ("(Over)wrote %s"%agg_fname)
1550 reg_fname=self.conffile("reg.xml")
1551 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1552 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1553 utils.header ("(Over)wrote %s"%reg_fname)
1554 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
1555 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
1557 def sfa_import(self):
1558 "use sfaadmin to import from plc"
1559 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1560 return self.run_in_guest('sfaadmin reg import_registry')==0
1562 def sfa_start(self):
1564 return self.start_service('sfa')
1567 def sfi_configure(self):
1568 "Create /root/sfi on the plc side for sfi client configuration"
1569 if self.options.dry_run:
1570 utils.header("DRY RUN - skipping step")
1572 sfa_spec=self.plc_spec['sfa']
1573 # cannot use auth_sfa_mapper to pass dir_name
1574 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1575 test_slice=TestAuthSfa(self,slice_spec)
1576 dir_basename=os.path.basename(test_slice.sfi_path())
1577 dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
1578 test_slice.sfi_configure(dir_name)
1579 # push into the remote /root/sfi area
1580 location = test_slice.sfi_path()
1581 remote="%s/%s"%(self.vm_root_in_host(),location)
1582 self.test_ssh.mkdir(remote,abs=True)
1583 # need to strip the last level of remote, otherwise we get an extra dir level
1584 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1588 def sfi_clean (self):
1589 "clean up /root/sfi on the plc side"
1590 self.run_in_guest("rm -rf /root/sfi")
1594 def sfa_add_site (self): pass
1596 def sfa_add_pi (self): pass
1598 def sfa_add_user(self): pass
1600 def sfa_update_user(self): pass
1602 def sfa_add_slice(self): pass
1604 def sfa_renew_slice(self): pass
1606 def sfa_discover(self): pass
1608 def sfa_create_slice(self): pass
1610 def sfa_check_slice_plc(self): pass
1612 def sfa_update_slice(self): pass
1614 def sfi_list(self): pass
1616 def sfi_show(self): pass
1618 def ssh_slice_sfa(self): pass
1620 def sfa_delete_user(self): pass
1622 def sfa_delete_slice(self): pass
1626 return self.stop_service ('sfa')
1628 def populate (self):
1629 "creates random entries in the PLCAPI"
1630 # install the stress-test in the plc image
1631 location = "/usr/share/plc_api/plcsh_stress_test.py"
1632 remote="%s/%s"%(self.vm_root_in_host(),location)
1633 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1635 command += " -- --preserve --short-names"
1636 local = (self.run_in_guest(command) == 0);
1637 # second run with --foreign
1638 command += ' --foreign'
1639 remote = (self.run_in_guest(command) == 0);
1640 return ( local and remote)
1642 def gather_logs (self):
1643 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1644 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1645 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1646 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1647 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1648 # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
1649 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1651 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1652 self.gather_var_logs ()
1654 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/pgsql/data/pg_log/"
1655 self.gather_pgsql_logs ()
1657 print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
1658 self.gather_root_sfi ()
1660 print "-------------------- TestPlc.gather_logs : nodes' QEMU logs"
1661 for site_spec in self.plc_spec['sites']:
1662 test_site = TestSite (self,site_spec)
1663 for node_spec in site_spec['nodes']:
1664 test_node=TestNode(self,test_site,node_spec)
1665 test_node.gather_qemu_logs()
1667 print "-------------------- TestPlc.gather_logs : nodes' /var/log"
1668 self.gather_nodes_var_logs()
1670 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1671 self.gather_slivers_var_logs()
1674 def gather_slivers_var_logs(self):
1675 for test_sliver in self.all_sliver_objs():
1676 remote = test_sliver.tar_var_logs()
1677 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1678 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1679 utils.system(command)
1682 def gather_var_logs (self):
1683 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1684 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1685 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1686 utils.system(command)
1687 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1688 utils.system(command)
1690 def gather_pgsql_logs (self):
1691 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1692 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1693 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1694 utils.system(command)
1696 def gather_root_sfi (self):
1697 utils.system("mkdir -p logs/sfi.%s"%self.name())
1698 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1699 command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
1700 utils.system(command)
1702 def gather_nodes_var_logs (self):
1703 for site_spec in self.plc_spec['sites']:
1704 test_site = TestSite (self,site_spec)
1705 for node_spec in site_spec['nodes']:
1706 test_node=TestNode(self,test_site,node_spec)
1707 test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
1708 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1709 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1710 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1711 utils.system(command)
1714 # returns the filename to use for sql dump/restore, using options.dbname if set
1715 def dbfile (self, database):
1716 # uses options.dbname if it is found
1718 name=self.options.dbname
1719 if not isinstance(name,StringTypes):
1725 return "/root/%s-%s.sql"%(database,name)
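# e.g. dbfile('planetlab5') with options.dbname set to, say, 'nightly' yields /root/planetlab5-nightly.sql
# (hypothetical name), so that plc_db_dump and plc_db_restore below operate on the same file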
1727 def plc_db_dump(self):
1728 'dump the planetlab5 DB in /root in the PLC - filename has time'
1729 dump=self.dbfile("planetlab5")
1730 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1731 utils.header('Dumped planetlab5 database in %s'%dump)
1734 def plc_db_restore(self):
1735 'restore the planetlab5 DB - looks broken, but run -n might help'
1736 dump=self.dbfile("planetlab5")
1737 ## stop the httpd service
1738 self.run_in_guest('service httpd stop')
1739 # xxx - need another wrapper
1740 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1741 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1742 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1743 ## start the httpd service again
1744 self.run_in_guest('service httpd start')
1746 utils.header('Database restored from ' + dump)
1749 def create_ignore_steps ():
1750 for step in TestPlc.default_steps + TestPlc.other_steps:
1751 # default step can have a plc qualifier
1752 if '@' in step: (step,qualifier)=step.split('@')
1753 # or be defined as forced or ignored by default
1754 for keyword in ['_ignore','_force']:
1755 if step.endswith (keyword): step=step.replace(keyword,'')
1756 if step == SEP or step == SEPSFA : continue
1757 method=getattr(TestPlc,step)
1759 wrapped=ignore_result(method)
1760 # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
1761 setattr(TestPlc, name, wrapped)
1764 # def ssh_slice_again_ignore (self): pass
1766 # def check_initscripts_ignore (self): pass
1768 def standby_1_through_20(self):
1769 """convenience function to wait for a specified number of minutes"""
1772 def standby_1(): pass
1774 def standby_2(): pass
1776 def standby_3(): pass
1778 def standby_4(): pass
1780 def standby_5(): pass
1782 def standby_6(): pass
1784 def standby_7(): pass
1786 def standby_8(): pass
1788 def standby_9(): pass
1790 def standby_10(): pass
1792 def standby_11(): pass
1794 def standby_12(): pass
1796 def standby_13(): pass
1798 def standby_14(): pass
1800 def standby_15(): pass
1802 def standby_16(): pass
1804 def standby_17(): pass
1806 def standby_18(): pass
1808 def standby_19(): pass
1810 def standby_20(): pass
1812 # convenience for debugging the test logic
1813 def yes (self): return True
1814 def no (self): return False