1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
10 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
24 from Completer import Completer, CompleterTask
26 # step methods must take (self) and return a boolean (options is a member of the class)
28 def standby(minutes,dry_run):
29 utils.header('Entering StandBy for %d minutes'%minutes)
33 time.sleep(60*minutes)
36 def standby_generic (func):
38 minutes=int(func.__name__.split("_")[1])
39 return standby(minutes,self.options.dry_run)
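# illustration: the standby_1 .. standby_20 placeholders at the end of this
# file are meant to be decorated with standby_generic, e.g.
#   @standby_generic
#   def standby_5(): pass
# the wrapper parses the '5' out of the method name and waits 5 minutes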
42 def node_mapper (method):
43 def actual(self,*args, **kwds):
45 node_method = TestNode.__dict__[method.__name__]
46 for test_node in self.all_nodes():
47 if not node_method(test_node, *args, **kwds): overall=False
49 # restore the doc text
50 actual.__doc__=TestNode.__dict__[method.__name__].__doc__
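# illustration: a placeholder step declared as
#   @node_mapper
#   def qemu_start (self) : pass
# gets its body replaced with a loop that calls TestNode.qemu_start on every
# node of this plc and ANDs the individual results into a single boolean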
53 def slice_mapper (method):
56 slice_method = TestSlice.__dict__[method.__name__]
57 for slice_spec in self.plc_spec['slices']:
58 site_spec = self.locate_site (slice_spec['sitename'])
59 test_site = TestSite(self,site_spec)
60 test_slice=TestSlice(self,test_site,slice_spec)
61 if not slice_method(test_slice,self.options): overall=False
63 # restore the doc text
64 actual.__doc__=TestSlice.__dict__[method.__name__].__doc__
67 # run a step but return True so that we can go on
68 def ignore_result (method):
70 # ssh_slice_ignore->ssh_slice
71 ref_name=method.__name__.replace('_ignore','').replace('force_','')
72 ref_method=TestPlc.__dict__[ref_name]
73 result=ref_method(self)
74 print "Actual (but ignored) result for %(ref_name)s is %(result)s"%locals()
75 return Ignored (result)
76 wrappee.__doc__="ignored version of " + method.__name__.replace('_ignore','').replace('ignore_','')
79 # a variant that expects the TestSlice method to return a list of CompleterTasks that
80 # are then merged into a single Completer run, to avoid waiting for all the slices
81 # one after the other - especially useful when a test fails, of course
82 # because we need to pass arguments, we use a class rather than a plain decorator
83 class slice_mapper__tasks (object):
84 # could not get this to work with named arguments
85 def __init__ (self,timeout_minutes,silent_minutes,period_seconds):
86 self.timeout=timedelta(minutes=timeout_minutes)
87 self.silent=timedelta(minutes=silent_minutes)
88 self.period=timedelta(seconds=period_seconds)
89 def __call__ (self, method):
91 # compute augmented method name
92 method_name = method.__name__ + "__tasks"
94 slice_method = TestSlice.__dict__[ method_name ]
97 for slice_spec in self.plc_spec['slices']:
98 site_spec = self.locate_site (slice_spec['sitename'])
99 test_site = TestSite(self,site_spec)
100 test_slice=TestSlice(self,test_site,slice_spec)
101 tasks += slice_method (test_slice, self.options)
102 return Completer (tasks).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
103 # restore the doc text from the TestSlice method even if a bit odd
104 wrappee.__doc__ = slice_method.__doc__
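# illustration - ssh_slice further down is declared as:
#   @slice_mapper__tasks(20,10,15)
#   def ssh_slice(self): pass
# which collects TestSlice.ssh_slice__tasks for every slice and feeds them all
# to one Completer run (timeout 20 minutes, silent for 10, probing every 15s)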
107 def auth_sfa_mapper (method):
110 auth_method = TestAuthSfa.__dict__[method.__name__]
111 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
112 test_auth=TestAuthSfa(self,auth_spec)
113 if not auth_method(test_auth,self.options): overall=False
115 # restore the doc text
116 actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
120 def __init__ (self,result):
130 'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
131 'plc_install', 'plc_configure', 'plc_start', SEP,
132 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
133 'plcapi_urls','speed_up_slices', SEP,
134 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
135 # slices created interactively under plcsh seem to be fine, but these ones don't have the tags
136 # keep this out of the way for now
137 'check_vsys_defaults_ignore', SEP,
138 # run this first off so it's easier to re-run on another qemu box
139 'qemu_kill_mine', SEP,
140 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
141 'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', SEP,
142 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
143 'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
144 'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_renew_slice@1', SEPSFA,
145 'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
146 'sfi_list@1', 'sfi_show@1', 'sfa_utest@1', SEPSFA,
147 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
148 # but as the stress test might take a while, we sometimes missed the debug mode..
149 'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
150 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts_ignore', SEP,
151 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
152 'cross_check_tcp@1', 'check_system_slice', SEP,
153 # check slices are turned off properly
154 'empty_slices', 'ssh_slice_off', SEP,
155 # check they are properly re-created with the same name
156 'fill_slices', 'ssh_slice_again_ignore', SEP,
157 'gather_logs_force', SEP,
160 'export', 'show_boxes', SEP,
161 'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
162 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
163 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
164 'delete_leases', 'list_leases', SEP,
166 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
167 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
168 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
169 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
170 'plc_db_dump' , 'plc_db_restore', SEP,
171 'check_netflow','check_drl', SEP,
172 'debug_nodemanager', SEP,
173 'standby_1_through_20','yes','no',SEP,
177 def printable_steps (list):
178 single_line=" ".join(list)+" "
179 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
181 def valid_step (step):
182 return step != SEP and step != SEPSFA
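# example: printable_steps(['plc_install','plc_configure',SEP,'plc_start'])
# prints as:
#   plc_install plc_configure \
#   plc_start
# i.e. the SEP/SEPSFA markers turn into backslash-newline so the step list
# displays one group per line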
184 # turn off the sfa-related steps when build has skipped SFA
185 # this was originally for centos5 but is still valid
186 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
188 def check_whether_build_has_sfa (rpms_url):
189 utils.header ("Checking if build provides SFA package...")
190 # warning, we're now building 'sface' so let's be a bit more picky
191 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
192 # full builds are expected to return with 0 here
194 utils.header("build does provide SFA")
196 # move all steps containing 'sfa' from default_steps to other_steps
197 utils.header("SFA package not found - removing steps with sfa or sfi")
198 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
199 TestPlc.other_steps += sfa_steps
200 for step in sfa_steps: TestPlc.default_steps.remove(step)
202 def __init__ (self,plc_spec,options):
203 self.plc_spec=plc_spec
205 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
206 self.vserverip=plc_spec['vserverip']
207 self.vservername=plc_spec['vservername']
208 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
209 self.apiserver=TestApiserver(self.url,options.dry_run)
210 (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
211 (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']
213 def has_addresses_api (self):
214 return self.apiserver.has_method('AddIpAddress')
217 name=self.plc_spec['name']
218 return "%s.%s"%(name,self.vservername)
221 return self.plc_spec['host_box']
224 return self.test_ssh.is_local()
226 # define the API methods on this object through xmlrpc
227 # would help, but not strictly necessary
231 def actual_command_in_guest (self,command, backslash=False):
232 raw1=self.host_to_guest(command)
233 raw2=self.test_ssh.actual_command(raw1,dry_run=self.options.dry_run, backslash=backslash)
236 def start_guest (self):
237 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),dry_run=self.options.dry_run))
239 def stop_guest (self):
240 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),dry_run=self.options.dry_run))
242 def run_in_guest (self,command,backslash=False):
243 raw=self.actual_command_in_guest(command,backslash)
244 return utils.system(raw)
246 def run_in_host (self,command):
247 return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
249 # backslashing turned out so awful at some point that I've turned off auto-backslashing
250 # see e.g. plc_start esp. the version for f14
251 # the command gets run in the plc's vm
252 def host_to_guest(self,command):
253 # f14 still needs some extra help
254 if self.options.fcdistro == 'f14':
255 raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin %s" %(self.vservername,command)
257 raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env %s" %(self.vservername,command)
260 # this /vservers thing is legacy...
261 def vm_root_in_host(self):
262 return "/vservers/%s/"%(self.vservername)
264 def vm_timestamp_path (self):
265 return "/vservers/%s/%s.timestamp"%(self.vservername,self.vservername)
267 #start/stop the vserver
268 def start_guest_in_host(self):
269 return "virsh -c lxc:/// start %s"%(self.vservername)
271 def stop_guest_in_host(self):
272 return "virsh -c lxc:/// destroy %s"%(self.vservername)
275 def run_in_guest_piped (self,local,remote):
276 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
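# indicative shape of the command this builds, e.g. for plc_configure below:
#   cat <file> | ssh root@<host_box> "virsh -c lxc:/// lxc-enter-namespace <guest> -- /usr/bin/env plc-config-tty"
# exact quoting is handled by TestSsh.actual_command with keep_stdin=True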
278 def yum_check_installed (self, rpms):
279 if isinstance (rpms, list):
281 return self.run_in_guest("rpm -q %s"%rpms)==0
283 # does a yum install in the vserver; ignores yum's retcod and checks with rpm instead
284 def yum_install (self, rpms):
285 if isinstance (rpms, list):
287 self.run_in_guest("yum -y install %s"%rpms)
288 # yum-complete-transaction comes with yum-utils, which is in vtest.pkgs
289 self.run_in_guest("yum-complete-transaction -y")
290 return self.yum_check_installed (rpms)
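# design note (per the comment above): yum's retcod is deliberately ignored as
# it is not a reliable success indicator; the real verdict comes from running
# 'rpm -q' on the very same package list, via yum_check_installed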
292 def auth_root (self):
293 return {'Username':self.plc_spec['PLC_ROOT_USER'],
294 'AuthMethod':'password',
295 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
296 'Role' : self.plc_spec['role']
298 def locate_site (self,sitename):
299 for site in self.plc_spec['sites']:
300 if site['site_fields']['name'] == sitename:
302 if site['site_fields']['login_base'] == sitename:
304 raise Exception,"Cannot locate site %s"%sitename
306 def locate_node (self,nodename):
307 for site in self.plc_spec['sites']:
308 for node in site['nodes']:
309 if node['name'] == nodename:
311 raise Exception,"Cannot locate node %s"%nodename
313 def locate_hostname (self,hostname):
314 for site in self.plc_spec['sites']:
315 for node in site['nodes']:
316 if node['node_fields']['hostname'] == hostname:
318 raise Exception,"Cannot locate hostname %s"%hostname
320 def locate_key (self,key_name):
321 for key in self.plc_spec['keys']:
322 if key['key_name'] == key_name:
324 raise Exception,"Cannot locate key %s"%key_name
326 def locate_private_key_from_key_names (self, key_names):
327 # locate the first avail. key
329 for key_name in key_names:
330 key_spec=self.locate_key(key_name)
331 test_key=TestKey(self,key_spec)
332 publickey=test_key.publicpath()
333 privatekey=test_key.privatepath()
334 if os.path.isfile(publickey) and os.path.isfile(privatekey):
336 if found: return privatekey
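# example (hypothetical key name): locate_private_key_from_key_names(['key_admin'])
# would return something like keys/key_admin.rsa, provided both the public and
# private parts exist locally - actual paths come from TestKey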
339 def locate_slice (self, slicename):
340 for slice in self.plc_spec['slices']:
341 if slice['slice_fields']['name'] == slicename:
343 raise Exception,"Cannot locate slice %s"%slicename
345 def all_sliver_objs (self):
347 for slice_spec in self.plc_spec['slices']:
348 slicename = slice_spec['slice_fields']['name']
349 for nodename in slice_spec['nodenames']:
350 result.append(self.locate_sliver_obj (nodename,slicename))
353 def locate_sliver_obj (self,nodename,slicename):
354 (site,node) = self.locate_node(nodename)
355 slice = self.locate_slice (slicename)
357 test_site = TestSite (self, site)
358 test_node = TestNode (self, test_site,node)
359 # xxx the slice site is assumed to be the node site - mhh - probably harmless
360 test_slice = TestSlice (self, test_site, slice)
361 return TestSliver (self, test_node, test_slice)
363 def locate_first_node(self):
364 nodename=self.plc_spec['slices'][0]['nodenames'][0]
365 (site,node) = self.locate_node(nodename)
366 test_site = TestSite (self, site)
367 test_node = TestNode (self, test_site,node)
370 def locate_first_sliver (self):
371 slice_spec=self.plc_spec['slices'][0]
372 slicename=slice_spec['slice_fields']['name']
373 nodename=slice_spec['nodenames'][0]
374 return self.locate_sliver_obj(nodename,slicename)
376 # all different hostboxes used in this plc
377 def get_BoxNodes(self):
378 # maps on sites and nodes, return [ (host_box,test_node) ]
380 for site_spec in self.plc_spec['sites']:
381 test_site = TestSite (self,site_spec)
382 for node_spec in site_spec['nodes']:
383 test_node = TestNode (self, test_site, node_spec)
384 if not test_node.is_real():
385 tuples.append( (test_node.host_box(),test_node) )
386 # transform into a dict { 'host_box' -> [ test_node .. ] }
388 for (box,node) in tuples:
389 if not result.has_key(box):
392 result[box].append(node)
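# the result maps each qemu host box to its test nodes, e.g. (hypothetical names):
#   { 'testbox1.example.org' : [ <TestNode vnode01>, <TestNode vnode02> ] }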
395 # a step for checking this stuff
396 def show_boxes (self):
397 'print summary of nodes location'
398 for (box,nodes) in self.get_BoxNodes().iteritems():
399 print box,":"," + ".join( [ node.name() for node in nodes ] )
402 # make this a valid step
403 def qemu_kill_all(self):
404 'kill all qemu instances on the qemu boxes involved in this setup'
405 # this is the brute force version, kill all qemus on that host box
406 for (box,nodes) in self.get_BoxNodes().iteritems():
407 # pass the first nodename, as we don't push template-qemu on testboxes
408 nodedir=nodes[0].nodedir()
409 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
412 # make this a valid step
413 def qemu_list_all(self):
414 'list all qemu instances on the qemu boxes involved in this setup'
415 for (box,nodes) in self.get_BoxNodes().iteritems():
416 # this is the brute force version, list all qemus on that host box
417 TestBoxQemu(box,self.options.buildname).qemu_list_all()
420 # kill only the qemus related to this test
421 def qemu_list_mine(self):
422 'list qemu instances for our nodes'
423 for (box,nodes) in self.get_BoxNodes().iteritems():
424 # the fine-grain version
429 # kill only the qemus related to this test
430 def qemu_clean_mine(self):
431 'cleanup (rm -rf) qemu instances for our nodes'
432 for (box,nodes) in self.get_BoxNodes().iteritems():
433 # the fine-grain version
438 # kill only the right qemus
439 def qemu_kill_mine(self):
440 'kill the qemu instances for our nodes'
441 for (box,nodes) in self.get_BoxNodes().iteritems():
442 # the fine-grain version
447 #################### display config
449 "show test configuration after localization"
454 # ugly hack to make sure 'run export' only reports about the 1st plc
455 # to avoid confusion - also we use 'inri_slice1' in various aliases..
458 "print cut'n paste-able stuff to export env variables to your shell"
459 # guess local domain from hostname
460 if TestPlc.exported_id>1:
461 print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
463 TestPlc.exported_id+=1
464 domain=socket.gethostname().split('.',1)[1]
465 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
466 print "export BUILD=%s"%self.options.buildname
467 print "export PLCHOSTLXC=%s"%fqdn
468 print "export GUESTNAME=%s"%self.plc_spec['vservername']
469 vplcname=self.plc_spec['vservername'].split('-')[-1]
470 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
471 # find hostname of first node
472 (hostname,qemubox) = self.all_node_infos()[0]
473 print "export KVMHOST=%s.%s"%(qemubox,domain)
474 print "export NODE=%s"%(hostname)
478 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
479 def show_pass (self,passno):
480 for (key,val) in self.plc_spec.iteritems():
481 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
485 self.display_site_spec(site)
486 for node in site['nodes']:
487 self.display_node_spec(node)
488 elif key=='initscripts':
489 for initscript in val:
490 self.display_initscript_spec (initscript)
493 self.display_slice_spec (slice)
496 self.display_key_spec (key)
498 if key not in ['sites','initscripts','slices','keys', 'sfa']:
499 print '+ ',key,':',val
501 def display_site_spec (self,site):
502 print '+ ======== site',site['site_fields']['name']
503 for (k,v) in site.iteritems():
504 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
507 print '+ ','nodes : ',
509 print node['node_fields']['hostname'],'',
515 print user['name'],'',
517 elif k == 'site_fields':
518 print '+ login_base',':',v['login_base']
519 elif k == 'address_fields':
525 def display_initscript_spec (self,initscript):
526 print '+ ======== initscript',initscript['initscript_fields']['name']
528 def display_key_spec (self,key):
529 print '+ ======== key',key['key_name']
531 def display_slice_spec (self,slice):
532 print '+ ======== slice',slice['slice_fields']['name']
533 for (k,v) in slice.iteritems():
546 elif k=='slice_fields':
547 print '+ fields',':',
548 print 'max_nodes=',v['max_nodes'],
553 def display_node_spec (self,node):
554 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
555 print "hostname=",node['node_fields']['hostname'],
556 print "ip=",node['interface_fields']['ip']
557 if self.options.verbose:
558 utils.pprint("node details",node,depth=3)
560 # another entry point for just showing the boxes involved
561 def display_mapping (self):
562 TestPlc.display_mapping_plc(self.plc_spec)
566 def display_mapping_plc (plc_spec):
567 print '+ MyPLC',plc_spec['name']
568 # WARNING this would not be right for lxc-based PLCs - should be harmless though
569 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
570 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
571 for site_spec in plc_spec['sites']:
572 for node_spec in site_spec['nodes']:
573 TestPlc.display_mapping_node(node_spec)
576 def display_mapping_node (node_spec):
577 print '+ NODE %s'%(node_spec['name'])
578 print '+\tqemu box %s'%node_spec['host_box']
579 print '+\thostname=%s'%node_spec['node_fields']['hostname']
581 # write a timestamp in /vservers/<>.timestamp
582 # cannot be inside the vserver, as that causes the vserver build to cough
583 def plcvm_timestamp (self):
584 "Create a timestamp to remember creation date for this plc"
586 # TODO-lxc check this one
587 # a first approx. is to store the timestamp close to the VM root like vs does
588 stamp_path=self.vm_timestamp_path ()
589 stamp_dir = os.path.dirname (stamp_path)
590 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
591 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
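# e.g. with vservername 'vplc01' (hypothetical) this writes the current epoch,
# as an integer, into /vservers/vplc01/vplc01.timestamp on the host box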
593 # this is called unconditionally at the beginning of the test sequence
594 # just in case this is a rerun, so if the vm is not running it's fine
595 def plcvm_delete(self):
596 "vserver delete the test myplc"
597 stamp_path=self.vm_timestamp_path()
598 self.run_in_host("rm -f %s"%stamp_path)
599 self.run_in_host("virsh -c lxc:// destroy %s"%self.vservername)
600 self.run_in_host("virsh -c lxc:// undefine %s"%self.vservername)
601 self.run_in_host("rm -fr /vservers/%s"%self.vservername)
605 # historically the build was being fetched by the tests
606 # now the build pushes itself as a subdir of the tests workdir
607 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
608 def plcvm_create (self):
609 "vserver creation (no install done)"
610 # push the local build/ dir to the testplc box
612 # a full path for the local calls
613 build_dir=os.path.dirname(sys.argv[0])
614 # sometimes this is empty - set to "." in such a case
615 if not build_dir: build_dir="."
616 build_dir += "/build"
618 # use a standard name - will be relative to remote buildname
620 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
621 self.test_ssh.rmdir(build_dir)
622 self.test_ssh.copy(build_dir,recursive=True)
623 # the repo url is taken from arch-rpms-url
624 # with the last path component (i386) removed
625 repo_url = self.options.arch_rpms_url
626 for level in [ 'arch' ]:
627 repo_url = os.path.dirname(repo_url)
629 # invoke initvm (drop support for vs)
630 script="lbuild-initvm.sh"
632 # pass the vbuild-nightly options to [lv]test-initvm
633 script_options += " -p %s"%self.options.personality
634 script_options += " -d %s"%self.options.pldistro
635 script_options += " -f %s"%self.options.fcdistro
636 script_options += " -r %s"%repo_url
637 vserver_name = self.vservername
639 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
640 script_options += " -n %s"%vserver_hostname
642 print "Cannot reverse lookup %s"%self.vserverip
643 print "This is considered fatal, as this might pollute the test results"
645 create_vserver="%(build_dir)s/%(script)s %(script_options)s %(vserver_name)s"%locals()
646 return self.run_in_host(create_vserver) == 0
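# indicative shape of the resulting command (option values are hypothetical):
#   ./build/lbuild-initvm.sh -p linux64 -d planetlab -f f20 \
#     -r http://build.example.org/rpms -n vplc01.example.org vplc01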
649 def plc_install(self):
650 "yum install myplc, noderepo, and the plain bootstrapfs"
652 # workaround for getting pgsql8.2 on centos5
653 if self.options.fcdistro == "centos5":
654 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
657 if self.options.personality == "linux32":
659 elif self.options.personality == "linux64":
662 raise Exception, "Unsupported personality %r"%self.options.personality
663 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
666 pkgs_list.append ("slicerepo-%s"%nodefamily)
667 pkgs_list.append ("myplc")
668 pkgs_list.append ("noderepo-%s"%nodefamily)
669 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
670 pkgs_string=" ".join(pkgs_list)
671 return self.yum_install (pkgs_list)
674 def mod_python(self):
675 """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
676 return self.yum_install ( [ 'mod_python' ] )
679 def plc_configure(self):
681 tmpname='%s.plc-config-tty'%(self.name())
682 fileconf=open(tmpname,'w')
683 for var in [ 'PLC_NAME',
688 'PLC_MAIL_SUPPORT_ADDRESS',
691 # the line above was added to integrate SFA testing
697 'PLC_RESERVATION_GRANULARITY',
699 'PLC_OMF_XMPP_SERVER',
702 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
703 fileconf.write('w\n')
704 fileconf.write('q\n')
706 utils.system('cat %s'%tmpname)
707 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
708 utils.system('rm %s'%tmpname)
711 # f14 is a bit odd in this respect, although this worked fine in guests up to f18
712 # however using a vplc guest under f20 requires this trick
713 # the symptom is this: service plc start
714 # Starting plc (via systemctl): Failed to get D-Bus connection: \
715 # Failed to connect to socket /org/freedesktop/systemd1/private: Connection refused
716 # the weird thing is that the doc says f14 uses upstart by default, not systemd,
717 # so this sounds kind of harmless
718 def start_service (self,service): return self.start_stop_service (service,'start')
719 def stop_service (self,service): return self.start_stop_service (service,'stop')
721 def start_stop_service (self, service,start_or_stop):
722 "utility to start/stop a service with the special trick for f14"
723 if self.options.fcdistro != 'f14':
724 return self.run_in_guest ("service %s %s"%(service,start_or_stop))==0
726 # patch /sbin/service so it does not reset environment
727 self.run_in_guest ('sed -i -e \\"s,env -i,env,\\" /sbin/service')
728 # this is because our own scripts in turn call service
729 return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service %s %s"%(service,start_or_stop))==0
733 return self.start_service ('plc')
737 return self.stop_service ('plc')
739 def plcvm_start (self):
740 "start the PLC vserver"
744 def plcvm_stop (self):
745 "stop the PLC vserver"
749 # stores the keys from the config for further use
750 def keys_store(self):
751 "stores test users ssh keys in keys/"
752 for key_spec in self.plc_spec['keys']:
753 TestKey(self,key_spec).store_key()
756 def keys_clean(self):
757 "removes keys cached in keys/"
758 utils.system("rm -rf ./keys")
761 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
762 # for later direct access to the nodes
763 def keys_fetch(self):
764 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
766 if not os.path.isdir(dir):
768 vservername=self.vservername
769 vm_root=self.vm_root_in_host()
771 prefix = 'debug_ssh_key'
772 for ext in [ 'pub', 'rsa' ] :
773 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
774 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
775 if self.test_ssh.fetch(src,dst) != 0: overall=False
779 "create sites with PLCAPI"
780 return self.do_sites()
782 def delete_sites (self):
783 "delete sites with PLCAPI"
784 return self.do_sites(action="delete")
786 def do_sites (self,action="add"):
787 for site_spec in self.plc_spec['sites']:
788 test_site = TestSite (self,site_spec)
789 if (action != "add"):
790 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
791 test_site.delete_site()
792 # users are deleted along with the site
793 #test_site.delete_users()
796 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
797 test_site.create_site()
798 test_site.create_users()
801 def delete_all_sites (self):
802 "Delete all sites in PLC, and related objects"
803 print 'auth_root',self.auth_root()
804 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
806 # keep the automatic site - otherwise we shoot ourselves in the foot, as root_auth would no longer be valid
807 if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
808 site_id=site['site_id']
809 print 'Deleting site_id',site_id
810 self.apiserver.DeleteSite(self.auth_root(),site_id)
814 "create nodes with PLCAPI"
815 return self.do_nodes()
816 def delete_nodes (self):
817 "delete nodes with PLCAPI"
818 return self.do_nodes(action="delete")
820 def do_nodes (self,action="add"):
821 for site_spec in self.plc_spec['sites']:
822 test_site = TestSite (self,site_spec)
824 utils.header("Deleting nodes in site %s"%test_site.name())
825 for node_spec in site_spec['nodes']:
826 test_node=TestNode(self,test_site,node_spec)
827 utils.header("Deleting %s"%test_node.name())
828 test_node.delete_node()
830 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
831 for node_spec in site_spec['nodes']:
832 utils.pprint('Creating node %s'%node_spec,node_spec)
833 test_node = TestNode (self,test_site,node_spec)
834 test_node.create_node ()
837 def nodegroups (self):
838 "create nodegroups with PLCAPI"
839 return self.do_nodegroups("add")
840 def delete_nodegroups (self):
841 "delete nodegroups with PLCAPI"
842 return self.do_nodegroups("delete")
846 def translate_timestamp (start,grain,timestamp):
847 if timestamp < TestPlc.YEAR: return start+timestamp*grain
848 else: return timestamp
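# example: with grain=1800 (a hypothetical granularity) a spec value of 2 is
# read as relative and becomes start + 2*1800, while anything >= TestPlc.YEAR
# (presumably one year expressed in seconds) is taken as an absolute epoch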
851 def timestamp_printable (timestamp):
852 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
855 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
857 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
858 print 'API answered grain=',grain
859 start=(now/grain)*grain
861 # find out all nodes that are reservable
862 nodes=self.all_reservable_nodenames()
864 utils.header ("No reservable node found - proceeding without leases")
867 # attach them to the leases as specified in plc_specs
868 # this is where the 'leases' field gets interpreted as relative or absolute
869 for lease_spec in self.plc_spec['leases']:
870 # skip the ones that come with a null slice id
871 if not lease_spec['slice']: continue
872 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
873 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
874 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
875 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
876 if lease_addition['errors']:
877 utils.header("Cannot create leases, %s"%lease_addition['errors'])
880 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
881 (nodes,lease_spec['slice'],
882 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
883 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
887 def delete_leases (self):
888 "remove all leases in the myplc side"
889 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
890 utils.header("Cleaning leases %r"%lease_ids)
891 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
894 def list_leases (self):
895 "list all leases known to the myplc"
896 leases = self.apiserver.GetLeases(self.auth_root())
899 current=l['t_until']>=now
900 if self.options.verbose or current:
901 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
902 TestPlc.timestamp_printable(l['t_from']),
903 TestPlc.timestamp_printable(l['t_until'])))
906 # create nodegroups if needed, and populate
907 def do_nodegroups (self, action="add"):
908 # 1st pass to scan contents
910 for site_spec in self.plc_spec['sites']:
911 test_site = TestSite (self,site_spec)
912 for node_spec in site_spec['nodes']:
913 test_node=TestNode (self,test_site,node_spec)
914 if node_spec.has_key('nodegroups'):
915 nodegroupnames=node_spec['nodegroups']
916 if isinstance(nodegroupnames,StringTypes):
917 nodegroupnames = [ nodegroupnames ]
918 for nodegroupname in nodegroupnames:
919 if not groups_dict.has_key(nodegroupname):
920 groups_dict[nodegroupname]=[]
921 groups_dict[nodegroupname].append(test_node.name())
922 auth=self.auth_root()
924 for (nodegroupname,group_nodes) in groups_dict.iteritems():
926 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
927 # first, check if the nodetagtype is here
928 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
930 tag_type_id = tag_types[0]['tag_type_id']
932 tag_type_id = self.apiserver.AddTagType(auth,
933 {'tagname':nodegroupname,
934 'description': 'for nodegroup %s'%nodegroupname,
936 print 'located tag (type)',nodegroupname,'as',tag_type_id
938 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
940 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
941 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
942 # set node tag on all nodes, value='yes'
943 for nodename in group_nodes:
945 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
947 traceback.print_exc()
948 print 'node',nodename,'seems to already have tag',nodegroupname
951 expect_yes = self.apiserver.GetNodeTags(auth,
952 {'hostname':nodename,
953 'tagname':nodegroupname},
954 ['value'])[0]['value']
955 if expect_yes != "yes":
956 print 'Mismatch node tag on node',nodename,'got',expect_yes
959 if not self.options.dry_run:
960 print 'Cannot find tag',nodegroupname,'on node',nodename
964 print 'cleaning nodegroup',nodegroupname
965 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
967 traceback.print_exc()
971 # a list of TestNode objs
972 def all_nodes (self):
974 for site_spec in self.plc_spec['sites']:
975 test_site = TestSite (self,site_spec)
976 for node_spec in site_spec['nodes']:
977 nodes.append(TestNode (self,test_site,node_spec))
980 # return a list of tuples (nodename,qemuname)
981 def all_node_infos (self) :
983 for site_spec in self.plc_spec['sites']:
984 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
985 for node_spec in site_spec['nodes'] ]
988 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
989 def all_reservable_nodenames (self):
991 for site_spec in self.plc_spec['sites']:
992 for node_spec in site_spec['nodes']:
993 node_fields=node_spec['node_fields']
994 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
995 res.append(node_fields['hostname'])
998 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
999 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period_seconds=15):
1000 if self.options.dry_run:
1004 class CompleterTaskBootState (CompleterTask):
1005 def __init__ (self, test_plc,hostname):
1006 self.test_plc=test_plc
1007 self.hostname=hostname
1008 self.last_boot_state='undef'
1009 def actual_run (self):
1011 node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), [ self.hostname ],
1013 self.last_boot_state = node['boot_state']
1014 return self.last_boot_state == target_boot_state
1018 return "CompleterTaskBootState with node %s"%self.hostname
1019 def failure_message (self):
1020 return "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)
1022 timeout = timedelta(minutes=timeout_minutes)
1023 graceout = timedelta(minutes=silent_minutes)
1024 period = timedelta(seconds=period_seconds)
1025 # the nodes that haven't checked yet - start with a full list and shrink over time
1026 utils.header("checking nodes boot state (expected %s)"%target_boot_state)
1027 tasks = [ CompleterTaskBootState (self,hostname) \
1028 for (hostname,_) in self.all_node_infos() ]
1029 return Completer (tasks).run (timeout, graceout, period)
1031 def nodes_booted(self):
1032 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
1035 def check_nodes_ping(self,timeout_seconds=120,period_seconds=10):
1036 class CompleterTaskPingNode (CompleterTask):
1037 def __init__ (self, hostname):
1038 self.hostname=hostname
1039 def run(self,silent):
1040 command="ping -c 1 -w 1 %s >& /dev/null"%self.hostname
1041 return utils.system (command, silent=silent)==0
1042 def failure_message (self):
1043 return "Cannot ping node with name %s"%self.hostname
1044 timeout=timedelta (seconds=timeout_seconds)
1046 period=timedelta (seconds=period_seconds)
1047 node_infos = self.all_node_infos()
1048 tasks = [ CompleterTaskPingNode (h) for (h,_) in node_infos ]
1049 return Completer (tasks).run (timeout, graceout, period)
1051 # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1052 def ping_node (self):
1054 return self.check_nodes_ping ()
1056 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period_seconds=15):
1057 class CompleterTaskNodeSsh (CompleterTask):
1058 def __init__ (self, hostname, qemuname, boot_state, local_key):
1059 self.hostname=hostname
1060 self.qemuname=qemuname
1061 self.boot_state=boot_state
1062 self.local_key=local_key
1063 def run (self, silent):
1064 command = TestSsh (self.hostname,key=self.local_key).actual_command("hostname;uname -a")
1065 return utils.system (command, silent=silent)==0
1066 def failure_message (self):
1067 return "Cannot reach %s @ %s in %s mode"%(self.hostname, self.qemuname, self.boot_state)
1070 timeout = timedelta(minutes=timeout_minutes)
1071 graceout = timedelta(minutes=silent_minutes)
1072 period = timedelta(seconds=period_seconds)
1073 vservername=self.vservername
1076 local_key = "keys/%(vservername)s-debug.rsa"%locals()
1079 local_key = "keys/key_admin.rsa"
1080 utils.header("checking ssh access to nodes (expected in %s mode)"%message)
1081 node_infos = self.all_node_infos()
1082 tasks = [ CompleterTaskNodeSsh (nodename, qemuname, message, local_key) \
1083 for (nodename,qemuname) in node_infos ]
1084 return Completer (tasks).run (timeout, graceout, period)
1086 def ssh_node_debug(self):
1087 "Tries to ssh into nodes in debug mode with the debug ssh key"
1088 return self.check_nodes_ssh(debug=True,
1089 timeout_minutes=self.ssh_node_debug_timeout,
1090 silent_minutes=self.ssh_node_debug_silent)
1092 def ssh_node_boot(self):
1093 "Tries to ssh into nodes in production mode with the root ssh key"
1094 return self.check_nodes_ssh(debug=False,
1095 timeout_minutes=self.ssh_node_boot_timeout,
1096 silent_minutes=self.ssh_node_boot_silent)
1098 def node_bmlogs(self):
1099 "Checks that there's a non-empty dir. /var/log/bm/raw"
1100 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw"))==0
1103 def qemu_local_init (self): pass
1105 def bootcd (self): pass
1107 def qemu_local_config (self): pass
1109 def nodestate_reinstall (self): pass
1111 def nodestate_safeboot (self): pass
1113 def nodestate_boot (self): pass
1115 def nodestate_show (self): pass
1117 def qemu_export (self): pass
1119 ### check hooks : invoke scripts from hooks/{node,slice}
1120 def check_hooks_node (self):
1121 return self.locate_first_node().check_hooks()
1122 def check_hooks_sliver (self) :
1123 return self.locate_first_sliver().check_hooks()
1125 def check_hooks (self):
1126 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1127 return self.check_hooks_node() and self.check_hooks_sliver()
1130 def do_check_initscripts(self):
1131 class CompleterTaskInitscript (CompleterTask):
1132 def __init__ (self, test_sliver, stamp):
1133 self.test_sliver=test_sliver
1135 def actual_run (self):
1136 return self.test_sliver.check_initscript_stamp (self.stamp)
1138 return "initscript checker for %s"%self.test_sliver.name()
1139 def failure_message (self):
1140 return "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())
1143 for slice_spec in self.plc_spec['slices']:
1144 if not slice_spec.has_key('initscriptstamp'):
1146 stamp=slice_spec['initscriptstamp']
1147 slicename=slice_spec['slice_fields']['name']
1148 for nodename in slice_spec['nodenames']:
1149 print 'nodename',nodename,'slicename',slicename,'stamp',stamp
1150 (site,node) = self.locate_node (nodename)
1151 # xxx - passing the wrong site - probably harmless
1152 test_site = TestSite (self,site)
1153 test_slice = TestSlice (self,test_site,slice_spec)
1154 test_node = TestNode (self,test_site,node)
1155 test_sliver = TestSliver (self, test_node, test_slice)
1156 tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
1157 return Completer (tasks).run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1159 def check_initscripts(self):
1160 "check that the initscripts have triggered"
1161 return self.do_check_initscripts()
1163 def initscripts (self):
1164 "create initscripts with PLCAPI"
1165 for initscript in self.plc_spec['initscripts']:
1166 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1167 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1170 def delete_initscripts (self):
1171 "delete initscripts with PLCAPI"
1172 for initscript in self.plc_spec['initscripts']:
1173 initscript_name = initscript['initscript_fields']['name']
1174 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1176 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1177 print initscript_name,'deleted'
1179 print 'deletion went wrong - probably did not exist'
1184 "create slices with PLCAPI"
1185 return self.do_slices(action="add")
1187 def delete_slices (self):
1188 "delete slices with PLCAPI"
1189 return self.do_slices(action="delete")
1191 def fill_slices (self):
1192 "add nodes in slices with PLCAPI"
1193 return self.do_slices(action="fill")
1195 def empty_slices (self):
1196 "remove nodes from slices with PLCAPI"
1197 return self.do_slices(action="empty")
1199 def do_slices (self, action="add"):
1200 for slice in self.plc_spec['slices']:
1201 site_spec = self.locate_site (slice['sitename'])
1202 test_site = TestSite(self,site_spec)
1203 test_slice=TestSlice(self,test_site,slice)
1204 if action == "delete":
1205 test_slice.delete_slice()
1206 elif action=="fill":
1207 test_slice.add_nodes()
1208 elif action=="empty":
1209 test_slice.delete_nodes()
1211 test_slice.create_slice()
1214 @slice_mapper__tasks(20,10,15)
1215 def ssh_slice(self): pass
1216 @slice_mapper__tasks(20,19,15)
1217 def ssh_slice_off (self): pass
1219 # use another name so we can exclude/ignore it from the tests on the nightly command line
1220 def ssh_slice_again(self): return self.ssh_slice()
1221 # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1222 # but for some reason the ignore-wrapping thing would not
1225 def ssh_slice_basics(self): pass
1228 def check_vsys_defaults(self): pass
1231 def keys_clear_known_hosts (self): pass
1233 def plcapi_urls (self):
1234 return PlcapiUrlScanner (self.auth_root(),ip=self.vserverip).scan()
1236 def speed_up_slices (self):
1237 "tweak nodemanager settings on all nodes using a conf file"
1238 # create the template on the server-side
1239 template="%s.nodemanager"%self.name()
1240 template_file = open (template,"w")
1241 template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
1242 template_file.close()
1243 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1244 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1245 self.test_ssh.copy_abs(template,remote)
1247 self.apiserver.AddConfFile (self.auth_root(),
1248 {'dest':'/etc/sysconfig/nodemanager',
1249 'source':'PlanetLabConf/nodemanager',
1250 'postinstall_cmd':'service nm restart',})
1253 def debug_nodemanager (self):
1254 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1255 template="%s.nodemanager"%self.name()
1256 template_file = open (template,"w")
1257 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1258 template_file.close()
1259 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1260 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1261 self.test_ssh.copy_abs(template,remote)
1265 def qemu_start (self) : pass
1268 def qemu_timestamp (self) : pass
1270 # when a spec refers to a node possibly on another plc
1271 def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
1272 for plc in [ self ] + other_plcs:
1274 return plc.locate_sliver_obj (nodename, slicename)
1277 raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)
1279 # implement this one as a cross step so that we can take advantage of different nodes
1280 # in multi-plcs mode
1281 def cross_check_tcp (self, other_plcs):
1282 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1283 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1284 utils.header ("check_tcp: no/empty config found")
1286 specs = self.plc_spec['tcp_specs']
1291 s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
1292 if not s_test_sliver.run_tcp_server(port,timeout=20):
1296 # idem for the client side
1297 c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
1298 # use nodename from the located sliver, unless 'client_connect' is set
1299 if 'client_connect' in spec:
1300 destination = spec['client_connect']
1302 destination=s_test_sliver.test_node.name()
1303 if not c_test_sliver.run_tcp_client(destination,port):
1307 # painfully enough, we need to allow for some time as netflow might show up last
1308 def check_system_slice (self):
1309 "all nodes: check that a system slice is alive"
1310 # netflow currently not working in the lxc distro
1311 # drl not built at all in the wtx distro
1312 # if we find either of them we're happy
1313 return self.check_netflow() or self.check_drl()
1316 def check_netflow (self): return self._check_system_slice ('netflow')
1317 def check_drl (self): return self._check_system_slice ('drl')
1319 # we have the slices up already here, so it should not take too long
1320 def _check_system_slice (self, slicename, timeout_minutes=5, period_seconds=15):
1321 class CompleterTaskSystemSlice (CompleterTask):
1322 def __init__ (self, test_node, dry_run):
1323 self.test_node=test_node
1324 self.dry_run=dry_run
1325 def actual_run (self):
1326 return self.test_node._check_system_slice (slicename, dry_run=self.dry_run)
1328 return "System slice %s @ %s"%(slicename, self.test_node.name())
1329 def failure_message (self):
1330 return "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
1331 timeout = timedelta(minutes=timeout_minutes)
1332 silent = timedelta (0)
1333 period = timedelta (seconds=period_seconds)
1334 tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
1335 for test_node in self.all_nodes() ]
1336 return Completer (tasks) . run (timeout, silent, period)
1338 def plcsh_stress_test (self):
1339 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1340 # install the stress-test in the plc image
1341 location = "/usr/share/plc_api/plcsh_stress_test.py"
1342 remote="%s/%s"%(self.vm_root_in_host(),location)
1343 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1345 command += " -- --check"
1346 if self.options.size == 1:
1347 command += " --tiny"
1348 return ( self.run_in_guest(command) == 0)
1350 # populate runs the same utility with slightly different options
1351 # in particular it runs with --preserve (don't clean up) and without --check
1352 # also it gets run twice, once with the --foreign option, to create fake foreign entries
1354 def sfa_install_all (self):
1355 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1356 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
1358 def sfa_install_core(self):
1360 return self.yum_install ("sfa")
1362 def sfa_install_plc(self):
1363 "yum install sfa-plc"
1364 return self.yum_install("sfa-plc")
1366 def sfa_install_sfatables(self):
1367 "yum install sfa-sfatables"
1368 return self.yum_install ("sfa-sfatables")
1370 # for some very odd reason, this sometimes fails with the following symptom
1371 # # yum install sfa-client
1372 # Setting up Install Process
1374 # Downloading Packages:
1375 # Running rpm_check_debug
1376 # Running Transaction Test
1377 # Transaction Test Succeeded
1378 # Running Transaction
1379 # Transaction couldn't start:
1380 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1381 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1382 # even though in the same context I have
1383 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1384 # Filesystem Size Used Avail Use% Mounted on
1385 # /dev/hdv1 806G 264G 501G 35% /
1386 # none 16M 36K 16M 1% /tmp
1388 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1389 def sfa_install_client(self):
1390 "yum install sfa-client"
1391 first_try=self.yum_install("sfa-client")
1392 if first_try: return True
1393 utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
1394 (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1395 utils.header("cached_rpm_path=<<%s>>"%cached_rpm_path)
1397 self.run_in_guest("rpm -i %s"%cached_rpm_path)
1398 return self.yum_check_installed ("sfa-client")
1400 def sfa_dbclean(self):
1401 "thoroughly wipes off the SFA database"
1402 return self.run_in_guest("sfaadmin reg nuke")==0 or \
1403 self.run_in_guest("sfa-nuke.py")==0 or \
1404 self.run_in_guest("sfa-nuke-plc.py")==0
1406 def sfa_fsclean(self):
1407 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1408 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1411 def sfa_plcclean(self):
1412 "cleans the PLC entries that were created as a side effect of running the script"
1414 sfa_spec=self.plc_spec['sfa']
1416 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1417 login_base=auth_sfa_spec['login_base']
1418 try: self.apiserver.DeleteSite (self.auth_root(),login_base)
1419 except: print "Site %s already absent from PLC db"%login_base
1421 for spec_name in ['pi_spec','user_spec']:
1422 user_spec=auth_sfa_spec[spec_name]
1423 username=user_spec['email']
1424 try: self.apiserver.DeletePerson(self.auth_root(),username)
1426 # this in fact is expected as sites delete their members
1427 #print "User %s already absent from PLC db"%username
1430 print "REMEMBER TO RUN sfa_import AGAIN"
1433 def sfa_uninstall(self):
1434 "uses rpm to uninstall sfa - ignore result"
1435 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1436 self.run_in_guest("rm -rf /var/lib/sfa")
1437 self.run_in_guest("rm -rf /etc/sfa")
1438 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1440 self.run_in_guest("rpm -e --noscripts sfa-plc")
1443 ### run unit tests for SFA
1444 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1445 # Running Transaction
1446 # Transaction couldn't start:
1447 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1448 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1449 # no matter how many Gbs are available on the testplc
1450 # could not figure out what's wrong, so...
1451 # if the yum install phase fails, consider the test is successful
1452 # other combinations will eventually run it hopefully
1453 def sfa_utest(self):
1454 "yum install sfa-tests and run SFA unittests"
1455 self.run_in_guest("yum -y install sfa-tests")
1456 # failed to install - forget it
1457 if self.run_in_guest("rpm -q sfa-tests")!=0:
1458 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1460 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1464 dirname="conf.%s"%self.plc_spec['name']
1465 if not os.path.isdir(dirname):
1466 utils.system("mkdir -p %s"%dirname)
1467 if not os.path.isdir(dirname):
1468 raise Exception,"Cannot create config dir for plc %s"%self.name()
1471 def conffile(self,filename):
1472 return "%s/%s"%(self.confdir(),filename)
1473 def confsubdir(self,dirname,clean,dry_run=False):
1474 subdirname="%s/%s"%(self.confdir(),dirname)
1476 utils.system("rm -rf %s"%subdirname)
1477 if not os.path.isdir(subdirname):
1478 utils.system("mkdir -p %s"%subdirname)
1479 if not dry_run and not os.path.isdir(subdirname):
1480 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1483 def conffile_clean (self,filename):
1484 filename=self.conffile(filename)
1485 return utils.system("rm -rf %s"%filename)==0
1488 def sfa_configure(self):
1489 "run sfa-config-tty"
1490 tmpname=self.conffile("sfa-config-tty")
1491 fileconf=open(tmpname,'w')
1492 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1493 'SFA_INTERFACE_HRN',
1494 'SFA_REGISTRY_LEVEL1_AUTH',
1495 'SFA_REGISTRY_HOST',
1496 'SFA_AGGREGATE_HOST',
1506 'SFA_GENERIC_FLAVOUR',
1507 'SFA_AGGREGATE_ENABLED',
1509 if self.plc_spec['sfa'].has_key(var):
1510 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1511 # the way plc_config handles booleans just sucks..
1514 if self.plc_spec['sfa'][var]: val='true'
1515 fileconf.write ('e %s\n%s\n'%(var,val))
1516 fileconf.write('w\n')
1517 fileconf.write('R\n')
1518 fileconf.write('q\n')
1520 utils.system('cat %s'%tmpname)
1521 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1524 def aggregate_xml_line(self):
1525 port=self.plc_spec['sfa']['neighbours-port']
1526 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1527 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
1529 def registry_xml_line(self):
1530 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1531 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
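# sample lines as written into aggregates.xml / registries.xml (values hypothetical):
#   <aggregate addr="192.168.122.2" hrn="onelab" port="12346"/>
#   <registry addr="192.168.122.2" hrn="onelab" port="12345"/>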
1534 # a cross step that takes all other plcs in argument
1535 def cross_sfa_configure(self, other_plcs):
1536 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1537 # of course with a single plc, other_plcs is an empty list
1540 agg_fname=self.conffile("agg.xml")
1541 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1542 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1543 utils.header ("(Over)wrote %s"%agg_fname)
1544 reg_fname=self.conffile("reg.xml")
1545 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1546 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1547 utils.header ("(Over)wrote %s"%reg_fname)
1548 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
1549 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
1551 def sfa_import(self):
1552 "use sfaadmin to import from plc"
1553 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1554 return self.run_in_guest('sfaadmin reg import_registry')==0
1556 def sfa_start(self):
1558 return self.start_service('sfa')
1561 def sfi_configure(self):
1562 "Create /root/sfi on the plc side for sfi client configuration"
1563 if self.options.dry_run:
1564 utils.header("DRY RUN - skipping step")
1566 sfa_spec=self.plc_spec['sfa']
1567 # cannot use auth_sfa_mapper to pass dir_name
1568 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1569 test_slice=TestAuthSfa(self,slice_spec)
1570 dir_basename=os.path.basename(test_slice.sfi_path())
1571 dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
1572 test_slice.sfi_configure(dir_name)
1573 # push into the remote /root/sfi area
1574 location = test_slice.sfi_path()
1575 remote="%s/%s"%(self.vm_root_in_host(),location)
1576 self.test_ssh.mkdir(remote,abs=True)
1577 # need to strip the last level of remote, otherwise we get an extra dir level
1578 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1582 def sfi_clean (self):
1583 "clean up /root/sfi on the plc side"
1584 self.run_in_guest("rm -rf /root/sfi")
1588 def sfa_add_site (self): pass
1590 def sfa_add_pi (self): pass
1592 def sfa_add_user(self): pass
1594 def sfa_update_user(self): pass
1596 def sfa_add_slice(self): pass
1598 def sfa_renew_slice(self): pass
1600 def sfa_discover(self): pass
1602 def sfa_create_slice(self): pass
1604 def sfa_check_slice_plc(self): pass
1606 def sfa_update_slice(self): pass
1608 def sfi_list(self): pass
1610 def sfi_show(self): pass
1612 def ssh_slice_sfa(self): pass
1614 def sfa_delete_user(self): pass
1616 def sfa_delete_slice(self): pass
1620 return self.stop_service ('sfa')
1622 def populate (self):
1623 "creates random entries in the PLCAPI"
1624 # install the stress-test in the plc image
1625 location = "/usr/share/plc_api/plcsh_stress_test.py"
1626 remote="%s/%s"%(self.vm_root_in_host(),location)
1627 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1629 command += " -- --preserve --short-names"
1630 local = (self.run_in_guest(command) == 0)
1631 # second run with --foreign
1632 command += ' --foreign'
1633 remote = (self.run_in_guest(command) == 0)
1634 return ( local and remote)
1636 def gather_logs (self):
1637 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1638 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1639 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1640 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1641 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1642 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1643 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1645 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1646 self.gather_var_logs ()
1648 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1649 self.gather_pgsql_logs ()
1651 print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
1652 self.gather_root_sfi ()
1654 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1655 for site_spec in self.plc_spec['sites']:
1656 test_site = TestSite (self,site_spec)
1657 for node_spec in site_spec['nodes']:
1658 test_node=TestNode(self,test_site,node_spec)
1659 test_node.gather_qemu_logs()
1661 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1662 self.gather_nodes_var_logs()
1664 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1665 self.gather_slivers_var_logs()
1668 def gather_slivers_var_logs(self):
1669 for test_sliver in self.all_sliver_objs():
1670 remote = test_sliver.tar_var_logs()
1671 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1672 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1673 utils.system(command)
1676 def gather_var_logs (self):
1677 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1678 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1679 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1680 utils.system(command)
1681 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1682 utils.system(command)
1684 def gather_pgsql_logs (self):
1685 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1686 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1687 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1688 utils.system(command)
1690 def gather_root_sfi (self):
1691 utils.system("mkdir -p logs/sfi.%s"%self.name())
1692 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1693 command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
1694 utils.system(command)
1696 def gather_nodes_var_logs (self):
1697 for site_spec in self.plc_spec['sites']:
1698 test_site = TestSite (self,site_spec)
1699 for node_spec in site_spec['nodes']:
1700 test_node=TestNode(self,test_site,node_spec)
1701 test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
1702 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1703 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1704 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1705 utils.system(command)
1708 # returns the filename to use for sql dump/restore, using options.dbname if set
1709 def dbfile (self, database):
1710 # uses options.dbname if it is found
1712 name=self.options.dbname
1713 if not isinstance(name,StringTypes):
1719 return "/root/%s-%s.sql"%(database,name)
1721 def plc_db_dump(self):
1722 'dump the planetlab5 DB in /root in the PLC - filename has time'
1723 dump=self.dbfile("planetab5")
1724 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1725 utils.header('Dumped planetlab5 database in %s'%dump)
1728 def plc_db_restore(self):
1729 'restore the planetlab5 DB - looks broken, but run -n might help'
1730 dump=self.dbfile("planetab5")
1731 ##stop httpd service
1732 self.run_in_guest('service httpd stop')
1733 # xxx - need another wrapper
1734 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1735 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1736 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1737 ##starting httpd service
1738 self.run_in_guest('service httpd start')
1740 utils.header('Database restored from ' + dump)
1743 def create_ignore_steps ():
1744 for step in TestPlc.default_steps + TestPlc.other_steps:
1745 # default step can have a plc qualifier
1746 if '@' in step: (step,qualifier)=step.split('@')
1747 # or be defined as forced or ignored by default
1748 for keyword in ['_ignore','_force']:
1749 if step.endswith (keyword): step=step.replace(keyword,'')
1750 if step == SEP or step == SEPSFA : continue
1751 method=getattr(TestPlc,step)
1753 wrapped=ignore_result(method)
1754 # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
1755 setattr(TestPlc, name, wrapped)
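# so e.g. 'check_vsys_defaults_ignore' from the default steps ends up bound to
# an ignore_result wrapper around check_vsys_defaults: the step still runs and
# reports, but its outcome is wrapped in Ignored() and does not fail the run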
1758 # def ssh_slice_again_ignore (self): pass
1760 # def check_initscripts_ignore (self): pass
1762 def standby_1_through_20(self):
1763 """convenience function to wait for a specified number of minutes"""
1766 def standby_1(): pass
1768 def standby_2(): pass
1770 def standby_3(): pass
1772 def standby_4(): pass
1774 def standby_5(): pass
1776 def standby_6(): pass
1778 def standby_7(): pass
1780 def standby_8(): pass
1782 def standby_9(): pass
1784 def standby_10(): pass
1786 def standby_11(): pass
1788 def standby_12(): pass
1790 def standby_13(): pass
1792 def standby_14(): pass
1794 def standby_15(): pass
1796 def standby_16(): pass
1798 def standby_17(): pass
1800 def standby_18(): pass
1802 def standby_19(): pass
1804 def standby_20(): pass
1806 # convenience for debugging the test logic
1807 def yes (self): return True
1808 def no (self): return False