1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
23 from TestUserSfa import TestUserSfa
25 # step methods must take (self) and return a boolean (options is a member of the class)
# helper for the standby_<N> steps: print a header then sleep <minutes> minutes
# NOTE(review): lines 29-31 are elided in this listing (presumably the dry_run
# short-circuit and the return value) - behavior beyond the sleep is not visible
27 def standby(minutes,dry_run):
28 utils.header('Entering StandBy for %d mn'%minutes)
32 time.sleep(60*minutes)
# factory for the standby_<N> step methods: the wrapped method's name encodes
# the number of minutes to sleep (standby_5 -> 5 mn)
# NOTE(review): line 36 (presumably the inner 'def actual(self):') and the
# trailing 'return actual' are elided in this listing
35 def standby_generic (func):
37 minutes=int(func.__name__.split("_")[1])
38 return standby(minutes,self.options.dry_run)
# decorator: lift a TestNode method into a TestPlc step that loops over
# every node of every site, ANDing the per-node results into 'overall'
# NOTE(review): lines 42-43 (inner 'def actual(self):' and 'overall=True'?),
# 50 ('return overall'?) and 53 ('return actual'?) are elided in this listing
41 def node_mapper (method):
44 node_method = TestNode.__dict__[method.__name__]
45 for site_spec in self.plc_spec['sites']:
46 test_site = TestSite (self,site_spec)
47 for node_spec in site_spec['nodes']:
48 test_node = TestNode (self,test_site,node_spec)
49 if not node_method(test_node): overall=False
51 # restore the doc text
52 actual.__doc__=method.__doc__
# decorator: lift a TestSlice method into a TestPlc step that loops over all
# slices in the spec; each slice method also receives self.options
# NOTE(review): the inner 'def actual(self):' header, the 'overall' init and
# the final 'return actual' are elided in this listing
55 def slice_mapper (method):
58 slice_method = TestSlice.__dict__[method.__name__]
59 for slice_spec in self.plc_spec['slices']:
60 site_spec = self.locate_site (slice_spec['sitename'])
61 test_site = TestSite(self,site_spec)
62 test_slice=TestSlice(self,test_site,slice_spec)
63 if not slice_method(test_slice,self.options): overall=False
65 # restore the doc text
66 actual.__doc__=method.__doc__
# same as slice_mapper, but for the SFA slice specs (TestSliceSfa methods,
# iterating over plc_spec['sfa']['sfa_slice_specs'])
# NOTE(review): the inner 'def actual(self):', 'overall' init and
# 'return actual' are elided in this listing
69 def slice_sfa_mapper (method):
72 slice_method = TestSliceSfa.__dict__[method.__name__]
73 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
74 site_spec = self.locate_site (slice_spec['sitename'])
75 test_site = TestSite(self,site_spec)
76 test_slice=TestSliceSfa(self,test_site,slice_spec)
77 if not slice_method(test_slice,self.options): overall=False
79 # restore the doc text
80 actual.__doc__=method.__doc__
89 'show', 'timestamp_plc', 'timestamp_qemu', SEP,
90 'vs_delete','vs_create','plc_install', 'plc_configure', 'plc_start', SEP,
91 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
92 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
93 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', 'qemu_export', 'qemu_kill_all', 'qemu_start', SEP,
94 'sfa_install', 'sfa_configure', 'cross_sfa_configure', 'sfa_import', 'sfa_start', SEPSFA,
95 'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
96 'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
97 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
98 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
99 # but as the stress test might take a while, we sometimes missed the debug mode..
100 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
101 'ssh_node_boot', 'ssh_slice', 'check_initscripts', SEP,
102 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
104 'force_gather_logs', 'force_local_post', SEP,
109 'show_boxes', 'local_list','local_rel','local_rel_plc','local_rel_qemu',SEP,
110 'plc_stop', 'vs_start', 'vs_stop', SEP,
111 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
112 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
113 'delete_leases', 'list_leases', SEP,
115 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
116 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_mine', SEP,
117 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEP,
118 'plc_db_dump' , 'plc_db_restore', SEP,
119 'standby_1 through 20',SEP,
def printable_steps (list):
    # render the step names on one line, then break the line at each
    # separator marker (SEP / SEPSFA)
    rendered=" ".join(list)+" "
    for marker in (SEP,SEPSFA):
        rendered=rendered.replace(" "+marker+" "," \\\n")
    return rendered
def valid_step (step):
    # separators are decoration in the step lists, not actual steps
    return step not in (SEP,SEPSFA)
130 # turn off the sfa-related steps when build has skipped SFA
131 # this is originally for centos5 as recent SFAs won't build on this platform
# probe the build's rpm repo; when no sfa- package is present, demote all
# sfa-related steps from default_steps to other_steps so they get skipped
# NOTE(review): line 137 is elided - presumably the early return when retcod
# is 0 (sfa packages found)
133 def check_whether_build_has_sfa (rpms_url):
134 # warning, we're now building 'sface' so let's be a bit more picky
135 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
136 # full builds are expected to return with 0 here
138 # move all steps containing 'sfa' from default_steps to other_steps
139 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
140 TestPlc.other_steps += sfa_steps
141 for step in sfa_steps: TestPlc.default_steps.remove(step)
# constructor: bind the plc spec and options, and prepare the ssh / API
# helpers for the vserver-based myplc under test
# NOTE(review): lines 145, 147 and 151-152 are elided in this listing
# (presumably self.options assignment and the chroot/vserver check guarding
# the 'raise' below)
143 def __init__ (self,plc_spec,options):
144 self.plc_spec=plc_spec
146 self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
148 self.vserverip=plc_spec['vserverip']
149 self.vservername=plc_spec['vservername']
150 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
153 raise Exception,'chroot-based myplc testing is deprecated'
154 self.apiserver=TestApiserver(self.url,options.dry_run)
157 name=self.plc_spec['name']
158 return "%s.%s"%(name,self.vservername)
161 return self.plc_spec['hostname']
164 return self.test_ssh.is_local()
166 # define the API methods on this object through xmlrpc
167 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    # wrap <command> for the vserver first, then for the ssh leg to the host box
    in_guest=self.host_to_guest(command)
    return self.test_ssh.actual_command(in_guest)
def start_guest (self):
    # run the 'vserver ... start' command on the host box, through ssh
    command=self.start_guest_in_host()
    return utils.system(self.test_ssh.actual_command(command))
def stop_guest (self):
    # run the 'vserver ... stop' command on the host box, through ssh
    command=self.stop_guest_in_host()
    return utils.system(self.test_ssh.actual_command(command))
def run_in_guest (self,command):
    # execute <command> inside the vserver; returns the system() status
    full_command=self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    # no vserver wrapping here: <command> runs directly on the host box
    ssh=self.test_ssh
    return ssh.run_in_buildname(command)
# command gets run in the vserver
def host_to_guest(self,command):
    # prefix with 'vserver <name> exec' so the command runs inside the guest
    wrapped="vserver %s exec %s"%(self.vservername,command)
    return wrapped
# start/stop the vserver
def start_guest_in_host(self):
    # shell command that boots the guest; meant to be run on the host box
    return "vserver %s start"%self.vservername
def stop_guest_in_host(self):
    # shell command that shuts the guest down; meant to be run on the host box
    return "vserver %s stop"%self.vservername
def run_in_guest_piped (self,local,remote):
    # run <local> here and pipe its output into <remote> inside the guest
    remote_command=self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True)
    return utils.system(local+" | "+remote_command)
# build the root auth structure expected by the PLCAPI calls
# NOTE(review): line 206 (presumably the dict's closing brace) is elided
201 def auth_root (self):
202 return {'Username':self.plc_spec['PLC_ROOT_USER'],
203 'AuthMethod':'password',
204 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
205 'Role' : self.plc_spec['role']
# look up a site spec by name or by login_base; raises when not found
# NOTE(review): lines 210 and 212 (the 'return site' statements) are elided
207 def locate_site (self,sitename):
208 for site in self.plc_spec['sites']:
209 if site['site_fields']['name'] == sitename:
211 if site['site_fields']['login_base'] == sitename:
213 raise Exception,"Cannot locate site %s"%sitename
# look up a node by its spec name; raises when not found
# NOTE(review): line 219 (presumably 'return (site,node)') is elided
215 def locate_node (self,nodename):
216 for site in self.plc_spec['sites']:
217 for node in site['nodes']:
218 if node['name'] == nodename:
220 raise Exception,"Cannot locate node %s"%nodename
# look up a node by its PLCAPI hostname; raises when not found
# NOTE(review): line 226 (presumably 'return (site,node)') is elided
222 def locate_hostname (self,hostname):
223 for site in self.plc_spec['sites']:
224 for node in site['nodes']:
225 if node['node_fields']['hostname'] == hostname:
227 raise Exception,"Cannot locate hostname %s"%hostname
# look up a key spec by name; raises when not found
# NOTE(review): line 232 (presumably 'return key') is elided
229 def locate_key (self,keyname):
230 for key in self.plc_spec['keys']:
231 if key['name'] == keyname:
233 raise Exception,"Cannot locate key %s"%keyname
# look up a slice spec by slice name; raises when not found
# NOTE(review): line 238 (presumably 'return slice') is elided
235 def locate_slice (self, slicename):
236 for slice in self.plc_spec['slices']:
237 if slice['slice_fields']['name'] == slicename:
239 raise Exception,"Cannot locate slice %s"%slicename
# build one TestSliver per (slice,node) pair declared in the spec
# NOTE(review): line 242 (presumably 'result=[]') and lines 247-248
# (presumably 'return result') are elided in this listing
241 def all_sliver_objs (self):
243 for slice_spec in self.plc_spec['slices']:
244 slicename = slice_spec['slice_fields']['name']
245 for nodename in slice_spec['nodenames']:
246 result.append(self.locate_sliver_obj (nodename,slicename))
# build the TestSliver object for a given (nodename,slicename) pair
# NOTE(review): line 252 is elided (probably a blank line)
249 def locate_sliver_obj (self,nodename,slicename):
250 (site,node) = self.locate_node(nodename)
251 slice = self.locate_slice (slicename)
253 test_site = TestSite (self, site)
254 test_node = TestNode (self, test_site,node)
255 # xxx the slice site is assumed to be the node site - mhh - probably harmless
256 test_slice = TestSlice (self, test_site, slice)
257 return TestSliver (self, test_node, test_slice)
# the TestNode for the first node of the first slice in the spec
# NOTE(review): lines 264-265 are elided (presumably 'return test_node')
259 def locate_first_node(self):
260 nodename=self.plc_spec['slices'][0]['nodenames'][0]
261 (site,node) = self.locate_node(nodename)
262 test_site = TestSite (self, site)
263 test_node = TestNode (self, test_site,node)
def locate_first_sliver (self):
    # convenience: the sliver for the first node of the first slice
    first_slice=self.plc_spec['slices'][0]
    return self.locate_sliver_obj(first_slice['nodenames'][0],
                                  first_slice['slice_fields']['name'])
272 # all different hostboxes used in this plc
273 def gather_hostBoxes(self):
274 # maps on sites and nodes, return [ (host_box,test_node) ]
# NOTE(review): lines 275 ('tuples=[]'?), 283 ('result={}'?), 286-287
# (creating the empty list for a new box?) and 289-290 ('return result'?)
# are elided in this listing
276 for site_spec in self.plc_spec['sites']:
277 test_site = TestSite (self,site_spec)
278 for node_spec in site_spec['nodes']:
279 test_node = TestNode (self, test_site, node_spec)
# only virtual (qemu) nodes are collected - real nodes have no host box
280 if not test_node.is_real():
281 tuples.append( (test_node.host_box(),test_node) )
282 # transform into a dict { 'host_box' -> [ test_node .. ] }
284 for (box,node) in tuples:
285 if not result.has_key(box):
288 result[box].append(node)
291 # a step for checking this stuff
292 def show_boxes (self):
293 'print summary of nodes location'
294 for (box,nodes) in self.gather_hostBoxes().iteritems():
295 print box,":"," + ".join( [ node.name() for node in nodes ] )
298 # make this a valid step
299 def qemu_kill_all(self):
300 'kill all qemu instances on the qemu boxes involved by this setup'
301 # this is the brute force version, kill all qemus on that host box
302 for (box,nodes) in self.gather_hostBoxes().iteritems():
303 # pass the first nodename, as we don't push template-qemu on testboxes
304 nodedir=nodes[0].nodedir()
305 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
# NOTE(review): lines 306-307 are elided (presumably 'return True')
308 # make this a valid step
309 def qemu_list_all(self):
310 'list all qemu instances on the qemu boxes involved by this setup'
311 for (box,nodes) in self.gather_hostBoxes().iteritems():
312 # this is the brute force version, kill all qemus on that host box
313 TestBoxQemu(box,self.options.buildname).qemu_list_all()
# NOTE(review): line 314 is elided (presumably 'return True')
316 # kill only the right qemus
317 def qemu_list_mine(self):
318 'list qemu instances for our nodes'
319 for (box,nodes) in self.gather_hostBoxes().iteritems():
320 # the fine-grain version
# NOTE(review): lines 321-323 are elided (presumably the per-node
# list_node call and 'return True')
325 # kill only the right qemus
326 def qemu_kill_mine(self):
327 'kill the qemu instances for our nodes'
328 for (box,nodes) in self.gather_hostBoxes().iteritems():
329 # the fine-grain version
# NOTE(review): lines 330-332 are elided (presumably the per-node
# kill_node call and 'return True')
334 #################### display config
336 "show test configuration after localization"
337 self.display_pass (1)
338 self.display_pass (2)
# only these spec keys get shown when not running in verbose mode
342 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
# one display pass over the plc spec; the two passes show different subsets
# NOTE(review): lines 346-348, 355-356, 358-359 and 361 are elided, including
# most of the dispatch on 'sites'/'slices'/'keys' - read with care
343 def display_pass (self,passno):
344 for (key,val) in self.plc_spec.iteritems():
345 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
349 self.display_site_spec(site)
350 for node in site['nodes']:
351 self.display_node_spec(node)
352 elif key=='initscripts':
353 for initscript in val:
354 self.display_initscript_spec (initscript)
357 self.display_slice_spec (slice)
360 self.display_key_spec (key)
362 if key not in ['sites','initscripts','slices','keys', 'sfa']:
363 print '+ ',key,':',val
# dump a site spec: header line, then a per-key rendering
# NOTE(review): lines 369-370, 372, 374-378, 380 and 384-387 are elided,
# including the 'users' branch and the loop headers for nodes/users
365 def display_site_spec (self,site):
366 print '+ ======== site',site['site_fields']['name']
367 for (k,v) in site.iteritems():
368 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
371 print '+ ','nodes : ',
373 print node['node_fields']['hostname'],'',
379 print user['name'],'',
381 elif k == 'site_fields':
382 print '+ login_base',':',v['login_base']
383 elif k == 'address_fields':
389 def display_initscript_spec (self,initscript):
390 print '+ ======== initscript',initscript['initscript_fields']['name']
392 def display_key_spec (self,key):
393 print '+ ======== key',key['name']
# dump a slice spec: header line, then a per-key rendering
# NOTE(review): lines 398-409 and 413-415 are elided - most branches of the
# per-key dispatch (nodenames, usernames, ...) are not visible here
395 def display_slice_spec (self,slice):
396 print '+ ======== slice',slice['slice_fields']['name']
397 for (k,v) in slice.iteritems():
410 elif k=='slice_fields':
411 print '+ fields',':',
412 print 'max_nodes=',v['max_nodes'],
417 def display_node_spec (self,node):
418 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
419 print "hostname=",node['node_fields']['hostname'],
420 print "ip=",node['interface_fields']['ip']
421 if self.options.verbose:
422 utils.pprint("node details",node,depth=3)
424 # another entry point for just showing the boxes involved
425 def display_mapping (self):
426 TestPlc.display_mapping_plc(self.plc_spec)
# NOTE(review): lines 427-428 are elided (presumably 'return True')
430 def display_mapping_plc (plc_spec):
431 print '+ MyPLC',plc_spec['name']
432 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
433 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
434 for site_spec in plc_spec['sites']:
435 for node_spec in site_spec['nodes']:
436 TestPlc.display_mapping_node(node_spec)
439 def display_mapping_node (node_spec):
440 print '+ NODE %s'%(node_spec['name'])
441 print '+\tqemu box %s'%node_spec['host_box']
442 print '+\thostname=%s'%node_spec['node_fields']['hostname']
444 # write a timestamp in /vservers/<>/
445 def timestamp_plc (self):
# NOTE(review): line 446 is elided - presumably 'now=int(time.time())',
# which the echo command below relies on
447 utils.system(self.test_ssh.actual_command("mkdir -p /vservers/%s"%self.vservername))
448 return utils.system(self.test_ssh.actual_command("echo %d > /vservers/%s/timestamp"%(now,self.vservername)))==0
def local_pre (self):
    "run site-dependant pre-test script as defined in LocalTestResources"
    import LocalTestResources
    return LocalTestResources.local_resources.step_pre(self)
def local_post (self):
    "run site-dependant post-test script as defined in LocalTestResources"
    import LocalTestResources
    return LocalTestResources.local_resources.step_post(self)
def local_list (self):
    "run site-dependant list script as defined in LocalTestResources"
    import LocalTestResources
    return LocalTestResources.local_resources.step_list(self)
def local_rel (self):
    "run site-dependant release script as defined in LocalTestResources"
    import LocalTestResources
    return LocalTestResources.local_resources.step_release(self)
def local_rel_plc (self):
    "run site-dependant release script as defined in LocalTestResources"
    import LocalTestResources
    return LocalTestResources.local_resources.step_release_plc(self)
def local_rel_qemu (self):
    "run site-dependant release script as defined in LocalTestResources"
    import LocalTestResources
    return LocalTestResources.local_resources.step_release_qemu(self)
481 "vserver delete the test myplc"
482 self.run_in_host("vserver --silent %s delete"%self.vservername)
486 # historically the build was being fetched by the tests
487 # now the build pushes itself as a subdir of the tests workdir
488 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
# step: push the local build/ dir to the test box and run the
# vtest-init-vserver.sh script there to create the test vserver
# NOTE(review): lines 492, 498, 500, 510, 517, 520 and 523 are elided -
# presumably the is_local shortcut, the remote build dir name, the
# test_env_options init, and the try/except around gethostbyaddr
489 def vs_create (self):
490 "vserver creation (no install done)"
491 # push the local build/ dir to the testplc box
493 # a full path for the local calls
494 build_dir=os.path.dirname(sys.argv[0])
495 # sometimes this is empty - set to "." in such a case
496 if not build_dir: build_dir="."
497 build_dir += "/build"
499 # use a standard name - will be relative to remote buildname
501 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
502 self.test_ssh.rmdir(build_dir)
503 self.test_ssh.copy(build_dir,recursive=True)
504 # the repo url is taken from arch-rpms-url
505 # with the last step (i386) removed
506 repo_url = self.options.arch_rpms_url
507 for level in [ 'arch' ]:
508 repo_url = os.path.dirname(repo_url)
509 # pass the vbuild-nightly options to vtest-init-vserver
511 test_env_options += " -p %s"%self.options.personality
512 test_env_options += " -d %s"%self.options.pldistro
513 test_env_options += " -f %s"%self.options.fcdistro
514 script="vtest-init-vserver.sh"
515 vserver_name = self.vservername
516 vserver_options="--netdev eth0 --interface %s"%self.vserverip
# reverse lookup of the vserver IP gives the guest's hostname
518 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
519 vserver_options += " --hostname %s"%vserver_hostname
521 print "Cannot reverse lookup %s"%self.vserverip
522 print "This is considered fatal, as this might pollute the test results"
524 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
525 return self.run_in_host(create_vserver) == 0
# step: yum-install the myplc packages inside the guest, then verify with rpm -q
# NOTE(review): lines 530, 534-535, 537, 539-540 and 543-544 are elided -
# presumably the pkgs_list init and the arch selection for each personality
528 def plc_install(self):
529 "yum install myplc, noderepo, and the plain bootstrapfs"
531 # workaround for getting pgsql8.2 on centos5
532 if self.options.fcdistro == "centos5":
533 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
536 if self.options.personality == "linux32":
538 elif self.options.personality == "linux64":
541 raise Exception, "Unsupported personality %r"%self.options.personality
542 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
545 pkgs_list.append ("slicerepo-%s"%nodefamily)
546 pkgs_list.append ("myplc")
547 pkgs_list.append ("noderepo-%s"%nodefamily)
548 pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
549 pkgs_string=" ".join(pkgs_list)
550 self.run_in_guest("yum -y install %s"%pkgs_string)
# the rpm -q check is what decides success - yum's own status is ignored
551 return self.run_in_guest("rpm -q %s"%pkgs_string)==0
# step: generate a plc-config-tty input file from the spec and pipe it into
# the guest to configure the myplc
# NOTE(review): this listing elides line 555 (docstring?), lines 559-575
# except the few shown (the rest of the variable list), and line 579
# (presumably fileconf.close())
554 def plc_configure(self):
556 tmpname='%s.plc-config-tty'%(self.name())
557 fileconf=open(tmpname,'w')
558 for var in [ 'PLC_NAME',
563 'PLC_MAIL_SUPPORT_ADDRESS',
566 # Above line was added for integrating SFA Testing
572 'PLC_RESERVATION_GRANULARITY',
574 'PLC_OMF_XMPP_SERVER',
# 'e <var>' enters the value for <var> in plc-config-tty's dialog
576 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
577 fileconf.write('w\n')
578 fileconf.write('q\n')
580 utils.system('cat %s'%tmpname)
581 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
582 utils.system('rm %s'%tmpname)
587 self.run_in_guest('service plc start')
592 self.run_in_guest('service plc stop')
596 "start the PLC vserver"
601 "stop the PLC vserver"
605 # stores the keys from the config for further use
606 def keys_store(self):
607 "stores test users ssh keys in keys/"
608 for key_spec in self.plc_spec['keys']:
609 TestKey(self,key_spec).store_key()
# NOTE(review): line 610 is elided (presumably 'return True')
612 def keys_clean(self):
613 "removes keys cached in keys/"
614 utils.system("rm -rf ./keys")
# NOTE(review): line 615 is elided (presumably 'return True')
617 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
618 # for later direct access to the nodes
619 def keys_fetch(self):
620 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
# NOTE(review): lines 621, 623, 625 and 631 are elided - presumably the
# 'keys' dir creation, 'overall=True' and 'return overall'
622 if not os.path.isdir(dir):
624 vservername=self.vservername
626 prefix = 'debug_ssh_key'
627 for ext in [ 'pub', 'rsa' ] :
628 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
629 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
630 if self.test_ssh.fetch(src,dst) != 0: overall=False
634 "create sites with PLCAPI"
635 return self.do_sites()
def delete_sites (self):
    "delete sites with PLCAPI"
    # same worker as sites(), in delete mode
    return self.do_sites("delete")
# worker shared by sites() and delete_sites(): loop over site specs and
# either create or delete each site (and its users)
# NOTE(review): lines 649-650 (presumably the 'else:' introducing the add
# branch) and 654-655 (presumably 'return True') are elided
641 def do_sites (self,action="add"):
642 for site_spec in self.plc_spec['sites']:
643 test_site = TestSite (self,site_spec)
644 if (action != "add"):
645 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
646 test_site.delete_site()
647 # deleted with the site
648 #test_site.delete_users()
651 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
652 test_site.create_site()
653 test_site.create_users()
656 def delete_all_sites (self):
657 "Delete all sites in PLC, and related objects"
658 print 'auth_root',self.auth_root()
659 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
660 for site_id in site_ids:
661 print 'Deleting site_id',site_id
662 self.apiserver.DeleteSite(self.auth_root(),site_id)
# NOTE(review): line 663 is elided (presumably 'return True')
666 "create nodes with PLCAPI"
667 return self.do_nodes()
def delete_nodes (self):
    "delete nodes with PLCAPI"
    # same worker as nodes(), in delete mode
    return self.do_nodes("delete")
# worker shared by nodes() and delete_nodes(): create or delete every node
# of every site in the spec
# NOTE(review): lines 675 (presumably "if action != 'add':"), 681
# (presumably 'else:') and 687-688 (presumably 'return True') are elided
672 def do_nodes (self,action="add"):
673 for site_spec in self.plc_spec['sites']:
674 test_site = TestSite (self,site_spec)
676 utils.header("Deleting nodes in site %s"%test_site.name())
677 for node_spec in site_spec['nodes']:
678 test_node=TestNode(self,test_site,node_spec)
679 utils.header("Deleting %s"%test_node.name())
680 test_node.delete_node()
682 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
683 for node_spec in site_spec['nodes']:
684 utils.pprint('Creating node %s'%node_spec,node_spec)
685 test_node = TestNode (self,test_site,node_spec)
686 test_node.create_node ()
def nodegroups (self):
    "create nodegroups with PLCAPI"
    # shared worker handles both directions
    return self.do_nodegroups(action="add")
def delete_nodegroups (self):
    "delete nodegroups with PLCAPI"
    # shared worker handles both directions
    return self.do_nodegroups(action="delete")
def translate_timestamp (start,grain,timestamp):
    # timestamps below one year are interpreted as offsets in grains from
    # <start>; anything larger is taken as an absolute timestamp
    if timestamp >= TestPlc.YEAR:
        return timestamp
    return start+timestamp*grain
def timestamp_printable (timestamp):
    # render a unix timestamp as UTC, month-day hour:minute:second
    fmt='%m-%d %H:%M:%S UTC'
    return time.strftime(fmt,time.gmtime(timestamp))
# step: create leases on all reservable nodes, translating the relative
# timestamps of the spec into absolute ones aligned on the API granularity
# NOTE(review): the 'def leases(self):' header (line 706) plus lines 708,
# 712, 715, 717-718, 730-731 and 736-737 are elided in this listing -
# including 'now', the early return when no reservable node exists, the
# else branch of the error check, and the final return
707 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
709 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
710 print 'API answered grain=',grain
711 start=(now/grain)*grain
713 # find out all nodes that are reservable
714 nodes=self.all_reservable_nodenames()
716 utils.header ("No reservable node found - proceeding without leases")
719 # attach them to the leases as specified in plc_specs
720 # this is where the 'leases' field gets interpreted as relative or absolute
721 for lease_spec in self.plc_spec['leases']:
722 # skip the ones that come with a null slice id
723 if not lease_spec['slice']: continue
724 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
725 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
726 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
727 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
728 if lease_addition['errors']:
729 utils.header("Cannot create leases, %s"%lease_addition['errors'])
732 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
733 (nodes,lease_spec['slice'],
734 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
735 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
739 def delete_leases (self):
740 "remove all leases in the myplc side"
741 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
742 utils.header("Cleaning leases %r"%lease_ids)
743 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
# NOTE(review): line 744 is elided (presumably 'return True')
746 def list_leases (self):
747 "list all leases known to the myplc"
748 leases = self.apiserver.GetLeases(self.auth_root())
# NOTE(review): lines 749-750 are elided - presumably 'now=time.time()' and
# the 'for l in leases:' loop header; line 756 ('return True'?) as well
751 current=l['t_until']>=now
# expired leases are only shown in verbose mode
752 if self.options.verbose or current:
753 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
754 TestPlc.timestamp_printable(l['t_from']),
755 TestPlc.timestamp_printable(l['t_until'])))
758 # create nodegroups if needed, and populate
# worker shared by nodegroups() and delete_nodegroups(): scans the node
# specs for 'nodegroups' entries, then creates (tag type + nodegroup +
# per-node tag) or deletes each nodegroup accordingly
# NOTE(review): this listing elides many lines (761, 775, 777, 781, 783,
# 787, 789, 791, 796, 798, 801-802, 809-810, 813-815, 818, 820-822) -
# among them 'groups_dict={}', the overall result handling, the
# add/delete dispatch and the try/except headers - read with care
759 def do_nodegroups (self, action="add"):
760 # 1st pass to scan contents
762 for site_spec in self.plc_spec['sites']:
763 test_site = TestSite (self,site_spec)
764 for node_spec in site_spec['nodes']:
765 test_node=TestNode (self,test_site,node_spec)
766 if node_spec.has_key('nodegroups'):
767 nodegroupnames=node_spec['nodegroups']
# a single name is accepted as shorthand for a one-element list
768 if isinstance(nodegroupnames,StringTypes):
769 nodegroupnames = [ nodegroupnames ]
770 for nodegroupname in nodegroupnames:
771 if not groups_dict.has_key(nodegroupname):
772 groups_dict[nodegroupname]=[]
773 groups_dict[nodegroupname].append(test_node.name())
774 auth=self.auth_root()
776 for (nodegroupname,group_nodes) in groups_dict.iteritems():
778 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
779 # first, check if the nodetagtype is here
780 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
782 tag_type_id = tag_types[0]['tag_type_id']
784 tag_type_id = self.apiserver.AddTagType(auth,
785 {'tagname':nodegroupname,
786 'description': 'for nodegroup %s'%nodegroupname,
788 print 'located tag (type)',nodegroupname,'as',tag_type_id
790 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
792 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
793 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
794 # set node tag on all nodes, value='yes'
795 for nodename in group_nodes:
797 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
799 traceback.print_exc()
800 print 'node',nodename,'seems to already have tag',nodegroupname
# double-check the tag ended up with the expected value
803 expect_yes = self.apiserver.GetNodeTags(auth,
804 {'hostname':nodename,
805 'tagname':nodegroupname},
806 ['value'])[0]['value']
807 if expect_yes != "yes":
808 print 'Mismatch node tag on node',nodename,'got',expect_yes
811 if not self.options.dry_run:
812 print 'Cannot find tag',nodegroupname,'on node',nodename
816 print 'cleaning nodegroup',nodegroupname
817 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
819 traceback.print_exc()
823 # return a list of tuples (nodename,qemuname)
824 def all_node_infos (self) :
# NOTE(review): lines 825 ('node_infos=[]'?) and 829-830
# ('return node_infos'?) are elided in this listing
826 for site_spec in self.plc_spec['sites']:
827 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
828 for node_spec in site_spec['nodes'] ]
def all_nodenames (self):
    # keep only the hostname part of each (hostname,qemubox) pair
    return [ hostname for (hostname,qemubox) in self.all_node_infos() ]
# hostnames of the nodes whose node_type is 'reservable'
# NOTE(review): lines 833 ('res=[]'?) and 839-840 ('return res'?) are elided
832 def all_reservable_nodenames (self):
834 for site_spec in self.plc_spec['sites']:
835 for node_spec in site_spec['nodes']:
836 node_fields=node_spec['node_fields']
837 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
838 res.append(node_fields['hostname'])
841 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
# poll the PLCAPI every <period> seconds until every node has reached
# <target_boot_state>, or <timeout_minutes> have elapsed (failure)
# NOTE(review): lines 844-846, 854-855, 857, 863, 868, 874, 876-877, 881
# and 883-886 are elided - including the dry_run shortcut, the while-loop
# header, the success/failure returns and the sleep
842 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
843 if self.options.dry_run:
847 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
848 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
849 # the nodes that haven't checked yet - start with a full list and shrink over time
850 tocheck = self.all_hostnames()
851 utils.header("checking nodes %r"%tocheck)
852 # create a dict hostname -> status
853 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
856 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
858 for array in tocheck_status:
859 hostname=array['hostname']
860 boot_state=array['boot_state']
861 if boot_state == target_boot_state:
862 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
864 # if it's a real node, never mind
865 (site_spec,node_spec)=self.locate_hostname(hostname)
866 if TestNode.is_real_model(node_spec['node_fields']['model']):
867 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
# pretend the real node reached the target so it stops being polled
869 boot_state = target_boot_state
870 elif datetime.datetime.now() > graceout:
871 utils.header ("%s still in '%s' state"%(hostname,boot_state))
# push graceout one day ahead so this message prints only once per node
872 graceout=datetime.datetime.now()+datetime.timedelta(1)
873 status[hostname] = boot_state
875 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
878 if datetime.datetime.now() > timeout:
879 for hostname in tocheck:
880 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
882 # otherwise, sleep for a while
884 # only useful in empty plcs
def nodes_booted(self):
    # shorthand: wait until every node reaches the 'boot' state
    return self.nodes_check_boot_state('boot',silent_minutes=20,timeout_minutes=30)
# try to ssh into every node (debug key or root key depending on <debug>)
# until all succeed or <timeout_minutes> elapse; quiet during the first
# <silent_minutes>
# NOTE(review): lines 891, 895-896, 898-899, 907, 914, 916, 918, 924-925,
# 929 and 931-933 are elided - including the 'message' setup, the while
# loop header, the success/failure returns and the sleep
890 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
892 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
893 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
894 vservername=self.vservername
# debug mode uses the debug key fetched from the plc, boot mode uses key1
897 local_key = "keys/%(vservername)s-debug.rsa"%locals()
900 local_key = "keys/key1.rsa"
901 node_infos = self.all_node_infos()
902 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
903 for (nodename,qemuname) in node_infos:
904 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
905 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
906 (timeout_minutes,silent_minutes,period))
908 for node_info in node_infos:
909 (hostname,qemuname) = node_info
910 # try to run 'hostname' in the node
911 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
912 # don't spam logs - show the command only after the grace period
913 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
915 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
917 node_infos.remove(node_info)
919 # we will have tried real nodes once, in case they're up - but if not, just skip
920 (site_spec,node_spec)=self.locate_hostname(hostname)
921 if TestNode.is_real_model(node_spec['node_fields']['model']):
922 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
923 node_infos.remove(node_info)
926 if datetime.datetime.now() > timeout:
927 for (hostname,qemuname) in node_infos:
928 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
930 # otherwise, sleep for a while
932 # only useful in empty plcs
def ssh_node_debug(self):
    "Tries to ssh into nodes in debug mode with the debug ssh key"
    # short timeout: debug mode should come up quickly
    return self.check_nodes_ssh(debug=True,silent_minutes=5,timeout_minutes=10)
def ssh_node_boot(self):
    "Tries to ssh into nodes in production mode with the root ssh key"
    # longer timeout: a full reinstall may be needed before boot
    return self.check_nodes_ssh(debug=False,silent_minutes=15,timeout_minutes=40)
944 def qemu_local_init (self):
945 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
949 "all nodes: invoke GetBootMedium and store result locally"
952 def qemu_local_config (self):
953 "all nodes: compute qemu config qemu.conf and store it locally"
956 def nodestate_reinstall (self):
957 "all nodes: mark PLCAPI boot_state as reinstall"
960 def nodestate_safeboot (self):
961 "all nodes: mark PLCAPI boot_state as safeboot"
964 def nodestate_boot (self):
965 "all nodes: mark PLCAPI boot_state as boot"
968 def nodestate_show (self):
969 "all nodes: show PLCAPI boot_state"
972 def qemu_export (self):
973 "all nodes: push local node-dep directory on the qemu box"
### check hooks : invoke scripts from hooks/{node,slice}
def check_hooks_node (self):
    # run the node-side hook scripts on the first node
    first_node=self.locate_first_node()
    return first_node.check_hooks()
def check_hooks_sliver (self) :
    # run the slice-side hook scripts on the first sliver
    first_sliver=self.locate_first_sliver()
    return first_sliver.check_hooks()
def check_hooks (self):
    "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
    node_result=self.check_hooks_node()
    # preserve short-circuit: sliver hooks only run when node hooks succeeded
    return node_result and self.check_hooks_sliver()
# for every slice carrying an 'initscriptstamp', verify on each of its
# slivers that the initscript actually left its stamp
# NOTE(review): lines 988 ('overall=True'?), 991 ('continue'?) and
# 1001-1002 (the failure handling and 'return overall'?) are elided
987 def do_check_initscripts(self):
989 for slice_spec in self.plc_spec['slices']:
990 if not slice_spec.has_key('initscriptstamp'):
992 stamp=slice_spec['initscriptstamp']
993 for nodename in slice_spec['nodenames']:
994 (site,node) = self.locate_node (nodename)
995 # xxx - passing the wrong site - probably harmless
996 test_site = TestSite (self,site)
997 test_slice = TestSlice (self,test_site,slice_spec)
998 test_node = TestNode (self,test_site,node)
999 test_sliver = TestSliver (self, test_node, test_slice)
1000 if not test_sliver.check_initscript_stamp(stamp):
def check_initscripts(self):
    "check that the initscripts have triggered"
    # delegate to the worker that scans all slices and nodes
    return self.do_check_initscripts()
1008 def initscripts (self):
1009 "create initscripts with PLCAPI"
1010 for initscript in self.plc_spec['initscripts']:
1011 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1012 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
# NOTE(review): lines 1013-1014 are elided (presumably 'return True')
# best-effort deletion: a missing initscript is reported, not fatal
# NOTE(review): lines 1020 ('try:'?), 1023 ('except:'?) and 1025-1026
# ('return True'?) are elided in this listing
1015 def delete_initscripts (self):
1016 "delete initscripts with PLCAPI"
1017 for initscript in self.plc_spec['initscripts']:
1018 initscript_name = initscript['initscript_fields']['name']
1019 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1021 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1022 print initscript_name,'deleted'
1024 print 'deletion went wrong - probably did not exist'
1029 "create slices with PLCAPI"
1030 return self.do_slices()
def delete_slices (self):
    "delete slices with PLCAPI"
    # same worker as the creation step, invoked in delete mode
    action = "delete"
    return self.do_slices(action)
1036 def do_slices (self, action="add"):
1037 for slice in self.plc_spec['slices']:
1038 site_spec = self.locate_site (slice['sitename'])
1039 test_site = TestSite(self,site_spec)
1040 test_slice=TestSlice(self,test_site,slice)
1042 utils.header("Deleting slices in site %s"%test_site.name())
1043 test_slice.delete_slice()
1045 utils.pprint("Creating slice",slice)
1046 test_slice.create_slice()
1047 utils.header('Created Slice %s'%slice['slice_fields']['name'])
1051 def ssh_slice(self):
1052 "tries to ssh-enter the slice with the user key, to ensure slice creation"
1056 def keys_clear_known_hosts (self):
1057 "remove test nodes entries from the local known_hosts file"
1061 def qemu_start (self) :
1062 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
1066 def timestamp_qemu (self) :
1067 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
1070 def check_tcp (self):
1071 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1072 specs = self.plc_spec['tcp_test']
1077 s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
1078 if not s_test_sliver.run_tcp_server(port,timeout=10):
1082 # idem for the client side
1083 c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
1084 if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
1088 def plcsh_stress_test (self):
1089 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1090 # install the stress-test in the plc image
1091 location = "/usr/share/plc_api/plcsh_stress_test.py"
1092 remote="/vservers/%s/%s"%(self.vservername,location)
1093 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1095 command += " -- --check"
1096 if self.options.size == 1:
1097 command += " --tiny"
1098 return ( self.run_in_guest(command) == 0)
1100 # populate runs the same utility without slightly different options
1101 # in particular runs with --preserve (dont cleanup) and without --check
1102 # also it gets run twice, once with the --foreign option for creating fake foreign entries
def sfa_install(self):
    "yum install sfa, sfa-plc and sfa-client"
    # install, then verify presence with rpm -q (yum's exit code is not checked)
    self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")
    query_rc = self.run_in_guest("rpm -q sfa sfa-client sfa-plc sfa-sfatables")
    return query_rc == 0
def sfa_dbclean(self):
    "thoroughly wipes off the SFA database"
    # BUG FIX: the '==0' result used to be computed and discarded, so the
    # step implicitly returned None; step methods must return a boolean
    # (see module header), so report the command's success explicitly
    return self.run_in_guest("sfa-nuke-plc.py")==0
def sfa_plcclean(self):
    "cleans the PLC entries that were created as a side effect of running the script"
    sfa_spec=self.plc_spec['sfa']
    # remove the slice created on behalf of SFA; a failure here most likely
    # means it is already gone, so only report it
    slicename='%s_%s'%(sfa_spec['login_base'],sfa_spec['slicename'])
    try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
    # BUG FIX: was a bare 'except:', which also swallowed SystemExit/KeyboardInterrupt
    except Exception: print("Slice %s already absent from PLC db"%slicename)

    # same for the regular test user
    username="%s@%s"%(sfa_spec['regularuser'],sfa_spec['domain'])
    try: self.apiserver.DeletePerson(self.auth_root(),username)
    except Exception: print("User %s already absent from PLC db"%username)

    print("REMEMBER TO RUN sfa_import AGAIN")
    # step contract (module header): return a boolean
    return True
def sfa_uninstall(self):
    "uses rpm to uninstall sfa - ignore result"
    self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
    self.run_in_guest("rm -rf /var/lib/sfa")
    self.run_in_guest("rm -rf /etc/sfa")
    self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
    # force-remove the server-side rpm, skipping its scriptlets
    self.run_in_guest("rpm -e --noscripts sfa-plc")
    # BUG FIX: results are deliberately ignored (see docstring), but the step
    # still has to return a boolean per the module-header contract
    return True
1143 ### run unit tests for SFA
1144 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1145 # Running Transaction
1146 # Transaction couldn't start:
1147 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1148 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1149 # no matter how many Gbs are available on the testplc
1150 # could not figure out what's wrong, so...
1151 # if the yum install phase fails, consider the test is successful
1152 # other combinations will eventually run it hopefully
def sfa_utest(self):
    "yum install sfa-tests and run SFA unittests"
    self.run_in_guest("yum -y install sfa-tests")
    # failed to install - forget it
    if self.run_in_guest("rpm -q sfa-tests")!=0:
        # per the long note preceding this step, the yum failure is spurious,
        # so an uninstallable sfa-tests package counts as success
        utils.header("WARNING: SFA unit tests failed to install, ignoring")
        return True
    return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1164 dirname="conf.%s"%self.plc_spec['name']
1165 if not os.path.isdir(dirname):
1166 utils.system("mkdir -p %s"%dirname)
1167 if not os.path.isdir(dirname):
1168 raise "Cannot create config dir for plc %s"%self.name()
def conffile(self,filename):
    """Return the path of *filename* inside this plc's config directory."""
    return self.confdir() + "/" + filename
def confsubdir(self,dirname,clean,dry_run=False):
    """Return (and create) a subdir of the config dir; wipe it first when clean.

    Returns the subdir path (callers rely on it, e.g. to build file names)."""
    subdirname="%s/%s"%(self.confdir(),dirname)
    if clean:
        utils.system("rm -rf %s"%subdirname)
    if not os.path.isdir(subdirname):
        utils.system("mkdir -p %s"%subdirname)
    if not dry_run and not os.path.isdir(subdirname):
        # BUG FIX: raising a plain string is a TypeError on python >= 2.6;
        # raise a real exception object instead
        raise Exception("Cannot create config subdir %s for plc %s"%(dirname,self.name()))
    return subdirname
def conffile_clean (self,filename):
    """Remove *filename* from the config dir; True when the rm succeeded."""
    target = self.conffile(filename)
    return utils.system("rm -rf %s"%target) == 0
1188 def sfa_configure(self):
1189 "run sfa-config-tty"
1190 tmpname=self.conffile("sfa-config-tty")
1191 fileconf=open(tmpname,'w')
1192 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1193 'SFA_INTERFACE_HRN',
1194 # 'SFA_REGISTRY_LEVEL1_AUTH',
1195 'SFA_REGISTRY_HOST',
1196 'SFA_AGGREGATE_HOST',
1202 'SFA_PLC_DB_PASSWORD',
1205 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1206 # the way plc_config handles booleans just sucks..
1207 for var in ['SFA_API_DEBUG']:
1209 if self.plc_spec['sfa'][var]: val='true'
1210 fileconf.write ('e %s\n%s\n'%(var,val))
1211 fileconf.write('w\n')
1212 fileconf.write('R\n')
1213 fileconf.write('q\n')
1215 utils.system('cat %s'%tmpname)
1216 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
def aggregate_xml_line(self):
    """XML fragment describing this plc's aggregate, for aggregates.xml."""
    sfa_spec = self.plc_spec['sfa']
    return '<aggregate addr="%s" hrn="%s" port="%r"/>' % (
        self.vserverip, sfa_spec['SFA_REGISTRY_ROOT_AUTH'], sfa_spec['neighbours-port'])
def registry_xml_line(self):
    """XML fragment describing this plc's registry, for registries.xml."""
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    # the registry port is hard-wired to 12345 here
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (self.vserverip, hrn)
1229 # a cross step that takes all other plcs in argument
1230 def cross_sfa_configure(self, other_plcs):
1231 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1232 # of course with a single plc, other_plcs is an empty list
1235 agg_fname=self.conffile("agg.xml")
1236 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1237 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1238 utils.header ("(Over)wrote %s"%agg_fname)
1239 reg_fname=self.conffile("reg.xml")
1240 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1241 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1242 utils.header ("(Over)wrote %s"%reg_fname)
1243 return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
1244 and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
def sfa_import(self):
    # run the SFA importer inside the guest; boolean step result
    # NOTE(review): 'auth' is computed but no longer used - it was only needed
    # by the commented-out key-copy command below; TODO confirm it can be dropped
    auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return self.run_in_guest('sfa-import-plc.py')==0
    # not needed anymore
    # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
def sfa_start(self):
    # start the sfa service inside the guest; True when the init script succeeds
    return self.run_in_guest('service sfa start')==0
1257 def sfi_configure(self):
1258 "Create /root/.sfi on the plc side for sfi client configuration"
1259 sfa_spec=self.plc_spec['sfa']
1260 dir_name=self.confsubdir("dot-sfi",clean=True,dry_run=self.options.dry_run)
1261 if self.options.dry_run: return True
1262 file_name=dir_name + os.sep + sfa_spec['piuser'] + '.pkey'
1263 fileconf=open(file_name,'w')
1264 fileconf.write (self.plc_spec['keys'][0]['private'])
1266 utils.header ("(Over)wrote %s"%file_name)
1268 file_name=dir_name + os.sep + 'sfi_config'
1269 fileconf=open(file_name,'w')
1270 SFI_AUTH="%s.%s"%(sfa_spec['SFA_REGISTRY_ROOT_AUTH'],sfa_spec['login_base'])
1271 fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
1272 fileconf.write('\n')
1273 SFI_USER=SFI_AUTH + '.' + sfa_spec['piuser']
1274 fileconf.write ("SFI_USER='%s'"%SFI_USER)
1275 fileconf.write('\n')
1276 SFI_REGISTRY='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12345/'
1277 fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
1278 fileconf.write('\n')
1279 SFI_SM='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12347/'
1280 fileconf.write ("SFI_SM='%s'"%SFI_SM)
1281 fileconf.write('\n')
1283 utils.header ("(Over)wrote %s"%file_name)
1285 file_name=dir_name + os.sep + 'person.xml'
1286 fileconf=open(file_name,'w')
1287 for record in sfa_spec['sfa_person_xml']:
1288 person_record=record
1289 fileconf.write(person_record)
1290 fileconf.write('\n')
1292 utils.header ("(Over)wrote %s"%file_name)
1294 file_name=dir_name + os.sep + 'slice.xml'
1295 fileconf=open(file_name,'w')
1296 for record in sfa_spec['sfa_slice_xml']:
1298 #slice_record=sfa_spec['sfa_slice_xml']
1299 fileconf.write(slice_record)
1300 fileconf.write('\n')
1301 utils.header ("(Over)wrote %s"%file_name)
1304 file_name=dir_name + os.sep + 'slice.rspec'
1305 fileconf=open(file_name,'w')
1307 for (key, value) in sfa_spec['sfa_slice_rspec'].items():
1309 fileconf.write(slice_rspec)
1310 fileconf.write('\n')
1312 utils.header ("(Over)wrote %s"%file_name)
1314 # push to the remote root's .sfi
1315 location = "root/.sfi"
1316 remote="/vservers/%s/%s"%(self.vservername,location)
1317 self.test_ssh.copy_abs(dir_name, remote, recursive=True)
def sfi_clean (self):
    "clean up /root/.sfi on the plc side"
    # BUG FIX: step methods must return a boolean (see module header);
    # report whether the removal command succeeded
    return self.run_in_guest("rm -rf /root/.sfi") == 0
def sfa_add_user(self):
    "run sfi.py add using person.xml"
    # delegate to the SFA user helper
    user_helper = TestUserSfa(self)
    return user_helper.add_user()
def sfa_update_user(self):
    "run sfi.py update using person.xml"
    # delegate to the SFA user helper
    user_helper = TestUserSfa(self)
    return user_helper.update_user()
1335 def sfa_add_slice(self):
1336 "run sfi.py add (on Registry) from slice.xml"
1340 def sfa_discover(self):
1341 "discover resources into resouces_in.rspec"
1345 def sfa_create_slice(self):
1346 "run sfi.py create (on SM) - 1st time"
1350 def sfa_check_slice_plc(self):
1351 "check sfa_create_slice at the plcs - all local nodes should be in slice"
1355 def sfa_update_slice(self):
1356 "run sfi.py create (on SM) on existing object"
1360 "run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
1361 sfa_spec=self.plc_spec['sfa']
1362 auth=sfa_spec['SFA_REGISTRY_ROOT_AUTH']
1364 self.run_in_guest("sfi.py -d /root/.sfi/ list %s.%s"%(auth,sfa_spec['login_base']))==0 and \
1365 self.run_in_guest("sfi.py -d /root/.sfi/ show %s.%s"%(auth,sfa_spec['login_base']))==0 and \
1366 self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
1367 self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0
1370 def ssh_slice_sfa(self):
1371 "tries to ssh-enter the SFA slice"
def sfa_delete_user(self):
    "run sfi.py delete (on SM) for user"
    # delegate to the SFA user helper
    return TestUserSfa(self).delete_user()
1380 def sfa_delete_slice(self):
1381 "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
1386 self.run_in_guest('service sfa stop')==0
1389 def populate (self):
1390 "creates random entries in the PLCAPI"
1391 # install the stress-test in the plc image
1392 location = "/usr/share/plc_api/plcsh_stress_test.py"
1393 remote="/vservers/%s/%s"%(self.vservername,location)
1394 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1396 command += " -- --preserve --short-names"
1397 local = (self.run_in_guest(command) == 0);
1398 # second run with --foreign
1399 command += ' --foreign'
1400 remote = (self.run_in_guest(command) == 0);
1401 return ( local and remote)
def gather_logs (self):
    "gets all possible logs from plc's/qemu node's/slice's for future reference"
    # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
    # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
    # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
    # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
    # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
    # (1.a)
    print("-------------------- TestPlc.gather_logs : PLC's /var/log")
    self.gather_var_logs ()
    # (1.b)
    print("-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/")
    self.gather_pgsql_logs ()
    # (2)
    print("-------------------- TestPlc.gather_logs : nodes's QEMU logs")
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            test_node.gather_qemu_logs()
    # (3)
    print("-------------------- TestPlc.gather_logs : nodes's /var/log")
    self.gather_nodes_var_logs()
    # (4)
    print("-------------------- TestPlc.gather_logs : sample sliver's /var/log")
    self.gather_slivers_var_logs()
    # BUG FIX: step methods must return a boolean (see module header)
    return True
def gather_slivers_var_logs(self):
    """Fetch /var/log from every sliver into logs/sliver.var-log.<name>/ locally."""
    for sliver in self.all_sliver_objs():
        tar_command = sliver.tar_var_logs()
        local_dir = "logs/sliver.var-log.%s" % sliver.name()
        utils.system("mkdir -p %s" % local_dir)
        # pipe the remote tarball straight into a local untar
        utils.system(tar_command + " | tar -C %s -xf -" % local_dir)
def gather_var_logs (self):
    """Copy the plc guest's /var/log into logs/myplc.var-log.<plc>/ locally."""
    local_dir = "logs/myplc.var-log.%s" % self.name()
    utils.system("mkdir -p %s" % local_dir)
    # stream a tarball out of the guest and unpack it locally
    tar_from_guest = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_from_guest + "| tar -C %s -xf -" % local_dir)
    # make the httpd logs readable by everyone
    utils.system("chmod a+r,a+x %s/httpd" % local_dir)
def gather_pgsql_logs (self):
    """Copy the guest's postgresql logs into logs/myplc.pgsql-log.<plc>/ locally."""
    local_dir = "logs/myplc.pgsql-log.%s" % self.name()
    utils.system("mkdir -p %s" % local_dir)
    # stream a tarball out of the guest and unpack it locally
    tar_from_guest = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_from_guest + "| tar -C %s -xf -" % local_dir)
def gather_nodes_var_logs (self):
    """Fetch /var/log from every node into logs/node.var-log.<node>/ locally."""
    for site_spec in self.plc_spec['sites']:
        site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode(self,site,node_spec)
            # ssh onto the node with the test key
            ssh = TestSsh (node.name(),key="keys/key1.rsa")
            local_dir = "logs/node.var-log.%s" % node.name()
            fetch = ssh.actual_command("tar -C /var/log -cf - .")
            fetch = fetch + "| tar -C %s -xf -" % local_dir
            utils.system("mkdir -p %s" % local_dir)
            utils.system(fetch)
1465 # returns the filename to use for sql dump/restore, using options.dbname if set
1466 def dbfile (self, database):
1467 # uses options.dbname if it is found
1469 name=self.options.dbname
1470 if not isinstance(name,StringTypes):
1473 t=datetime.datetime.now()
1476 return "/root/%s-%s.sql"%(database,name)
def plc_db_dump(self):
    'dump the planetlab5 DB in /root in the PLC - filename has time'
    # NOTE(review): the dump file stem is "planetab5" (sic) while the database
    # is planetlab5 - kept as-is so dump and restore file names keep matching
    dump=self.dbfile("planetab5")
    self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
    utils.header('Dumped planetlab5 database in %s'%dump)
    # BUG FIX: step methods must return a boolean (see module header)
    return True
def plc_db_restore(self):
    'restore the planetlab5 DB - looks broken, but run -n might help'
    # same (misspelled) file stem as plc_db_dump so the dump file is found
    dump=self.dbfile("planetab5")
    # stop the httpd service
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
    self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
    # restart the httpd service
    self.run_in_guest('service httpd start')

    utils.header('Database restored from ' + dump)
    # BUG FIX: step methods must return a boolean (see module header)
    return True
# No-op standby steps: the body is empty because the real work (sleeping N
# minutes) is performed by the standby_generic decorator defined at the top of
# this module, which parses the duration out of the function name
# ('standby_<minutes>').
# NOTE(review): the '@standby_generic' decorator lines are not visible in this
# extract - confirm each definition below carries one.
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass