1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBox import TestBox
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
23 from TestUserSfa import TestUserSfa
25 # step methods must take (self) and return a boolean (options is a member of the class)
# Sleep for <minutes> minutes -- the body of the generated standby_<n> steps.
# NOTE(review): original lines 29-31 are elided in this view; presumably a
# dry_run short-circuit and the boolean step return live in that gap -- confirm.
def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    time.sleep(60*minutes)
# Factory for the standby_<n> steps: the sleep duration is parsed out of the
# generated function's name (standby_<minutes>...).
# NOTE(review): the inner wrapper function (presumably 'actual(self)') that
# these lines belong to is elided in this view -- 'self' is its parameter,
# not a parameter of standby_generic.
def standby_generic (func):
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)
# Decorator-like helper: turns a TestNode method into a TestPlc step that maps
# it over every node of every site of this plc.
# NOTE(review): the inner wrapper def ('actual(self)'), the initialization of
# 'overall' and the final 'return overall' are elided in this view.
def node_mapper (method):
    # the TestNode method bearing the same name as the decorated step
    node_method = TestNode.__dict__[method.__name__]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self,test_site,node_spec)
            if not node_method(test_node): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
# Decorator-like helper: turns a TestSlice method into a TestPlc step that maps
# it over every slice declared in the plc spec.
# NOTE(review): the inner wrapper def, 'overall' init and final return are
# elided in this view (same pattern as node_mapper).
def slice_mapper (method):
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
# Decorator-like helper: same as slice_mapper but for the sfa slice specs and
# TestSliceSfa methods.
# NOTE(review): the inner wrapper def, 'overall' init and final return are
# elided in this view.
def slice_sfa_mapper (method):
    slice_method = TestSliceSfa.__dict__[method.__name__]
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSliceSfa(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
89 'show', 'local_pre', SEP,
90 'vs_delete','vs_create','plc_install', 'plc_configure', 'plc_start', SEP,
91 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
92 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
93 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', 'qemu_export', 'qemu_kill_all', 'qemu_start', SEP,
94 'sfa_install', 'sfa_configure', 'cross_sfa_configure', 'sfa_import', 'sfa_start', SEPSFA,
95 'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
96 'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
97 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
98 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
99 # but as the stress test might take a while, we sometimes missed the debug mode..
100 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
101 'ssh_node_boot', 'ssh_slice', 'check_initscripts', SEP,
102 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
104 'force_gather_logs', 'force_local_post', SEP,
109 'show_boxes', 'local_list','local_rel','local_rel_plc','local_rel_qemu',SEP,
110 'plc_stop', 'vs_start', 'vs_stop', SEP,
111 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
112 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
113 'delete_leases', 'list_leases', SEP,
115 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
116 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_mine', SEP,
117 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEP,
118 'plc_db_dump' , 'plc_db_restore', SEP,
119 'standby_1 through 20',SEP,
123 def printable_steps (list):
124 single_line=" ".join(list)+" "
125 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
127 def valid_step (step):
128 return step != SEP and step != SEPSFA
130 # turn off the sfa-related steps when build has skipped SFA
# this is originally for centos5 as recent SFAs won't build on this platform
    # Probe the build's rpm repository for sfa packages; when absent, demote all
    # sfa-related steps from default_steps to other_steps so they get skipped.
    # NOTE(review): the early return for retcod==0 (full build) -- original
    # line 137 -- is elided in this view.
    def check_whether_build_has_sfa (rpms_url):
        # warning, we're now building 'sface' so let's be a bit more picky
        retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
        # full builds are expected to return with 0 here
        # move all steps containing 'sfa' from default_steps to other_steps
        sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
        TestPlc.other_steps += sfa_steps
        for step in sfa_steps: TestPlc.default_steps.remove(step)
    def __init__ (self,plc_spec,options):
        "Bind the plc spec (dict) and the command-line options to this TestPlc."
        self.plc_spec=plc_spec
        # NOTE(review): the assignment of self.options and one more line
        # (original 145, 147) are elided in this view; self.options is clearly
        # set before being used below.
        self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
        self.vserverip=plc_spec['vserverip']
        self.vservername=plc_spec['vservername']
        self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        # NOTE(review): this raise sits under an elided conditional (original
        # lines 151-152) -- it presumably only fires for chroot-based setups.
        raise Exception,'chroot-based myplc testing is deprecated'
        self.apiserver=TestApiserver(self.url,options.dry_run)
157 name=self.plc_spec['name']
158 return "%s.%s"%(name,self.vservername)
161 return self.plc_spec['hostname']
164 return self.test_ssh.is_local()
166 # define the API methods on this object through xmlrpc
167 # would help, but not strictly necessary
171 def actual_command_in_guest (self,command):
172 return self.test_ssh.actual_command(self.host_to_guest(command))
174 def start_guest (self):
175 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
177 def stop_guest (self):
178 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))
180 def run_in_guest (self,command):
181 return utils.system(self.actual_command_in_guest(command))
183 def run_in_host (self,command):
184 return self.test_ssh.run_in_buildname(command)
186 #command gets run in the vserver
187 def host_to_guest(self,command):
188 return "vserver %s exec %s"%(self.vservername,command)
190 #start/stop the vserver
191 def start_guest_in_host(self):
192 return "vserver %s start"%(self.vservername)
194 def stop_guest_in_host(self):
195 return "vserver %s stop"%(self.vservername)
198 def run_in_guest_piped (self,local,remote):
199 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
    def auth_root (self):
        "PLCAPI authentication struct for this plc's root user."
        # NOTE(review): the closing brace of this dict literal (original line
        # 206) is elided in this view.
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role']
    def locate_site (self,sitename):
        "Return the site spec matching <sitename> by name or login_base; raises otherwise."
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
            # NOTE(review): the two return statements (original lines 210 and
            # 212) are elided in this view.
            if site['site_fields']['login_base'] == sitename:
        raise Exception,"Cannot locate site %s"%sitename
    def locate_node (self,nodename):
        "Return the (site_spec,node_spec) pair for the node named <nodename>."
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                # NOTE(review): the return statement (original line 219) is
                # elided in this view.
        raise Exception,"Cannot locate node %s"%nodename
    def locate_hostname (self,hostname):
        "Return the (site_spec,node_spec) pair for the node whose hostname matches."
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                # NOTE(review): the return statement (original line 226) is
                # elided in this view.
        raise Exception,"Cannot locate hostname %s"%hostname
    def locate_key (self,keyname):
        "Return the key spec named <keyname>; raises when not found."
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
            # NOTE(review): the return statement (original line 232) is elided
            # in this view.
        raise Exception,"Cannot locate key %s"%keyname
    def locate_slice (self, slicename):
        "Return the slice spec named <slicename>; raises when not found."
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
            # NOTE(review): the return statement (original line 238) is elided
            # in this view.
        raise Exception,"Cannot locate slice %s"%slicename
    def all_sliver_objs (self):
        "TestSliver objects for every (slice,node) pair declared in the spec."
        # NOTE(review): the initialization of 'result' and the final return
        # (original lines 242 and 247-248) are elided in this view.
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
    def locate_sliver_obj (self,nodename,slicename):
        "Build the TestSliver object for <slicename> on <nodename>."
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        # build the wrapper objects around the located specs
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)
    def locate_first_node(self):
        "TestNode for the first node of the first declared slice."
        nodename=self.plc_spec['slices'][0]['nodenames'][0]
        (site,node) = self.locate_node(nodename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # NOTE(review): the 'return test_node' (original lines 264-265) is
        # elided in this view.
266 def locate_first_sliver (self):
267 slice_spec=self.plc_spec['slices'][0]
268 slicename=slice_spec['slice_fields']['name']
269 nodename=slice_spec['nodenames'][0]
270 return self.locate_sliver_obj(nodename,slicename)
272 # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        "Group the (virtual) test nodes by the host box that runs their qemu."
        # maps on sites and nodes, return [ (host_box,test_node) ]
        # NOTE(review): the initialization of 'tuples' and 'result', the
        # creation of empty dict entries, and the final return (original lines
        # 275, 283, 286-287, 289-290) are elided in this view.
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        for (box,node) in tuples:
            if not result.has_key(box):
            result[box].append(node)
291 # a step for checking this stuff
    def show_boxes (self):
        'print summary of nodes location'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
        # NOTE(review): the 'return True' expected from step methods (original
        # line 296) is elided in this view.
298 # make this a valid step
    def qemu_kill_all(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBox(box,self.options.buildname).qemu_kill_all(nodedir)
        # NOTE(review): the step's final return (original line 306) is elided.
308 # make this a valid step
    def qemu_list_all(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, kill all qemus on that host box
            TestBox(box,self.options.buildname).qemu_list_all()
        # NOTE(review): the step's final return (original line 314) is elided.
316 # kill only the right qemus
    def qemu_list_mine(self):
        'list qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            # NOTE(review): the loop body and return (original lines 321-324)
            # are elided in this view.
325 # kill only the right qemus
    def qemu_kill_mine(self):
        'kill the qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            # NOTE(review): the loop body and return (original lines 330-333)
            # are elided in this view.
334 #################### display config
336 "show test configuration after localization"
337 self.display_pass (1)
338 self.display_pass (2)
342 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
    def display_pass (self,passno):
        "One display pass over the plc spec; which keys print depends on passno."
        for (key,val) in self.plc_spec.iteritems():
            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
            # NOTE(review): the passno dispatch and the 'sites'/'slices'/'keys'
            # branch headers (original lines 346-348, 355-356, 358-359, 361)
            # are elided in this view -- the dangling calls below each belong
            # to one of those elided branches.
                self.display_site_spec(site)
                for node in site['nodes']:
                    self.display_node_spec(node)
            elif key=='initscripts':
                for initscript in val:
                    self.display_initscript_spec (initscript)
                    self.display_slice_spec (slice)
                    self.display_key_spec (key)
                if key not in ['sites','initscripts','slices','keys', 'sfa']:
                    print '+ ',key,':',val
    def display_site_spec (self,site):
        "Pretty-print one site spec, key by key."
        print '+ ======== site',site['site_fields']['name']
        for (k,v) in site.iteritems():
            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
            # NOTE(review): branch headers for 'nodes'/'users' and several
            # other lines (original 369-370, 372, 374-378, 380, 384-388) are
            # elided in this view -- the dangling prints below belong to them.
            print '+ ','nodes : ',
            print node['node_fields']['hostname'],'',
            print user['name'],'',
            elif k == 'site_fields':
                print '+ login_base',':',v['login_base']
            elif k == 'address_fields':
389 def display_initscript_spec (self,initscript):
390 print '+ ======== initscript',initscript['initscript_fields']['name']
392 def display_key_spec (self,key):
393 print '+ ======== key',key['name']
    def display_slice_spec (self,slice):
        "Pretty-print one slice spec."
        print '+ ======== slice',slice['slice_fields']['name']
        for (k,v) in slice.iteritems():
            # NOTE(review): the branches for other keys (original lines
            # 398-409) are elided in this view.
            elif k=='slice_fields':
                print '+ fields',':',
                print 'max_nodes=',v['max_nodes'],
    def display_node_spec (self,node):
        "One-line summary of a node spec (full dump when verbose)."
        # trailing commas keep the three prints on a single output line
        print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
        print "hostname=",node['node_fields']['hostname'],
        print "ip=",node['interface_fields']['ip']
        if self.options.verbose:
            utils.pprint("node details",node,depth=3)
424 # another entry point for just showing the boxes involved
    def display_mapping (self):
        "Entry point for just showing the boxes involved in this setup."
        TestPlc.display_mapping_plc(self.plc_spec)
        # NOTE(review): original lines 427-428 (presumably the step's return)
        # are elided in this view.
    def display_mapping_plc (plc_spec):
        "Show the plc-level mapping: vserver address, IP, then each node."
        # NOTE(review): the decorator line (original 429, presumably
        # @staticmethod) is elided in this view.
        print '+ MyPLC',plc_spec['name']
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)
    def display_mapping_node (node_spec):
        "Show one node's name, qemu host box and hostname."
        # NOTE(review): the decorator line (original 438, presumably
        # @staticmethod) is elided in this view.
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']
444 def local_pre (self):
445 "run site-dependant pre-test script as defined in LocalTestResources"
446 from LocalTestResources import local_resources
447 return local_resources.step_pre(self)
449 def local_post (self):
450 "run site-dependant post-test script as defined in LocalTestResources"
451 from LocalTestResources import local_resources
452 return local_resources.step_post(self)
454 def local_list (self):
455 "run site-dependant list script as defined in LocalTestResources"
456 from LocalTestResources import local_resources
457 return local_resources.step_list(self)
459 def local_rel (self):
460 "run site-dependant release script as defined in LocalTestResources"
461 from LocalTestResources import local_resources
462 return local_resources.step_release(self)
464 def local_rel_plc (self):
465 "run site-dependant release script as defined in LocalTestResources"
466 from LocalTestResources import local_resources
467 return local_resources.step_release_plc(self)
469 def local_rel_qemu (self):
470 "run site-dependant release script as defined in LocalTestResources"
471 from LocalTestResources import local_resources
472 return local_resources.step_release_qemu(self)
475 "vserver delete the test myplc"
476 self.run_in_host("vserver --silent %s delete"%self.vservername)
480 # historically the build was being fetched by the tests
481 # now the build pushes itself as a subdir of the tests workdir
482 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def vs_create (self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        # NOTE(review): several lines are elided in this view (original 486,
        # 492, 494, 504, 511, 514, 517) -- among them the initialization of
        # test_env_options and the try/except around the reverse DNS lookup
        # below; the two 'fatal' prints belong to that elided except branch.
        # a full path for the local calls
        build_dir=os.path.dirname(sys.argv[0])
        # sometimes this is empty - set to "." in such a case
        if not build_dir: build_dir="."
        build_dir += "/build"
        # use a standard name - will be relative to remote buildname
        # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
        self.test_ssh.rmdir(build_dir)
        self.test_ssh.copy(build_dir,recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to vtest-init-vserver
        test_env_options += " -p %s"%self.options.personality
        test_env_options += " -d %s"%self.options.pldistro
        test_env_options += " -f %s"%self.options.fcdistro
        script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        # reverse-resolve the vserver IP so the guest gets a proper hostname
        vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
        vserver_options += " --hostname %s"%vserver_hostname
        print "Cannot reverse lookup %s"%self.vserverip
        print "This is considered fatal, as this might pollute the test results"
        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
    def plc_install(self):
        "yum install myplc, noderepo, and the plain bootstrapfs"
        # NOTE(review): several lines are elided in this view (original 524,
        # 528-529, 531, 533-534, 537-538) -- among them the pkgs_list
        # initialization and the personality->arch assignments under the
        # dangling if/elif below.
        # workaround for getting pgsql8.2 on centos5
        if self.options.fcdistro == "centos5":
            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
        # map the personality option onto a yum arch
        if self.options.personality == "linux32":
        elif self.options.personality == "linux64":
            raise Exception, "Unsupported personality %r"%self.options.personality
        nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
        pkgs_list.append ("slicerepo-%s"%nodefamily)
        pkgs_list.append ("myplc")
        pkgs_list.append ("noderepo-%s"%nodefamily)
        pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
        pkgs_string=" ".join(pkgs_list)
        self.run_in_guest("yum -y install %s"%pkgs_string)
        # rpm -q checks that every package actually got installed
        return self.run_in_guest("rpm -q %s"%pkgs_string)==0
    def plc_configure(self):
        # NOTE(review): the step docstring (original line 549), most entries
        # of the variable list (552-569 partially), fileconf.close() (573) and
        # the final return (577+) are elided in this view.
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        # feed each config variable to plc-config-tty: 'e <var>' then its value
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     # Above line was added for integrating SFA Testing
                     'PLC_RESERVATION_GRANULARITY',
                     'PLC_OMF_XMPP_SERVER',
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        # 'w' writes the config, 'q' quits plc-config-tty
        fileconf.write('w\n')
        fileconf.write('q\n')
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
581 self.run_in_guest('service plc start')
586 self.run_in_guest('service plc stop')
590 "start the PLC vserver"
595 "stop the PLC vserver"
599 # stores the keys from the config for further use
    def keys_store(self):
        "stores test users ssh keys in keys/"
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
        # NOTE(review): the step's return (original lines 604-605) is elided.
    def keys_clean(self):
        "removes keys cached in keys/"
        utils.system("rm -rf ./keys")
        # NOTE(review): the step's return (original lines 609-610) is elided.
611 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
612 # for later direct access to the nodes
    def keys_fetch(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        # NOTE(review): several lines are elided in this view (original 615,
        # 617, 619, 625-626) -- among them the definition of 'dir', the mkdir,
        # 'overall=True' and 'return overall'.
        if not os.path.isdir(dir):
        vservername=self.vservername
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
            if self.test_ssh.fetch(src,dst) != 0: overall=False
628 "create sites with PLCAPI"
629 return self.do_sites()
631 def delete_sites (self):
632 "delete sites with PLCAPI"
633 return self.do_sites(action="delete")
    def do_sites (self,action="add"):
        "Create (default) or delete the declared sites and their users."
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # deleted with the site
                #test_site.delete_users()
                # NOTE(review): the 'else:' branch header and the final return
                # (original lines 643-644, 648-649) are elided in this view;
                # the creation calls below belong to that elided else branch.
                utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
                test_site.create_site()
                test_site.create_users()
    def delete_all_sites (self):
        "Delete all sites in PLC, and related objects"
        print 'auth_root',self.auth_root()
        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
        for site_id in site_ids:
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)
        # NOTE(review): the step's return (original lines 657-658) is elided.
660 "create nodes with PLCAPI"
661 return self.do_nodes()
662 def delete_nodes (self):
663 "delete nodes with PLCAPI"
664 return self.do_nodes(action="delete")
    def do_nodes (self,action="add"):
        "Create (default) or delete the declared nodes, site by site."
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            # NOTE(review): the action dispatch ('if'/'else' headers) and the
            # final return (original lines 669, 675, 681-682) are elided in
            # this view; the two loops below belong to the delete and add
            # branches respectively.
            utils.header("Deleting nodes in site %s"%test_site.name())
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                utils.header("Deleting %s"%test_node.name())
                test_node.delete_node()
            utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
            for node_spec in site_spec['nodes']:
                utils.pprint('Creating node %s'%node_spec,node_spec)
                test_node = TestNode (self,test_site,node_spec)
                test_node.create_node ()
683 def nodegroups (self):
684 "create nodegroups with PLCAPI"
685 return self.do_nodegroups("add")
686 def delete_nodegroups (self):
687 "delete nodegroups with PLCAPI"
688 return self.do_nodegroups("delete")
692 def translate_timestamp (start,grain,timestamp):
693 if timestamp < TestPlc.YEAR: return start+timestamp*grain
694 else: return timestamp
697 def timestamp_printable (timestamp):
698 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
701 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
703 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
704 print 'API answered grain=',grain
705 start=(now/grain)*grain
707 # find out all nodes that are reservable
708 nodes=self.all_reservable_nodenames()
710 utils.header ("No reservable node found - proceeding without leases")
713 # attach them to the leases as specified in plc_specs
714 # this is where the 'leases' field gets interpreted as relative of absolute
715 for lease_spec in self.plc_spec['leases']:
716 # skip the ones that come with a null slice id
717 if not lease_spec['slice']: continue
718 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
719 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
720 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
721 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
722 if lease_addition['errors']:
723 utils.header("Cannot create leases, %s"%lease_addition['errors'])
726 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
727 (nodes,lease_spec['slice'],
728 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
729 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
    def delete_leases (self):
        "remove all leases in the myplc side"
        lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
        utils.header("Cleaning leases %r"%lease_ids)
        self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
        # NOTE(review): the step's return (original lines 738-739) is elided.
    def list_leases (self):
        "list all leases known to the myplc"
        leases = self.apiserver.GetLeases(self.auth_root())
        # NOTE(review): the computation of 'now' and the loop header over the
        # leases (original lines 743-744) are elided in this view; 'l' is the
        # elided loop's variable.
            current=l['t_until']>=now
            if self.options.verbose or current:
                utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                                       TestPlc.timestamp_printable(l['t_from']),
                                                       TestPlc.timestamp_printable(l['t_until'])))
752 # create nodegroups if needed, and populate
    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        "Create ('add') or clean ('delete') nodegroups, implemented via node tags."
        # 1st pass to scan contents
        # NOTE(review): many scaffolding lines are elided in this view
        # (original 755, 769, 771, 775, 777, 781, 783, 785, 790, 792, 795-796,
        # 803-804, 807-809, 812, 814-816) -- among them the initialization of
        # groups_dict and 'overall', the add/delete dispatch, and the
        # try/except blocks the dangling statements below belong to.
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
            # first, check if the nodetagtype is here
            tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
            tag_type_id = tag_types[0]['tag_type_id']
            tag_type_id = self.apiserver.AddTagType(auth,
                                                    {'tagname':nodegroupname,
                                                     'description': 'for nodegroup %s'%nodegroupname,
            print 'located tag (type)',nodegroupname,'as',tag_type_id
            nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
            self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
            print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
            # set node tag on all nodes, value='yes'
            for nodename in group_nodes:
                self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                traceback.print_exc()
                print 'node',nodename,'seems to already have tag',nodegroupname
                # check that the tag was indeed set
                expect_yes = self.apiserver.GetNodeTags(auth,
                                                        {'hostname':nodename,
                                                         'tagname':nodegroupname},
                                                        ['value'])[0]['value']
                if expect_yes != "yes":
                    print 'Mismatch node tag on node',nodename,'got',expect_yes
            if not self.options.dry_run:
                print 'Cannot find tag',nodegroupname,'on node',nodename
            # delete action: clean the nodegroup itself
            print 'cleaning nodegroup',nodegroupname
            self.apiserver.DeleteNodeGroup(auth,nodegroupname)
            traceback.print_exc()
817 # return a list of tuples (nodename,qemuname)
    def all_node_infos (self) :
        "List of (hostname, host_box) tuples over every node of every site."
        # NOTE(review): 'node_infos=[]' and the final return (original lines
        # 819 and 823-824) are elided in this view.
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                            for node_spec in site_spec['nodes'] ]
825 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
    def all_reservable_nodenames (self):
        "Hostnames of the nodes whose node_type is 'reservable'."
        # NOTE(review): 'res=[]' and 'return res' (original lines 827 and
        # 833-834) are elided in this view.
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields=node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                    res.append(node_fields['hostname'])
835 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
        "Poll PLCAPI until every node reaches <target_boot_state>, or time out."
        if self.options.dry_run:
            # NOTE(review): the dry-run body, the polling while-loop header,
            # the sleep and the success/failure returns (original lines
            # 838-840, 848-849, 851, 857, 862, 868, 870-871, 875, 877-880)
            # are elided in this view.
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
        # refresh the status of each polled node
        for array in tocheck_status:
            hostname=array['hostname']
            boot_state=array['boot_state']
            if boot_state == target_boot_state:
                utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
            # if it's a real node, never mind
            (site_spec,node_spec)=self.locate_hostname(hostname)
            if TestNode.is_real_model(node_spec['node_fields']['model']):
                utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                # pretend the real node reached the target state
                boot_state = target_boot_state
            elif datetime.datetime.now() > graceout:
                utils.header ("%s still in '%s' state"%(hostname,boot_state))
                graceout=datetime.datetime.now()+datetime.timedelta(1)
            status[hostname] = boot_state
        # shrink tocheck to the nodes that haven't reached the target state
        tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
        if datetime.datetime.now() > timeout:
            for hostname in tocheck:
                utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
        # otherwise, sleep for a while
        # only useful in empty plcs
881 def nodes_booted(self):
882 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
        "Poll ssh access into all nodes (debug or production key) until success or timeout."
        # NOTE(review): several lines are elided in this view (original 885,
        # 889-890, 892-893, 901, 908, 910, 912, 918-919, 923, 925-928) --
        # among them the computation of 'message', the debug/boot branches
        # selecting between the two local_key assignments below, and the
        # polling while-loop scaffolding.
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        vservername=self.vservername
        local_key = "keys/%(vservername)s-debug.rsa"%locals()
        local_key = "keys/key1.rsa"
        node_infos = self.all_node_infos()
        utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
        for (nodename,qemuname) in node_infos:
            utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                     (timeout_minutes,silent_minutes,period))
        for node_info in node_infos:
            (hostname,qemuname) = node_info
            # try to run 'hostname' in the node
            command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
            # don't spam logs - show the command only after the grace period
            success = utils.system ( command, silent=datetime.datetime.now() < graceout)
            utils.header('Successfully entered root@%s (%s)'%(hostname,message))
            # NOTE(review): removing entries from node_infos while iterating
            # over it is fragile -- confirm against the full file.
            node_infos.remove(node_info)
            # we will have tried real nodes once, in case they're up - but if not, just skip
            (site_spec,node_spec)=self.locate_hostname(hostname)
            if TestNode.is_real_model(node_spec['node_fields']['model']):
                utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                node_infos.remove(node_info)
        if datetime.datetime.now() > timeout:
            for (hostname,qemuname) in node_infos:
                utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
        # otherwise, sleep for a while
        # only useful in empty plcs
929 def ssh_node_debug(self):
930 "Tries to ssh into nodes in debug mode with the debug ssh key"
931 return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)
933 def ssh_node_boot(self):
934 "Tries to ssh into nodes in production mode with the root ssh key"
935 return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
    def qemu_local_init (self):
        "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
        # NOTE(review): the body (original lines 940-941) is elided in this
        # view -- presumably a node_mapper delegation; confirm.
943 "all nodes: invoke GetBootMedium and store result locally"
    def qemu_local_config (self):
        "all nodes: compute qemu config qemu.conf and store it locally"
        # NOTE(review): the body (original lines 948-949) is elided in this
        # view -- presumably a node_mapper delegation; confirm.
    def nodestate_reinstall (self):
        "all nodes: mark PLCAPI boot_state as reinstall"
        # NOTE(review): the body (original lines 952-953) is elided in this
        # view -- presumably a node_mapper delegation; confirm.
    def nodestate_safeboot (self):
        "all nodes: mark PLCAPI boot_state as safeboot"
        # NOTE(review): the body (original lines 956-957) is elided in this
        # view -- presumably a node_mapper delegation; confirm.
    def nodestate_boot (self):
        "all nodes: mark PLCAPI boot_state as boot"
        # NOTE(review): the body (original lines 960-961) is elided in this
        # view -- presumably a node_mapper delegation; confirm.
    def nodestate_show (self):
        "all nodes: show PLCAPI boot_state"
        # NOTE(review): the body (original lines 964-965) is elided in this
        # view -- presumably a node_mapper delegation; confirm.
    def qemu_export (self):
        "all nodes: push local node-dep directory on the qemu box"
        # NOTE(review): the body (original lines 968-969) is elided in this
        # view -- presumably a node_mapper delegation; confirm.
970 ### check hooks : invoke scripts from hooks/{node,slice}
971 def check_hooks_node (self):
972 return self.locate_first_node().check_hooks()
973 def check_hooks_sliver (self) :
974 return self.locate_first_sliver().check_hooks()
976 def check_hooks (self):
977 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
978 return self.check_hooks_node() and self.check_hooks_sliver()
    def do_check_initscripts(self):
        "Verify that each slice's initscript stamp is present on all its slivers."
        # NOTE(review): 'overall=True', the 'continue' under the has_key test,
        # and the final bookkeeping/return (original lines 982, 985, 995-997)
        # are elided in this view.
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptstamp'):
            stamp=slice_spec['initscriptstamp']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript_stamp(stamp):
998 def check_initscripts(self):
999 "check that the initscripts have triggered"
1000 return self.do_check_initscripts()
    def initscripts (self):
        "create initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
        # NOTE(review): the step's return (original lines 1007-1008) is elided.
    def delete_initscripts (self):
        "delete initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            # NOTE(review): the try/except wrapping the deletion (original
            # lines 1014, 1017) and the final return (1019+) are elided in
            # this view; the last print below belongs to the except branch.
            self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
            print initscript_name,'deleted'
            print 'deletion went wrong - probably did not exist'
1023 "create slices with PLCAPI"
1024 return self.do_slices()
1026 def delete_slices (self):
1027 "delete slices with PLCAPI"
1028 return self.do_slices("delete")
    def do_slices (self, action="add"):
        "Create (default) or delete the declared slices."
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
            # NOTE(review): the action dispatch ('if'/'else' headers, original
            # lines 1035 and 1038) and the final return (1042-1044) are elided
            # in this view.
            utils.header("Deleting slices in site %s"%test_site.name())
            test_slice.delete_slice()
            utils.pprint("Creating slice",slice)
            test_slice.create_slice()
            utils.header('Created Slice %s'%slice['slice_fields']['name'])
@slice_mapper
def ssh_slice(self):
    "tries to ssh-enter the slice with the user key, to ensure slice creation"
    # NOTE(review): decorator and 'pass' body restored from the mapper
    # convention used by this file (slice_mapper dispatches to the
    # TestSlice method of the same name) - confirm against upstream
    pass
@node_mapper
def keys_clear_known_hosts (self):
    "remove test nodes entries from the local known_hosts file"
    # NOTE(review): decorator and 'pass' body restored from the mapper
    # convention (node_mapper dispatches to the TestNode method of the
    # same name) - confirm against upstream
    pass
@node_mapper
def qemu_start (self) :
    "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
    # NOTE(review): decorator and 'pass' body restored from the mapper
    # convention (node_mapper dispatches to TestNode.qemu_start) - confirm
    pass
def check_tcp (self):
    "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
    # each spec describes one (server sliver, client sliver, port) triple
    specs = self.plc_spec['tcp_test']
    overall=True
    for spec in specs:
        port = spec['port']
        # locate the server-side sliver and get the server running first
        s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
        if not s_test_sliver.run_tcp_server(port,timeout=10):
            overall=False
            break
        # idem for the client side
        # bug fix: the client sliver was located with the *server* keys
        # (spec['server_node']/spec['server_slice']); use the client keys
        c_test_sliver = self.locate_sliver_obj(spec['client_node'],spec['client_slice'])
        if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
            overall=False
    return overall
def plcsh_stress_test (self):
    "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # restored: the 'command = location' initialization was missing from
    # the garbled source - without it the += below would blow up
    command = location
    command += " -- --check"
    # keep the run short on minimal-size test setups
    if self.options.size == 1:
        command += " --tiny"
    return ( self.run_in_guest(command) == 0)
# populate runs the same utility with slightly different options
1090 # in particular runs with --preserve (dont cleanup) and without --check
1091 # also it gets run twice, once with the --foreign option for creating fake foreign entries
def sfa_install(self):
    "yum install sfa, sfa-plc and sfa-client"
    # install, then verify that all four rpms actually landed
    self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")
    rc = self.run_in_guest("rpm -q sfa sfa-client sfa-plc sfa-sfatables")
    return rc == 0
def sfa_dbclean(self):
    "thoroughly wipes off the SFA database"
    # bug fix: the '==0' comparison was computed and then discarded, so the
    # step always returned None; return the actual success status instead
    return self.run_in_guest("sfa-nuke-plc.py")==0
1106 def sfa_plcclean(self):
1107 "cleans the PLC entries that were created as a side effect of running the script"
1109 sfa_spec=self.plc_spec['sfa']
1111 slicename='%s_%s'%(sfa_spec['login_base'],sfa_spec['slicename'])
1112 try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
1113 except: print "Slice %s already absent from PLC db"%slicename
1115 username="%s@%s"%(sfa_spec['regularuser'],sfa_spec['domain'])
1116 try: self.apiserver.DeletePerson(self.auth_root(),username)
1117 except: print "User %s already absent from PLC db"%username
1119 print "REMEMBER TO RUN sfa_import AGAIN"
def sfa_uninstall(self):
    "uses rpm to uninstall sfa - ignore result"
    self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
    self.run_in_guest("rm -rf /var/lib/sfa")
    self.run_in_guest("rm -rf /etc/sfa")
    self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
    # make sure sfa-plc goes away even when its uninstall scriptlets fail
    self.run_in_guest("rpm -e --noscripts sfa-plc")
    # deliberately best-effort - always report success (restored return)
    return True
1132 ### run unit tests for SFA
1133 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1134 # Running Transaction
1135 # Transaction couldn't start:
1136 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1137 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1138 # no matter how many Gbs are available on the testplc
1139 # could not figure out what's wrong, so...
1140 # if the yum install phase fails, consider the test is successful
1141 # other combinations will eventually run it hopefully
def sfa_utest(self):
    "yum install sfa-tests and run SFA unittests"
    self.run_in_guest("yum -y install sfa-tests")
    # failed to install - forget it (see the yum oddity described above:
    # a failed install is deliberately treated as success)
    if self.run_in_guest("rpm -q sfa-tests")!=0:
        utils.header("WARNING: SFA unit tests failed to install, ignoring")
        # restored: without this return we would fall through and try to
        # run tests that are not installed
        return True
    return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1153 dirname="conf.%s"%self.plc_spec['name']
1154 if not os.path.isdir(dirname):
1155 utils.system("mkdir -p %s"%dirname)
1156 if not os.path.isdir(dirname):
1157 raise "Cannot create config dir for plc %s"%self.name()
def conffile(self,filename):
    # full path of a config file inside this plc's config directory
    return self.confdir() + "/" + filename
def confsubdir(self,dirname,clean,dry_run=False):
    # subdirectory of confdir(); wiped first when 'clean' is set.
    # In dry_run mode the directory may legitimately fail to appear.
    # (restored: the 'if clean:' guard and final return were garbled/missing)
    subdirname="%s/%s"%(self.confdir(),dirname)
    if clean:
        utils.system("rm -rf %s"%subdirname)
    if not os.path.isdir(subdirname):
        utils.system("mkdir -p %s"%subdirname)
    if not dry_run and not os.path.isdir(subdirname):
        # bug fix: raising a plain string is invalid - raise a real exception
        raise Exception("Cannot create config subdir %s for plc %s"%(dirname,self.name()))
    return subdirname
def conffile_clean (self,filename):
    # remove one local config file; True when the rm succeeded
    path = self.conffile(filename)
    return utils.system("rm -rf %s"%path) == 0
def sfa_configure(self):
    "run sfa-config-tty"
    # drive sfa-config-tty non-interactively: build an answers script
    # ('e VAR' then the value, then w=write / R=restart / q=quit)
    # and pipe it into the guest
    tmpname=self.conffile("sfa-config-tty")
    fileconf=open(tmpname,'w')
    # NOTE(review): this variable list was partly garbled in the source;
    # confirm it matches the keys present in plc_spec['sfa']
    for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                 'SFA_INTERFACE_HRN',
#                 'SFA_REGISTRY_LEVEL1_AUTH',
                 'SFA_REGISTRY_HOST',
                 'SFA_AGGREGATE_HOST',
                 'SFA_SM_HOST',
                 'SFA_PLC_USER',
                 'SFA_PLC_PASSWORD',
                 'SFA_PLC_DB_HOST',
                 'SFA_PLC_DB_USER',
                 'SFA_PLC_DB_PASSWORD',
                 ]:
        fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
    # the way plc_config handles booleans just sucks..
    for var in ['SFA_API_DEBUG']:
        val='false'
        if self.plc_spec['sfa'][var]: val='true'
        fileconf.write ('e %s\n%s\n'%(var,val))
    fileconf.write('w\n')
    fileconf.write('R\n')
    fileconf.write('q\n')
    # bug fix: close before shipping, so the last lines are flushed
    fileconf.close()
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
    return True
def aggregate_xml_line(self):
    # XML fragment describing this plc's aggregate, used by other plcs'
    # aggregates.xml in cross_sfa_configure
    sfa = self.plc_spec['sfa']
    return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
        (self.vserverip, sfa['SFA_REGISTRY_ROOT_AUTH'], sfa['neighbours-port'])
def registry_xml_line(self):
    # XML fragment describing this plc's registry (port is hard-wired to 12345)
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (self.vserverip, hrn)
# a cross step that takes all other plcs in argument
def cross_sfa_configure(self, other_plcs):
    "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
    # of course with a single plc, other_plcs is an empty list
    if not other_plcs:
        return True
    # bug fix: the py2-only 'file()' builtin left the handles unclosed;
    # use open()/close() so the contents are flushed before copy_abs
    agg_fname=self.conffile("agg.xml")
    agg_file=open(agg_fname,"w")
    agg_file.write("<aggregates>%s</aggregates>\n" % \
                   " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
    agg_file.close()
    utils.header ("(Over)wrote %s"%agg_fname)
    reg_fname=self.conffile("reg.xml")
    reg_file=open(reg_fname,"w")
    reg_file.write("<registries>%s</registries>\n" % \
                   " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
    reg_file.close()
    utils.header ("(Over)wrote %s"%reg_fname)
    return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
       and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
def sfa_import(self):
    "runs sfa-import-plc.py in the plc to seed the SFA registry"
    # bug fix: 'auth' was computed from the spec but never used - dropped
    return self.run_in_guest('sfa-import-plc.py')==0
    # not needed anymore
    # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
def sfa_start(self):
    # start the sfa service in the guest; succeed when the init script does
    rc = self.run_in_guest('service sfa start')
    return rc == 0
def sfi_configure(self):
    "Create /root/.sfi on the plc side for sfi client configuration"
    # Builds the whole .sfi directory locally under conf.<plc>/dot-sfi
    # (private key, sfi_config, person.xml, slice.xml, slice.rspec),
    # then pushes it to /root/.sfi inside the guest.
    # NOTE(review): none of the fileconf handles below is ever closed -
    # this relies on refcounting to flush before copy_abs; TODO confirm
    sfa_spec=self.plc_spec['sfa']
    dir_name=self.confsubdir("dot-sfi",clean=True,dry_run=self.options.dry_run)
    if self.options.dry_run: return True
    # (1) the PI user's private key - first key of the plc spec
    file_name=dir_name + os.sep + sfa_spec['piuser'] + '.pkey'
    fileconf=open(file_name,'w')
    fileconf.write (self.plc_spec['keys'][0]['private'])
    utils.header ("(Over)wrote %s"%file_name)
    # (2) the sfi_config file proper
    file_name=dir_name + os.sep + 'sfi_config'
    fileconf=open(file_name,'w')
    SFI_AUTH="%s.%s"%(sfa_spec['SFA_REGISTRY_ROOT_AUTH'],sfa_spec['login_base'])
    fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
    fileconf.write('\n')
    SFI_USER=SFI_AUTH + '.' + sfa_spec['piuser']
    fileconf.write ("SFI_USER='%s'"%SFI_USER)
    fileconf.write('\n')
    # ports are hard-wired: 12345 = registry, 12347 = slice manager
    SFI_REGISTRY='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12345/'
    fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
    fileconf.write('\n')
    SFI_SM='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12347/'
    fileconf.write ("SFI_SM='%s'"%SFI_SM)
    fileconf.write('\n')
    utils.header ("(Over)wrote %s"%file_name)
    # (3) person.xml - only the last record of the list ends up written;
    # presumably the spec carries a single entry - TODO confirm
    file_name=dir_name + os.sep + 'person.xml'
    fileconf=open(file_name,'w')
    for record in sfa_spec['sfa_person_xml']:
        person_record=record
    fileconf.write(person_record)
    fileconf.write('\n')
    utils.header ("(Over)wrote %s"%file_name)
    # (4) slice.xml
    file_name=dir_name + os.sep + 'slice.xml'
    fileconf=open(file_name,'w')
    for record in sfa_spec['sfa_slice_xml']:
        # NOTE(review): slice_record is used below but never assigned in
        # this loop - an assignment (slice_record=record?) appears to be
        # missing from the garbled source; TODO confirm
        #slice_record=sfa_spec['sfa_slice_xml']
        fileconf.write(slice_record)
        fileconf.write('\n')
    utils.header ("(Over)wrote %s"%file_name)
    # (5) slice.rspec
    file_name=dir_name + os.sep + 'slice.rspec'
    fileconf=open(file_name,'w')
    for (key, value) in sfa_spec['sfa_slice_rspec'].items():
        # NOTE(review): slice_rspec is written but never built from
        # (key,value) - its construction appears to be missing from the
        # garbled source; TODO confirm
        fileconf.write(slice_rspec)
        fileconf.write('\n')
    utils.header ("(Over)wrote %s"%file_name)
    # push to the remote root's .sfi
    location = "root/.sfi"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs(dir_name, remote, recursive=True)
def sfi_clean (self):
    "clean up /root/.sfi on the plc side"
    # step methods must return a boolean; report whether the rm succeeded
    return self.run_in_guest("rm -rf /root/.sfi")==0
def sfa_add_user(self):
    "run sfi.py add using person.xml"
    # delegate to the sfa user test object
    user = TestUserSfa(self)
    return user.add_user()
def sfa_update_user(self):
    "run sfi.py update using person.xml"
    # delegate to the sfa user test object
    user = TestUserSfa(self)
    return user.update_user()
@slice_sfa_mapper
def sfa_add_slice(self):
    "run sfi.py add (on Registry) from slice.xml"
    # NOTE(review): decorator and 'pass' body restored from the mapper
    # convention (slice_sfa_mapper dispatches to TestSliceSfa) - confirm
    pass
@slice_sfa_mapper
def sfa_discover(self):
    "discover resources into resouces_in.rspec"
    # NOTE(review): decorator and 'pass' body restored from the mapper
    # convention (slice_sfa_mapper dispatches to TestSliceSfa) - confirm
    pass
@slice_sfa_mapper
def sfa_create_slice(self):
    "run sfi.py create (on SM) - 1st time"
    # NOTE(review): decorator and 'pass' body restored from the mapper
    # convention (slice_sfa_mapper dispatches to TestSliceSfa) - confirm
    pass
@slice_sfa_mapper
def sfa_check_slice_plc(self):
    "check sfa_create_slice at the plcs - all local nodes should be in slice"
    # NOTE(review): decorator and 'pass' body restored from the mapper
    # convention (slice_sfa_mapper dispatches to TestSliceSfa) - confirm
    pass
@slice_sfa_mapper
def sfa_update_slice(self):
    "run sfi.py create (on SM) on existing object"
    # NOTE(review): decorator and 'pass' body restored from the mapper
    # convention (slice_sfa_mapper dispatches to TestSliceSfa) - confirm
    pass
1349 "run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
1350 sfa_spec=self.plc_spec['sfa']
1351 auth=sfa_spec['SFA_REGISTRY_ROOT_AUTH']
1353 self.run_in_guest("sfi.py -d /root/.sfi/ list %s.%s"%(auth,sfa_spec['login_base']))==0 and \
1354 self.run_in_guest("sfi.py -d /root/.sfi/ show %s.%s"%(auth,sfa_spec['login_base']))==0 and \
1355 self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
1356 self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0
@slice_sfa_mapper
def ssh_slice_sfa(self):
    "tries to ssh-enter the SFA slice"
    # NOTE(review): decorator and 'pass' body restored from the mapper
    # convention (slice_sfa_mapper dispatches to TestSliceSfa) - confirm
    pass
def sfa_delete_user(self):
    "run sfi.py delete (on SM) for user"
    # delegate to the sfa user test object
    return TestUserSfa(self).delete_user()
@slice_sfa_mapper
def sfa_delete_slice(self):
    "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
    # NOTE(review): decorator and 'pass' body restored from the mapper
    # convention (slice_sfa_mapper dispatches to TestSliceSfa) - confirm
    pass
def sfa_stop(self):
    "service sfa stop"
    # NOTE(review): the 'def' line was missing from the garbled source -
    # reconstructed; best-effort: the exit code is deliberately ignored
    # (the original computed '==0' and discarded it) so teardown keeps going
    self.run_in_guest('service sfa stop')
    return True
def populate (self):
    "creates random entries in the PLCAPI"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # restored: the 'command = location' initialization was missing from
    # the garbled source - without it the += below would blow up
    command = location
    command += " -- --preserve --short-names"
    local_ok = (self.run_in_guest(command) == 0)
    # second run with --foreign, to create fake foreign entries
    command += ' --foreign'
    # renamed from 'remote' which shadowed the path variable above
    foreign_ok = (self.run_in_guest(command) == 0)
    return ( local_ok and foreign_ok)
1392 def gather_logs (self):
1393 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1394 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1395 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1396 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1397 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1398 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1400 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1401 self.gather_var_logs ()
1403 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1404 self.gather_pgsql_logs ()
1406 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1407 for site_spec in self.plc_spec['sites']:
1408 test_site = TestSite (self,site_spec)
1409 for node_spec in site_spec['nodes']:
1410 test_node=TestNode(self,test_site,node_spec)
1411 test_node.gather_qemu_logs()
1413 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1414 self.gather_nodes_var_logs()
1416 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1417 self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    # fetch each sliver's /var/log into logs/sliver.var-log.<sliver>/
    # by piping a remote tar into a local untar
    for test_sliver in self.all_sliver_objs():
        remote = test_sliver.tar_var_logs()
        utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
        command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
        utils.system(command)
    # NOTE(review): a trailing 'return True' appears to have been lost in
    # the garbled source - restored so the step reports success
    return True
def gather_var_logs (self):
    # fetch the plc's /var/log into logs/myplc.var-log.<name>/ by piping
    # a tar made in the guest into a local untar
    target = "logs/myplc.var-log.%s"%self.name()
    utils.system("mkdir -p " + target)
    to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(to_plc + "| tar -C " + target + " -xf -")
    # make the httpd logs readable by everybody
    utils.system("chmod a+r,a+x " + target + "/httpd")
def gather_pgsql_logs (self):
    # fetch the plc's postgres logs into logs/myplc.pgsql-log.<name>/
    target = "logs/myplc.pgsql-log.%s"%self.name()
    utils.system("mkdir -p " + target)
    to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(to_plc + "| tar -C " + target + " -xf -")
def gather_nodes_var_logs (self):
    # fetch each node's /var/log into logs/node.var-log.<node>/ over ssh,
    # using the test key
    for site_spec in self.plc_spec['sites']:
        site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode(self,site,node_spec)
            node_ssh = TestSsh (node.name(),key="keys/key1.rsa")
            pipeline = node_ssh.actual_command("tar -C /var/log -cf - .")
            pipeline = pipeline + "| tar -C logs/node.var-log.%s -xf -"%node.name()
            utils.system("mkdir -p logs/node.var-log.%s"%node.name())
            utils.system(pipeline)
# returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    # uses options.dbname when it is present and is a string; otherwise
    # fall back to today's date so each day gets its own dump file
    # (restored: the try/except scaffolding was garbled in the source;
    # rewritten without exception-driven control flow)
    name = getattr(self.options, 'dbname', None)
    if not isinstance(name, StringTypes):
        name = str(datetime.datetime.now().date())
    return "/root/%s-%s.sql"%(database,name)
def plc_db_dump(self):
    'dump the planetlab5 DB in /root in the PLC - filename has time'
    # note: "planetab5" (sic) is the filename stem used by plc_db_restore
    # as well - keep the two consistent if ever renamed
    dump=self.dbfile("planetab5")
    self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
    utils.header('Dumped planetlab5 database in %s'%dump)
    # step methods must return a boolean (restored missing 'return True')
    return True
def plc_db_restore(self):
    'restore the planetlab5 DB - looks broken, but run -n might help'
    # note: "planetab5" (sic) matches the stem used by plc_db_dump
    dump=self.dbfile("planetab5")
    ##stop httpd service
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
    self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
    ##starting httpd service
    self.run_in_guest('service httpd start')

    utils.header('Database restored from ' + dump)
    # step methods must return a boolean (restored missing 'return True')
    return True
# convenience steps that just sleep: standby_generic (defined above)
# derives the number of minutes from the method name (standby_<N>),
# so each def below only needs the decorator and an empty body
# (restored: the @standby_generic decorator lines were missing from
# the garbled source - without them these steps would be no-ops)
@standby_generic
def standby_1(): pass
@standby_generic
def standby_2(): pass
@standby_generic
def standby_3(): pass
@standby_generic
def standby_4(): pass
@standby_generic
def standby_5(): pass
@standby_generic
def standby_6(): pass
@standby_generic
def standby_7(): pass
@standby_generic
def standby_8(): pass
@standby_generic
def standby_9(): pass
@standby_generic
def standby_10(): pass
@standby_generic
def standby_11(): pass
@standby_generic
def standby_12(): pass
@standby_generic
def standby_13(): pass
@standby_generic
def standby_14(): pass
@standby_generic
def standby_15(): pass
@standby_generic
def standby_16(): pass
@standby_generic
def standby_17(): pass
@standby_generic
def standby_18(): pass
@standby_generic
def standby_19(): pass
@standby_generic
def standby_20(): pass