1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
24 # step methods must take (self) and return a boolean (options is a member of the class)
def standby (minutes,dry_run):
    """Sleep for <minutes> minutes - the building brick for the standby_* steps.

    Honors dry_run (no actual sleep) and returns True so it can be used
    as a step result (see the note above: steps must return a boolean)."""
    utils.header('Entering StandBy for %d mn'%minutes)
    # NOTE(review): dry_run guard and return reconstructed - visible text
    # ignored dry_run and returned None
    if not dry_run:
        time.sleep(60*minutes)
    return True
def standby_generic (func):
    """Decorator that turns a placeholder method named standby_<n>_... into an
    actual step sleeping for <n> minutes.

    NOTE(review): the inner wrapper was reconstructed - the visible text
    referenced 'self' at decorator scope, which only makes sense inside a
    wrapper function."""
    def actual(self):
        minutes=int(func.__name__.split("_")[1])
        return standby(minutes,self.options.dry_run)
    # expose the placeholder's doc on the actual step
    actual.__doc__=func.__doc__
    return actual
def node_mapper (method):
    """Decorator: lift a TestNode method into a TestPlc step that runs it on
    every node of every site; returns False if any node fails.

    NOTE(review): wrapper def, 'overall' accumulator and returns were elided
    in the visible text - reconstructed."""
    def actual(self):
        overall=True
        node_method = TestNode.__dict__[method.__name__]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self,test_site,node_spec)
                if not node_method(test_node): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual
def slice_mapper (method):
    """Decorator: lift a TestSlice method into a TestPlc step that runs it on
    every slice in the spec; returns False if any slice fails.

    NOTE(review): wrapper def, 'overall' accumulator and returns were elided
    in the visible text - reconstructed."""
    def actual(self):
        overall=True
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual
def slice_sfa_mapper (method):
    """Decorator: like slice_mapper but maps over the sfa slice specs and
    TestSliceSfa objects.

    NOTE(review): wrapper def, 'overall' accumulator and returns were elided
    in the visible text - reconstructed."""
    def actual(self):
        overall=True
        slice_method = TestSliceSfa.__dict__[method.__name__]
        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSliceSfa(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual
89 'vs_delete','timestamp_vs','vs_create', SEP,
90 'plc_install', 'plc_configure', 'plc_start', SEP,
91 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
92 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
93 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
94 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
95 'sfa_install', 'sfa_configure', 'cross_sfa_configure', 'sfa_import', 'sfa_start', SEPSFA,
96 'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
97 'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
98 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
99 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
100 # but as the stress test might take a while, we sometimes missed the debug mode..
101 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
102 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
103 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
105 'force_gather_logs', SEP,
110 'show_boxes', 'local_list','local_rel','local_rel_plc','local_rel_qemu',SEP,
111 'plc_stop', 'vs_start', 'vs_stop', SEP,
112 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
113 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
114 'delete_leases', 'list_leases', SEP,
116 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
117 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
118 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEP,
119 'plc_db_dump' , 'plc_db_restore', SEP,
120 'standby_1 through 20',SEP,
124 def printable_steps (list):
125 single_line=" ".join(list)+" "
126 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
128 def valid_step (step):
129 return step != SEP and step != SEPSFA
131 # turn off the sfa-related steps when build has skipped SFA
132 # this is originally for centos5 as recent SFAs won't build on this platform
134 def check_whether_build_has_sfa (rpms_url):
135 # warning, we're now building 'sface' so let's be a bit more picky
136 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
137 # full builds are expected to return with 0 here
139 # move all steps containing 'sfa' from default_steps to other_steps
140 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
141 TestPlc.other_steps += sfa_steps
142 for step in sfa_steps: TestPlc.default_steps.remove(step)
# NOTE(review): heavily elided fragment, kept byte-identical. Several original
# lines are missing: presumably 'self.options=options' (self.options is read
# below), the conditional around the deprecation 'raise', and the def lines
# for the name()/host_box()/is_local() accessors whose bodies follow.
144 def __init__ (self,plc_spec,options):
145 self.plc_spec=plc_spec
147 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
149 self.vserverip=plc_spec['vserverip']
150 self.vservername=plc_spec['vservername']
151 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
154 raise Exception,'chroot-based myplc testing is deprecated'
155 self.apiserver=TestApiserver(self.url,options.dry_run)
158 name=self.plc_spec['name']
159 return "%s.%s"%(name,self.vservername)
162 return self.plc_spec['host_box']
165 return self.test_ssh.is_local()
167 # define the API methods on this object through xmlrpc
168 # would help, but not strictly necessary
172 def actual_command_in_guest (self,command):
173 return self.test_ssh.actual_command(self.host_to_guest(command))
175 def start_guest (self):
176 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
178 def stop_guest (self):
179 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))
181 def run_in_guest (self,command):
182 return utils.system(self.actual_command_in_guest(command))
184 def run_in_host (self,command):
185 return self.test_ssh.run_in_buildname(command)
187 #command gets run in the vserver
188 def host_to_guest(self,command):
189 return "vserver %s exec %s"%(self.vservername,command)
191 #start/stop the vserver
192 def start_guest_in_host(self):
193 return "vserver %s start"%(self.vservername)
195 def stop_guest_in_host(self):
196 return "vserver %s stop"%(self.vservername)
199 def run_in_guest_piped (self,local,remote):
200 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
202 def auth_root (self):
203 return {'Username':self.plc_spec['PLC_ROOT_USER'],
204 'AuthMethod':'password',
205 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
206 'Role' : self.plc_spec['role']
208 def locate_site (self,sitename):
209 for site in self.plc_spec['sites']:
210 if site['site_fields']['name'] == sitename:
212 if site['site_fields']['login_base'] == sitename:
214 raise Exception,"Cannot locate site %s"%sitename
216 def locate_node (self,nodename):
217 for site in self.plc_spec['sites']:
218 for node in site['nodes']:
219 if node['name'] == nodename:
221 raise Exception,"Cannot locate node %s"%nodename
223 def locate_hostname (self,hostname):
224 for site in self.plc_spec['sites']:
225 for node in site['nodes']:
226 if node['node_fields']['hostname'] == hostname:
228 raise Exception,"Cannot locate hostname %s"%hostname
230 def locate_key (self,keyname):
231 for key in self.plc_spec['keys']:
232 if key['name'] == keyname:
234 raise Exception,"Cannot locate key %s"%keyname
236 def locate_slice (self, slicename):
237 for slice in self.plc_spec['slices']:
238 if slice['slice_fields']['name'] == slicename:
240 raise Exception,"Cannot locate slice %s"%slicename
242 def all_sliver_objs (self):
244 for slice_spec in self.plc_spec['slices']:
245 slicename = slice_spec['slice_fields']['name']
246 for nodename in slice_spec['nodenames']:
247 result.append(self.locate_sliver_obj (nodename,slicename))
250 def locate_sliver_obj (self,nodename,slicename):
251 (site,node) = self.locate_node(nodename)
252 slice = self.locate_slice (slicename)
254 test_site = TestSite (self, site)
255 test_node = TestNode (self, test_site,node)
256 # xxx the slice site is assumed to be the node site - mhh - probably harmless
257 test_slice = TestSlice (self, test_site, slice)
258 return TestSliver (self, test_node, test_slice)
260 def locate_first_node(self):
261 nodename=self.plc_spec['slices'][0]['nodenames'][0]
262 (site,node) = self.locate_node(nodename)
263 test_site = TestSite (self, site)
264 test_node = TestNode (self, test_site,node)
267 def locate_first_sliver (self):
268 slice_spec=self.plc_spec['slices'][0]
269 slicename=slice_spec['slice_fields']['name']
270 nodename=slice_spec['nodenames'][0]
271 return self.locate_sliver_obj(nodename,slicename)
273 # all different hostboxes used in this plc
274 def gather_hostBoxes(self):
275 # maps on sites and nodes, return [ (host_box,test_node) ]
277 for site_spec in self.plc_spec['sites']:
278 test_site = TestSite (self,site_spec)
279 for node_spec in site_spec['nodes']:
280 test_node = TestNode (self, test_site, node_spec)
281 if not test_node.is_real():
282 tuples.append( (test_node.host_box(),test_node) )
283 # transform into a dict { 'host_box' -> [ test_node .. ] }
285 for (box,node) in tuples:
286 if not result.has_key(box):
289 result[box].append(node)
292 # a step for checking this stuff
293 def show_boxes (self):
294 'print summary of nodes location'
295 for (box,nodes) in self.gather_hostBoxes().iteritems():
296 print box,":"," + ".join( [ node.name() for node in nodes ] )
299 # make this a valid step
300 def qemu_kill_all(self):
301 'kill all qemu instances on the qemu boxes involved by this setup'
302 # this is the brute force version, kill all qemus on that host box
303 for (box,nodes) in self.gather_hostBoxes().iteritems():
304 # pass the first nodename, as we don't push template-qemu on testboxes
305 nodedir=nodes[0].nodedir()
306 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
309 # make this a valid step
310 def qemu_list_all(self):
311 'list all qemu instances on the qemu boxes involved by this setup'
312 for (box,nodes) in self.gather_hostBoxes().iteritems():
313 # this is the brute force version, kill all qemus on that host box
314 TestBoxQemu(box,self.options.buildname).qemu_list_all()
# NOTE(review): the per-node bodies of qemu_list_mine/qemu_kill_mine are
# elided here - presumably a per-node TestBoxQemu call inside the loop;
# TODO confirm against the full file. Kept byte-identical.
317 # kill only the right qemus
318 def qemu_list_mine(self):
319 'list qemu instances for our nodes'
320 for (box,nodes) in self.gather_hostBoxes().iteritems():
321 # the fine-grain version
326 # kill only the right qemus
327 def qemu_kill_mine(self):
328 'kill the qemu instances for our nodes'
329 for (box,nodes) in self.gather_hostBoxes().iteritems():
330 # the fine-grain version
335 #################### display config
337 "show test configuration after localization"
338 self.display_pass (1)
339 self.display_pass (2)
342 def show_vplc (self):
343 "print out a shell command that can be cut'n pasted to define the GUEST variable"
344 # these work but the shell prompt does not get displayed..
345 command1="ssh %s vserver %s enter"%(self.plc_spec['host_box'],self.plc_spec['vservername'])
346 command2="ssh root@%s %s"%(socket.gethostname(),command1)
347 # guess local domain from hostname
348 domain=socket.gethostname().split('.',1)[1]
349 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
350 print "export PLCHOST=%s"%fqdn
351 print "export GUEST=%s"%self.plc_spec['vservername']
355 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
356 def display_pass (self,passno):
357 for (key,val) in self.plc_spec.iteritems():
358 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
362 self.display_site_spec(site)
363 for node in site['nodes']:
364 self.display_node_spec(node)
365 elif key=='initscripts':
366 for initscript in val:
367 self.display_initscript_spec (initscript)
370 self.display_slice_spec (slice)
373 self.display_key_spec (key)
375 if key not in ['sites','initscripts','slices','keys', 'sfa']:
376 print '+ ',key,':',val
378 def display_site_spec (self,site):
379 print '+ ======== site',site['site_fields']['name']
380 for (k,v) in site.iteritems():
381 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
384 print '+ ','nodes : ',
386 print node['node_fields']['hostname'],'',
392 print user['name'],'',
394 elif k == 'site_fields':
395 print '+ login_base',':',v['login_base']
396 elif k == 'address_fields':
402 def display_initscript_spec (self,initscript):
403 print '+ ======== initscript',initscript['initscript_fields']['name']
405 def display_key_spec (self,key):
406 print '+ ======== key',key['name']
408 def display_slice_spec (self,slice):
409 print '+ ======== slice',slice['slice_fields']['name']
410 for (k,v) in slice.iteritems():
423 elif k=='slice_fields':
424 print '+ fields',':',
425 print 'max_nodes=',v['max_nodes'],
430 def display_node_spec (self,node):
431 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
432 print "hostname=",node['node_fields']['hostname'],
433 print "ip=",node['interface_fields']['ip']
434 if self.options.verbose:
435 utils.pprint("node details",node,depth=3)
437 # another entry point for just showing the boxes involved
438 def display_mapping (self):
439 TestPlc.display_mapping_plc(self.plc_spec)
443 def display_mapping_plc (plc_spec):
444 print '+ MyPLC',plc_spec['name']
445 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
446 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
447 for site_spec in plc_spec['sites']:
448 for node_spec in site_spec['nodes']:
449 TestPlc.display_mapping_node(node_spec)
452 def display_mapping_node (node_spec):
453 print '+ NODE %s'%(node_spec['name'])
454 print '+\tqemu box %s'%node_spec['host_box']
455 print '+\thostname=%s'%node_spec['node_fields']['hostname']
457 # write a timestamp in /vservers/<>.timestamp
458 # cannot be inside the vserver, that causes vserver .. build to cough
459 def timestamp_vs (self):
461 return utils.system(self.test_ssh.actual_command("echo %d > /vservers/%s.timestamp"%(now,self.vservername)))==0
463 def local_pre (self):
464 "run site-dependant pre-test script as defined in LocalTestResources"
465 from LocalTestResources import local_resources
466 return local_resources.step_pre(self)
468 def local_post (self):
469 "run site-dependant post-test script as defined in LocalTestResources"
470 from LocalTestResources import local_resources
471 return local_resources.step_post(self)
473 def local_list (self):
474 "run site-dependant list script as defined in LocalTestResources"
475 from LocalTestResources import local_resources
476 return local_resources.step_list(self)
478 def local_rel (self):
479 "run site-dependant release script as defined in LocalTestResources"
480 from LocalTestResources import local_resources
481 return local_resources.step_release(self)
483 def local_rel_plc (self):
484 "run site-dependant release script as defined in LocalTestResources"
485 from LocalTestResources import local_resources
486 return local_resources.step_release_plc(self)
488 def local_rel_qemu (self):
489 "run site-dependant release script as defined in LocalTestResources"
490 from LocalTestResources import local_resources
491 return local_resources.step_release_qemu(self)
494 "vserver delete the test myplc"
495 self.run_in_host("vserver --silent %s delete"%self.vservername)
496 self.run_in_host("rm -f /vservers/%s.timestamp"%self.vservername)
500 # historically the build was being fetched by the tests
501 # now the build pushes itself as a subdir of the tests workdir
502 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
503 def vs_create (self):
504 "vserver creation (no install done)"
505 # push the local build/ dir to the testplc box
507 # a full path for the local calls
508 build_dir=os.path.dirname(sys.argv[0])
509 # sometimes this is empty - set to "." in such a case
510 if not build_dir: build_dir="."
511 build_dir += "/build"
513 # use a standard name - will be relative to remote buildname
515 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
516 self.test_ssh.rmdir(build_dir)
517 self.test_ssh.copy(build_dir,recursive=True)
518 # the repo url is taken from arch-rpms-url
519 # with the last step (i386) removed
520 repo_url = self.options.arch_rpms_url
521 for level in [ 'arch' ]:
522 repo_url = os.path.dirname(repo_url)
523 # pass the vbuild-nightly options to vtest-init-vserver
525 test_env_options += " -p %s"%self.options.personality
526 test_env_options += " -d %s"%self.options.pldistro
527 test_env_options += " -f %s"%self.options.fcdistro
528 script="vtest-init-vserver.sh"
529 vserver_name = self.vservername
530 vserver_options="--netdev eth0 --interface %s"%self.vserverip
532 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
533 vserver_options += " --hostname %s"%vserver_hostname
535 print "Cannot reverse lookup %s"%self.vserverip
536 print "This is considered fatal, as this might pollute the test results"
538 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
539 return self.run_in_host(create_vserver) == 0
542 def plc_install(self):
543 "yum install myplc, noderepo, and the plain bootstrapfs"
545 # workaround for getting pgsql8.2 on centos5
546 if self.options.fcdistro == "centos5":
547 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
550 if self.options.personality == "linux32":
552 elif self.options.personality == "linux64":
555 raise Exception, "Unsupported personality %r"%self.options.personality
556 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
559 pkgs_list.append ("slicerepo-%s"%nodefamily)
560 pkgs_list.append ("myplc")
561 pkgs_list.append ("noderepo-%s"%nodefamily)
562 pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
563 pkgs_string=" ".join(pkgs_list)
564 self.run_in_guest("yum -y install %s"%pkgs_string)
565 return self.run_in_guest("rpm -q %s"%pkgs_string)==0
# NOTE(review): elided fragment, kept byte-identical. The docstring, most of
# the PLC_* variable list, and (presumably) a fileconf.close() before the
# 'cat' are missing here - TODO confirm against the full file.
568 def plc_configure(self):
570 tmpname='%s.plc-config-tty'%(self.name())
571 fileconf=open(tmpname,'w')
572 for var in [ 'PLC_NAME',
577 'PLC_MAIL_SUPPORT_ADDRESS',
580 # Above line was added for integrating SFA Testing
586 'PLC_RESERVATION_GRANULARITY',
588 'PLC_OMF_XMPP_SERVER',
590 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
591 fileconf.write('w\n')
592 fileconf.write('q\n')
594 utils.system('cat %s'%tmpname)
595 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
596 utils.system('rm %s'%tmpname)
601 self.run_in_guest('service plc start')
606 self.run_in_guest('service plc stop')
610 "start the PLC vserver"
615 "stop the PLC vserver"
619 # stores the keys from the config for further use
620 def keys_store(self):
621 "stores test users ssh keys in keys/"
622 for key_spec in self.plc_spec['keys']:
623 TestKey(self,key_spec).store_key()
626 def keys_clean(self):
627 "removes keys cached in keys/"
628 utils.system("rm -rf ./keys")
631 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
632 # for later direct access to the nodes
633 def keys_fetch(self):
634 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
636 if not os.path.isdir(dir):
638 vservername=self.vservername
640 prefix = 'debug_ssh_key'
641 for ext in [ 'pub', 'rsa' ] :
642 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
643 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
644 if self.test_ssh.fetch(src,dst) != 0: overall=False
648 "create sites with PLCAPI"
649 return self.do_sites()
651 def delete_sites (self):
652 "delete sites with PLCAPI"
653 return self.do_sites(action="delete")
655 def do_sites (self,action="add"):
656 for site_spec in self.plc_spec['sites']:
657 test_site = TestSite (self,site_spec)
658 if (action != "add"):
659 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
660 test_site.delete_site()
661 # deleted with the site
662 #test_site.delete_users()
665 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
666 test_site.create_site()
667 test_site.create_users()
670 def delete_all_sites (self):
671 "Delete all sites in PLC, and related objects"
672 print 'auth_root',self.auth_root()
673 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
674 for site_id in site_ids:
675 print 'Deleting site_id',site_id
676 self.apiserver.DeleteSite(self.auth_root(),site_id)
680 "create nodes with PLCAPI"
681 return self.do_nodes()
682 def delete_nodes (self):
683 "delete nodes with PLCAPI"
684 return self.do_nodes(action="delete")
686 def do_nodes (self,action="add"):
687 for site_spec in self.plc_spec['sites']:
688 test_site = TestSite (self,site_spec)
690 utils.header("Deleting nodes in site %s"%test_site.name())
691 for node_spec in site_spec['nodes']:
692 test_node=TestNode(self,test_site,node_spec)
693 utils.header("Deleting %s"%test_node.name())
694 test_node.delete_node()
696 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
697 for node_spec in site_spec['nodes']:
698 utils.pprint('Creating node %s'%node_spec,node_spec)
699 test_node = TestNode (self,test_site,node_spec)
700 test_node.create_node ()
703 def nodegroups (self):
704 "create nodegroups with PLCAPI"
705 return self.do_nodegroups("add")
706 def delete_nodegroups (self):
707 "delete nodegroups with PLCAPI"
708 return self.do_nodegroups("delete")
712 def translate_timestamp (start,grain,timestamp):
713 if timestamp < TestPlc.YEAR: return start+timestamp*grain
714 else: return timestamp
717 def timestamp_printable (timestamp):
718 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
721 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
723 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
724 print 'API answered grain=',grain
725 start=(now/grain)*grain
727 # find out all nodes that are reservable
728 nodes=self.all_reservable_nodenames()
730 utils.header ("No reservable node found - proceeding without leases")
733 # attach them to the leases as specified in plc_specs
734 # this is where the 'leases' field gets interpreted as relative of absolute
735 for lease_spec in self.plc_spec['leases']:
736 # skip the ones that come with a null slice id
737 if not lease_spec['slice']: continue
738 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
739 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
740 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
741 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
742 if lease_addition['errors']:
743 utils.header("Cannot create leases, %s"%lease_addition['errors'])
746 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
747 (nodes,lease_spec['slice'],
748 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
749 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
753 def delete_leases (self):
754 "remove all leases in the myplc side"
755 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
756 utils.header("Cleaning leases %r"%lease_ids)
757 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
760 def list_leases (self):
761 "list all leases known to the myplc"
762 leases = self.apiserver.GetLeases(self.auth_root())
765 current=l['t_until']>=now
766 if self.options.verbose or current:
767 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
768 TestPlc.timestamp_printable(l['t_from']),
769 TestPlc.timestamp_printable(l['t_until'])))
772 # create nodegroups if needed, and populate
773 def do_nodegroups (self, action="add"):
774 # 1st pass to scan contents
776 for site_spec in self.plc_spec['sites']:
777 test_site = TestSite (self,site_spec)
778 for node_spec in site_spec['nodes']:
779 test_node=TestNode (self,test_site,node_spec)
780 if node_spec.has_key('nodegroups'):
781 nodegroupnames=node_spec['nodegroups']
782 if isinstance(nodegroupnames,StringTypes):
783 nodegroupnames = [ nodegroupnames ]
784 for nodegroupname in nodegroupnames:
785 if not groups_dict.has_key(nodegroupname):
786 groups_dict[nodegroupname]=[]
787 groups_dict[nodegroupname].append(test_node.name())
788 auth=self.auth_root()
790 for (nodegroupname,group_nodes) in groups_dict.iteritems():
792 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
793 # first, check if the nodetagtype is here
794 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
796 tag_type_id = tag_types[0]['tag_type_id']
798 tag_type_id = self.apiserver.AddTagType(auth,
799 {'tagname':nodegroupname,
800 'description': 'for nodegroup %s'%nodegroupname,
802 print 'located tag (type)',nodegroupname,'as',tag_type_id
804 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
806 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
807 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
808 # set node tag on all nodes, value='yes'
809 for nodename in group_nodes:
811 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
813 traceback.print_exc()
814 print 'node',nodename,'seems to already have tag',nodegroupname
817 expect_yes = self.apiserver.GetNodeTags(auth,
818 {'hostname':nodename,
819 'tagname':nodegroupname},
820 ['value'])[0]['value']
821 if expect_yes != "yes":
822 print 'Mismatch node tag on node',nodename,'got',expect_yes
825 if not self.options.dry_run:
826 print 'Cannot find tag',nodegroupname,'on node',nodename
830 print 'cleaning nodegroup',nodegroupname
831 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
833 traceback.print_exc()
837 # return a list of tuples (nodename,qemuname)
838 def all_node_infos (self) :
840 for site_spec in self.plc_spec['sites']:
841 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
842 for node_spec in site_spec['nodes'] ]
845 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
846 def all_reservable_nodenames (self):
848 for site_spec in self.plc_spec['sites']:
849 for node_spec in site_spec['nodes']:
850 node_fields=node_spec['node_fields']
851 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
852 res.append(node_fields['hostname'])
855 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
856 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
857 if self.options.dry_run:
861 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
862 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
863 # the nodes that haven't checked yet - start with a full list and shrink over time
864 tocheck = self.all_hostnames()
865 utils.header("checking nodes %r"%tocheck)
866 # create a dict hostname -> status
867 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
870 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
872 for array in tocheck_status:
873 hostname=array['hostname']
874 boot_state=array['boot_state']
875 if boot_state == target_boot_state:
876 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
878 # if it's a real node, never mind
879 (site_spec,node_spec)=self.locate_hostname(hostname)
880 if TestNode.is_real_model(node_spec['node_fields']['model']):
881 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
883 boot_state = target_boot_state
884 elif datetime.datetime.now() > graceout:
885 utils.header ("%s still in '%s' state"%(hostname,boot_state))
886 graceout=datetime.datetime.now()+datetime.timedelta(1)
887 status[hostname] = boot_state
889 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
892 if datetime.datetime.now() > timeout:
893 for hostname in tocheck:
894 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
896 # otherwise, sleep for a while
898 # only useful in empty plcs
901 def nodes_booted(self):
902 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
904 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
906 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
907 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
908 vservername=self.vservername
911 local_key = "keys/%(vservername)s-debug.rsa"%locals()
914 local_key = "keys/key1.rsa"
915 node_infos = self.all_node_infos()
916 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
917 for (nodename,qemuname) in node_infos:
918 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
919 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
920 (timeout_minutes,silent_minutes,period))
922 for node_info in node_infos:
923 (hostname,qemuname) = node_info
924 # try to run 'hostname' in the node
925 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
926 # don't spam logs - show the command only after the grace period
927 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
929 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
931 node_infos.remove(node_info)
933 # we will have tried real nodes once, in case they're up - but if not, just skip
934 (site_spec,node_spec)=self.locate_hostname(hostname)
935 if TestNode.is_real_model(node_spec['node_fields']['model']):
936 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
937 node_infos.remove(node_info)
940 if datetime.datetime.now() > timeout:
941 for (hostname,qemuname) in node_infos:
942 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
944 # otherwise, sleep for a while
946 # only useful in empty plcs
949 def ssh_node_debug(self):
950 "Tries to ssh into nodes in debug mode with the debug ssh key"
951 return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)
953 def ssh_node_boot(self):
954 "Tries to ssh into nodes in production mode with the root ssh key"
955 return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
958 def qemu_local_init (self):
959 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
963 "all nodes: invoke GetBootMedium and store result locally"
966 def qemu_local_config (self):
967 "all nodes: compute qemu config qemu.conf and store it locally"
970 def nodestate_reinstall (self):
971 "all nodes: mark PLCAPI boot_state as reinstall"
974 def nodestate_safeboot (self):
975 "all nodes: mark PLCAPI boot_state as safeboot"
978 def nodestate_boot (self):
979 "all nodes: mark PLCAPI boot_state as boot"
982 def nodestate_show (self):
983 "all nodes: show PLCAPI boot_state"
986 def qemu_export (self):
987 "all nodes: push local node-dep directory on the qemu box"
990 ### check hooks : invoke scripts from hooks/{node,slice}
991 def check_hooks_node (self):
992 return self.locate_first_node().check_hooks()
993 def check_hooks_sliver (self) :
994 return self.locate_first_sliver().check_hooks()
996 def check_hooks (self):
997 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
998 return self.check_hooks_node() and self.check_hooks_sliver()
1001 def do_check_initscripts(self):
1003 for slice_spec in self.plc_spec['slices']:
1004 if not slice_spec.has_key('initscriptstamp'):
1006 stamp=slice_spec['initscriptstamp']
1007 for nodename in slice_spec['nodenames']:
1008 (site,node) = self.locate_node (nodename)
1009 # xxx - passing the wrong site - probably harmless
1010 test_site = TestSite (self,site)
1011 test_slice = TestSlice (self,test_site,slice_spec)
1012 test_node = TestNode (self,test_site,node)
1013 test_sliver = TestSliver (self, test_node, test_slice)
1014 if not test_sliver.check_initscript_stamp(stamp):
1018 def check_initscripts(self):
1019 "check that the initscripts have triggered"
1020 return self.do_check_initscripts()
def initscripts(self):
    "create initscripts with PLCAPI"
    # register every initscript of the spec through the API
    for initscript in self.plc_spec['initscripts']:
        utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
        self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
    # step methods must return a boolean (see module header); the visible code
    # fell off the end returning None, which reads as a failed step
    return True
# Deletes every initscript of the spec through the API, tolerating absent ones.
# NOTE(review): the listing elides lines around DeleteInitScript -- almost
# certainly a try:/except: pair (the 'deletion went wrong' message only makes
# sense in an except branch) plus a final return; confirm before editing.
1029 def delete_initscripts (self):
1030 "delete initscripts with PLCAPI"
1031 for initscript in self.plc_spec['initscripts']:
1032 initscript_name = initscript['initscript_fields']['name']
1033 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1035 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
# Python 2 print statements below
1036 print initscript_name,'deleted'
1038 print 'deletion went wrong - probably did not exist'
# NOTE(review): the def line of this step is elided from the listing; given the
# docstring and the symmetry with delete_slices just below, it is presumably
# 'def slices (self):' -- confirm against the full file.
1043 "create slices with PLCAPI"
1044 return self.do_slices()
def delete_slices(self):
    "delete slices with PLCAPI"
    # same machinery as slice creation, driven with the 'delete' action
    return self.do_slices(action="delete")
# Creates or deletes every slice of the spec, depending on 'action'.
# NOTE(review): the listing elides the lines that dispatch on 'action' (the
# delete branch vs the create branch) and any final return -- the two visible
# branches cannot both execute unconditionally; confirm before editing.
1050 def do_slices (self, action="add"):
# 'slice' shadows the builtin of that name -- harmless here but worth renaming
1051 for slice in self.plc_spec['slices']:
1052 site_spec = self.locate_site (slice['sitename'])
1053 test_site = TestSite(self,site_spec)
1054 test_slice=TestSlice(self,test_site,slice)
1056 utils.header("Deleting slices in site %s"%test_site.name())
1057 test_slice.delete_slice()
1059 utils.pprint("Creating slice",slice)
1060 test_slice.create_slice()
1061 utils.header('Created Slice %s'%slice['slice_fields']['name'])
# NOTE(review): these steps appear as def + docstring only; bodies (and likely
# mapper decorations, per the helpers at the top of the file) are elided.
1065 def ssh_slice(self):
1066 "tries to ssh-enter the slice with the user key, to ensure slice creation"
1070 def keys_clear_known_hosts (self):
1071 "remove test nodes entries from the local known_hosts file"
1075 def qemu_start (self) :
1076 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
1080 def timestamp_qemu (self) :
# NOTE(review): docstring below is a verbatim copy of qemu_start's -- for a
# 'timestamp' step this looks like a copy-paste slip; confirm the intended text
1081 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
# Runs a TCP server in one sliver and a client in another, per 'tcp_test' spec.
# NOTE(review): the listing elides the loop over 'specs' that binds 'spec' and
# 'port', the failure branches, and the final return.
1084 def check_tcp (self):
1085 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1086 specs = self.plc_spec['tcp_test']
1091 s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
1092 if not s_test_sliver.run_tcp_server(port,timeout=10):
1096 # idem for the client side
# NOTE(review): the client sliver is located with 'server_node'/'server_slice'
# -- this looks like a copy-paste bug; it presumably should read
# spec['client_node'],spec['client_slice'] -- confirm against the spec format
1097 c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
1098 if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
# Copies the stress-test script into the plc image and runs it in --check mode.
# NOTE(review): 'command' is augmented with '+=' before any visible assignment --
# its initialization (presumably 'command = location') is elided from the listing.
1102 def plcsh_stress_test (self):
1103 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1104 # install the stress-test in the plc image
1105 location = "/usr/share/plc_api/plcsh_stress_test.py"
1106 remote="/vservers/%s/%s"%(self.vservername,location)
1107 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1109 command += " -- --check"
# with a size-1 deployment, run the reduced variant of the stress test
1110 if self.options.size == 1:
1111 command += " --tiny"
1112 return ( self.run_in_guest(command) == 0)
1114 # populate runs the same utility with slightly different options
1115 # in particular runs with --preserve (don't cleanup) and without --check
1116 # also it gets run twice, once with the --foreign option for creating fake foreign entries
def sfa_install(self):
    "yum install sfa, sfa-plc and sfa-client"
    # yum's exit code is not checked; success is asserted via rpm -q afterwards
    self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")
    installed = self.run_in_guest("rpm -q sfa sfa-client sfa-plc sfa-sfatables")
    return installed == 0
def sfa_dbclean(self):
    "thoroughly wipes off the SFA database"
    # bug fix: the original computed '... == 0' and silently discarded the
    # result; propagate the nuke script's success as the step's boolean outcome
    return self.run_in_guest("sfa-nuke-plc.py") == 0
# Removes the PLC-side slices and users that the SFA scripts created.
# NOTE(review): several listing lines are elided (including any final return).
# The bare 'except:' clauses swallow every error, not just "already absent" --
# deliberate best-effort here, but worth narrowing to the API's exception type.
1131 def sfa_plcclean(self):
1132 "cleans the PLC entries that were created as a side effect of running the script"
1134 sfa_spec=self.plc_spec['sfa']
1136 for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
1137 slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
1138 try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
1139 except: print "Slice %s already absent from PLC db"%slicename
1141 username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
1142 try: self.apiserver.DeletePerson(self.auth_root(),username)
1143 except: print "User %s already absent from PLC db"%username
# operator reminder: the registry must be re-seeded after this cleanup
1145 print "REMEMBER TO RUN sfa_import AGAIN"
def sfa_uninstall(self):
    "uses rpm to uninstall sfa - ignore result"
    self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
    # wipe the state, config and logs the packages leave behind
    for cleanup in ["rm -rf /var/lib/sfa",
                    "rm -rf /etc/sfa",
                    "rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon"]:
        self.run_in_guest(cleanup)
    # force sfa-plc out even when its uninstall scriptlets would fail
    self.run_in_guest("rpm -e --noscripts sfa-plc")
    # step methods must return a boolean (module header); per the docstring all
    # command results are deliberately ignored, so this step always "succeeds"
    return True
1158 ### run unit tests for SFA
1159 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1160 # Running Transaction
1161 # Transaction couldn't start:
1162 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1163 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1164 # no matter how many Gbs are available on the testplc
1165 # could not figure out what's wrong, so...
1166 # if the yum install phase fails, consider the test is successful
1167 # other combinations will eventually run it hopefully
# Installs sfa-tests and runs the SFA unit-test suite in the guest.
# NOTE(review): the listing elides the line after the warning -- per the long
# comment block above, an install failure is meant to be non-fatal, so the
# elided line is presumably 'return True'; confirm before editing.
1168 def sfa_utest(self):
1169 "yum install sfa-tests and run SFA unittests"
1170 self.run_in_guest("yum -y install sfa-tests")
1171 # failed to install - forget it
1172 if self.run_in_guest("rpm -q sfa-tests")!=0:
1173 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1175 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
# Ensures a local per-plc config directory 'conf.<name>' exists.
# NOTE(review): the def line (presumably 'def confdir(self):', given that
# conffile() below calls self.confdir()) and the final 'return dirname' are
# elided from the listing. Also: raising a string is illegal in Python 3 and
# deprecated in 2.x -- should raise an Exception subclass.
1179 dirname="conf.%s"%self.plc_spec['name']
1180 if not os.path.isdir(dirname):
1181 utils.system("mkdir -p %s"%dirname)
1182 if not os.path.isdir(dirname):
1183 raise "Cannot create config dir for plc %s"%self.name()
def conffile(self, filename):
    # full path of a file of that name inside this plc's config directory
    return self.confdir() + "/" + filename
# Ensures a subdirectory under the plc config dir exists, optionally wiped first.
# NOTE(review): the guard using the 'clean' parameter (presumably 'if clean:'
# before the rm -rf) and the final 'return subdirname' are elided from the
# listing. String-raise is illegal in Python 3 -- should raise an Exception.
1188 def confsubdir(self,dirname,clean,dry_run=False):
1189 subdirname="%s/%s"%(self.confdir(),dirname)
1191 utils.system("rm -rf %s"%subdirname)
1192 if not os.path.isdir(subdirname):
1193 utils.system("mkdir -p %s"%subdirname)
# under dry_run the mkdir may legitimately not have happened, so don't raise
1194 if not dry_run and not os.path.isdir(subdirname):
1195 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
def conffile_clean(self, filename):
    # remove that config file -- True when the rm command succeeded
    target = self.conffile(filename)
    return utils.system("rm -rf %s" % target) == 0
# Builds an sfa-config-tty input script locally, then pipes it into the guest's
# sfa-config-tty ('e var / value' entries, then write/Restart/quit).
# NOTE(review): the listing elides several lines -- more variable names in the
# first list, the closing bracket/colon lines of both for-statements, the
# 'else' assigning val='false', an apparent fileconf.close() before the cat,
# and the final return; confirm before editing.
1203 def sfa_configure(self):
1204 "run sfa-config-tty"
1205 tmpname=self.conffile("sfa-config-tty")
1206 fileconf=open(tmpname,'w')
1207 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1208 'SFA_INTERFACE_HRN',
1209 # 'SFA_REGISTRY_LEVEL1_AUTH',
1210 'SFA_REGISTRY_HOST',
1211 'SFA_AGGREGATE_HOST',
1217 'SFA_PLC_DB_PASSWORD',
# each variable becomes an 'e <var>\n<value>\n' edit command for the tty tool
1220 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1221 # the way plc_config handles booleans just sucks..
1222 for var in ['SFA_API_DEBUG']:
1224 if self.plc_spec['sfa'][var]: val='true'
1225 fileconf.write ('e %s\n%s\n'%(var,val))
# w = write config, R = restart, q = quit
1226 fileconf.write('w\n')
1227 fileconf.write('R\n')
1228 fileconf.write('q\n')
# echo the generated script locally for the test log, then feed it to the guest
1230 utils.system('cat %s'%tmpname)
1231 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
def aggregate_xml_line(self):
    # one <aggregate/> entry advertising this plc, used in peers' aggregates.xml
    sfa_conf = self.plc_spec['sfa']
    port = sfa_conf['neighbours-port']
    return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
        (self.vserverip, sfa_conf['SFA_REGISTRY_ROOT_AUTH'], port)
def registry_xml_line(self):
    # one <registry/> entry advertising this plc, used in peers' registries.xml
    root_auth = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (self.vserverip, root_auth)
1244 # a cross step that takes all other plcs in argument
1245 def cross_sfa_configure(self, other_plcs):
1246 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1247 # of course with a single plc, other_plcs is an empty list
1250 agg_fname=self.conffile("agg.xml")
1251 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1252 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1253 utils.header ("(Over)wrote %s"%agg_fname)
1254 reg_fname=self.conffile("reg.xml")
1255 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1256 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1257 utils.header ("(Over)wrote %s"%reg_fname)
1258 return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
1259 and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
def sfa_import(self):
    "run sfa-import-plc.py inside the guest to seed the SFA registry"
    # the local 'auth' (registry root auth) was only consumed by the
    # key-copying command below, which is "not needed anymore" -- so the
    # unused assignment and the dead commented-out code are dropped
    return self.run_in_guest('sfa-import-plc.py') == 0
# Starts the sfa service in the guest; True when the init script exits 0.
# NOTE(review): one listing line between the def and the return is elided --
# presumably the step docstring.
1268 def sfa_start(self):
1270 return self.run_in_guest('service sfa start')==0
# Builds per-slice sfi client config dirs locally, then pushes them under the
# guest's /root/sfi.
# NOTE(review): the listing elides the line after the dry-run header (presumably
# an early return) and the method's trailing lines (any final return).
1272 def sfi_configure(self):
1273 "Create /root/sfi on the plc side for sfi client configuration"
1274 if self.options.dry_run:
1275 utils.header("DRY RUN - skipping step")
1277 sfa_spec=self.plc_spec['sfa']
1278 # cannot use sfa_slice_mapper to pass dir_name
1279 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
1280 site_spec = self.locate_site (slice_spec['sitename'])
1281 test_site = TestSite(self,site_spec)
1282 test_slice=TestSliceSfa(self,test_site,slice_spec)
# one fresh local conf subdir per slice, wiped first (clean=True)
1283 dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
1284 test_slice.sfi_config(dir_name)
1285 # push into the remote /root/sfi area
1286 location = test_slice.sfi_path()
1287 remote="/vservers/%s/%s"%(self.vservername,location)
1288 self.test_ssh.mkdir(remote,abs=True)
1289 # need to strip last level or remote otherwise we get an extra dir level
1290 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
def sfi_clean(self):
    "clean up /root/sfi on the plc side"
    # propagate the rm's success as the step's boolean result instead of
    # discarding it (step methods must return a boolean, per the module header)
    return self.run_in_guest("rm -rf /root/sfi") == 0
# NOTE(review): the SFA step methods below appear as def lines (some with their
# step docstrings, some without) -- all bodies, and several docstrings, are
# elided from this listing; these are presumably slice_sfa_mapper-style steps.
1300 def sfa_add_user(self):
1305 def sfa_update_user(self):
1309 def sfa_add_slice(self):
1310 "run sfi.py add (on Registry) from slice.xml"
1314 def sfa_discover(self):
# NOTE(review): 'resouces_in.rspec' -- likely a typo for 'resources_in', but it
# may mirror an equally misspelled actual filename; check before fixing
1315 "discover resources into resouces_in.rspec"
1319 def sfa_create_slice(self):
1320 "run sfi.py create (on SM) - 1st time"
1324 def sfa_check_slice_plc(self):
1325 "check sfa_create_slice at the plcs - all local nodes should be in slice"
1329 def sfa_update_slice(self):
1330 "run sfi.py create (on SM) on existing object"
# orphan docstring -- its enclosing def is elided from this excerpt
1335 "various registry-related calls"
1339 def ssh_slice_sfa(self):
1340 "tries to ssh-enter the SFA slice"
1344 def sfa_delete_user(self):
1349 def sfa_delete_slice(self):
1350 "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
# orphan statement (its enclosing def, an sfa_stop-like step, is elided);
# note the '==0' comparison result is discarded here
1355 self.run_in_guest('service sfa stop')==0
# Runs the stress-test script twice in --preserve mode, the second pass with
# --foreign to create fake foreign entries; both passes must succeed.
# NOTE(review): 'command' is augmented before any visible assignment -- its
# initialization (presumably 'command = location') is elided from the listing.
# Also 'remote' is first a path, then reused as a boolean -- confusing naming.
1358 def populate (self):
1359 "creates random entries in the PLCAPI"
1360 # install the stress-test in the plc image
1361 location = "/usr/share/plc_api/plcsh_stress_test.py"
1362 remote="/vservers/%s/%s"%(self.vservername,location)
1363 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1365 command += " -- --preserve --short-names"
1366 local = (self.run_in_guest(command) == 0);
1367 # second run with --foreign
1368 command += ' --foreign'
1369 remote = (self.run_in_guest(command) == 0);
1370 return ( local and remote)
# Umbrella step collecting every log source in sequence; the per-source work is
# delegated to the gather_* helpers below.
# NOTE(review): a handful of listing lines are elided (apparently blank lines
# and possibly a final return). Python 2 print statements throughout.
1372 def gather_logs (self):
1373 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1374 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1375 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1376 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1377 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1378 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1380 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1381 self.gather_var_logs ()
1383 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1384 self.gather_pgsql_logs ()
1386 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1387 for site_spec in self.plc_spec['sites']:
1388 test_site = TestSite (self,site_spec)
1389 for node_spec in site_spec['nodes']:
1390 test_node=TestNode(self,test_site,node_spec)
1391 test_node.gather_qemu_logs()
1393 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1394 self.gather_nodes_var_logs()
1396 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1397 self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    # fetch each sliver's /var/log into a local logs/sliver.var-log.<name>/ tree
    for sliver in self.all_sliver_objs():
        tar_stream = sliver.tar_var_logs()
        utils.system("mkdir -p logs/sliver.var-log.%s" % sliver.name())
        utils.system(tar_stream + " | tar -C logs/sliver.var-log.%s -xf -" % sliver.name())
def gather_var_logs(self):
    # mirror the plc guest's /var/log under logs/myplc.var-log.<name>/
    utils.system("mkdir -p logs/myplc.var-log.%s" % self.name())
    pipeline = self.actual_command_in_guest("tar -C /var/log/ -cf - .") \
        + "| tar -C logs/myplc.var-log.%s -xf -" % self.name()
    utils.system(pipeline)
    # make the copied httpd logs world-readable/traversable
    utils.system("chmod a+r,a+x logs/myplc.var-log.%s/httpd" % self.name())
def gather_pgsql_logs(self):
    # mirror the guest's postgres logs under logs/myplc.pgsql-log.<name>/
    utils.system("mkdir -p logs/myplc.pgsql-log.%s" % self.name())
    pipeline = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .") \
        + "| tar -C logs/myplc.pgsql-log.%s -xf -" % self.name()
    utils.system(pipeline)
def gather_nodes_var_logs(self):
    # fetch every node's /var/log over ssh into logs/node.var-log.<node>/
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite(self, site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode(self, test_site, node_spec)
            node_ssh = TestSsh(test_node.name(), key="keys/key1.rsa")
            utils.system("mkdir -p logs/node.var-log.%s" % test_node.name())
            pipeline = node_ssh.actual_command("tar -C /var/log -cf - .") \
                + "| tar -C logs/node.var-log.%s -xf -" % test_node.name()
            utils.system(pipeline)
1434 # returns the filename to use for sql dump/restore, using options.dbname if set
# NOTE(review): the listing elides lines here -- apparently a try/except around
# the options.dbname lookup and the conversion of the datetime into a usable
# 'name' (between the now() call and the return). StringTypes is Python-2-only
# (types.StringTypes); a py3 port would test isinstance(name, str).
1435 def dbfile (self, database):
1436 # uses options.dbname if it is found
1438 name=self.options.dbname
1439 if not isinstance(name,StringTypes):
# fall back to a timestamp-derived name when no usable dbname was supplied
1442 t=datetime.datetime.now()
1445 return "/root/%s-%s.sql"%(database,name)
def plc_db_dump(self):
    'dump the planetlab5 DB in /root in the PLC - filename has time'
    # note: 'planetab5' (sic) is only the dump's basename -- plc_db_restore
    # uses the same misspelled basename, so the pair stays consistent;
    # the database actually dumped is planetlab5
    dump = self.dbfile("planetab5")
    self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f ' + dump)
    utils.header('Dumped planetlab5 database in %s' % dump)
    # step methods must return a boolean (module header)
    return True
# Drops and recreates planetlab5 from the dump produced by plc_db_dump, with
# httpd stopped around the operation. Left as-is: the command sequence is
# strictly order-dependent (stop httpd -> drop -> createdb -> load -> start).
# NOTE(review): dbfile() embeds a timestamp, so this only finds a dump made in
# the same conditions -- consistent with the docstring's "looks broken" caveat.
1454 def plc_db_restore(self):
1455 'restore the planetlab5 DB - looks broken, but run -n might help'
# same (misspelled) basename as plc_db_dump, deliberately
1456 dump=self.dbfile("planetab5")
1457 ##stop httpd service
1458 self.run_in_guest('service httpd stop')
1459 # xxx - need another wrapper
1460 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1461 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1462 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1463 ##starting httpd service
1464 self.run_in_guest('service httpd start')
1466 utils.header('Database restored from ' + dump)
# No-op bodies that exist only to carry names of the form standby_<N>: per the
# module header, standby_generic splits the name on '_' to derive the number of
# minutes to sleep. NOTE(review): every other listing line is elided here --
# presumably the @standby_generic decoration above each def; confirm.
1469 def standby_1(): pass
1471 def standby_2(): pass
1473 def standby_3(): pass
1475 def standby_4(): pass
1477 def standby_5(): pass
1479 def standby_6(): pass
1481 def standby_7(): pass
1483 def standby_8(): pass
1485 def standby_9(): pass
1487 def standby_10(): pass
1489 def standby_11(): pass
1491 def standby_12(): pass
1493 def standby_13(): pass
1495 def standby_14(): pass
1497 def standby_15(): pass
1499 def standby_16(): pass
1501 def standby_17(): pass
1503 def standby_18(): pass
1505 def standby_19(): pass
1507 def standby_20(): pass