1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
24 # step methods must take (self) and return a boolean (options is a member of the class)
26 def standby(minutes,dry_run):
27 utils.header('Entering StandBy for %d mn'%minutes)
31 time.sleep(60*minutes)
34 def standby_generic (func):
36 minutes=int(func.__name__.split("_")[1])
37 return standby(minutes,self.options.dry_run)
40 def node_mapper (method):
43 node_method = TestNode.__dict__[method.__name__]
44 for site_spec in self.plc_spec['sites']:
45 test_site = TestSite (self,site_spec)
46 for node_spec in site_spec['nodes']:
47 test_node = TestNode (self,test_site,node_spec)
48 if not node_method(test_node): overall=False
50 # restore the doc text
51 actual.__doc__=method.__doc__
54 def slice_mapper (method):
57 slice_method = TestSlice.__dict__[method.__name__]
58 for slice_spec in self.plc_spec['slices']:
59 site_spec = self.locate_site (slice_spec['sitename'])
60 test_site = TestSite(self,site_spec)
61 test_slice=TestSlice(self,test_site,slice_spec)
62 if not slice_method(test_slice,self.options): overall=False
64 # restore the doc text
65 actual.__doc__=method.__doc__
68 def slice_sfa_mapper (method):
71 slice_method = TestSliceSfa.__dict__[method.__name__]
72 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
73 site_spec = self.locate_site (slice_spec['sitename'])
74 test_site = TestSite(self,site_spec)
75 test_slice=TestSliceSfa(self,test_site,slice_spec)
76 if not slice_method(test_slice,self.options): overall=False
78 # restore the doc text
79 actual.__doc__=method.__doc__
89 'vs_delete','timestamp_vs','vs_create', SEP,
90 'plc_install', 'plc_configure', 'plc_start', SEP,
91 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
92 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
93 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
94 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
95 'sfa_install', 'sfa_configure', 'cross_sfa_configure', 'sfa_import', 'sfa_start', SEPSFA,
96 'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
97 'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
98 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
99 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
100 # but as the stress test might take a while, we sometimes missed the debug mode..
101 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
102 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
103 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
105 'force_gather_logs', SEP,
111 'show_boxes', 'local_list','local_rel','local_rel_plc','local_rel_qemu',SEP,
112 'plc_stop', 'vs_start', 'vs_stop', SEP,
113 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
114 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
115 'delete_leases', 'list_leases', SEP,
117 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
118 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
119 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEP,
120 'plc_db_dump' , 'plc_db_restore', SEP,
121 'standby_1 through 20',SEP,
def printable_steps (steps):
    # Render a list of step names on wrapped lines; the SEP/SEPSFA
    # pseudo-steps act as line-break markers.
    # Parameter renamed from 'list' so it no longer shadows the builtin.
    single_line=" ".join(steps)+" "
    return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
def valid_step (step):
    # the separator pseudo-steps are not real steps
    return step not in (SEP, SEPSFA)
132 # turn off the sfa-related steps when build has skipped SFA
133 # this is originally for centos5 as recent SFAs won't build on this platform
135 def check_whether_build_has_sfa (rpms_url):
136 # warning, we're now building 'sface' so let's be a bit more picky
137 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
138 # full builds are expected to return with 0 here
140 # move all steps containing 'sfa' from default_steps to other_steps
141 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
142 TestPlc.other_steps += sfa_steps
143 for step in sfa_steps: TestPlc.default_steps.remove(step)
145 def __init__ (self,plc_spec,options):
146 self.plc_spec=plc_spec
148 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
150 self.vserverip=plc_spec['vserverip']
151 self.vservername=plc_spec['vservername']
152 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
155 raise Exception,'chroot-based myplc testing is deprecated'
156 self.apiserver=TestApiserver(self.url,options.dry_run)
159 name=self.plc_spec['name']
160 return "%s.%s"%(name,self.vservername)
163 return self.plc_spec['host_box']
166 return self.test_ssh.is_local()
168 # define the API methods on this object through xmlrpc
169 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    # wrap 'command' so it runs inside the guest, then turn that
    # into a full ssh invocation for the host box
    guest_command = self.host_to_guest(command)
    return self.test_ssh.actual_command(guest_command)
def start_guest (self):
    # run the host-side vserver start command through ssh on the host box
    full_command = self.test_ssh.actual_command(self.start_guest_in_host())
    return utils.system(full_command)
def stop_guest (self):
    # run the host-side vserver stop command through ssh on the host box
    full_command = self.test_ssh.actual_command(self.stop_guest_in_host())
    return utils.system(full_command)
def run_in_guest (self,command):
    # execute 'command' inside the guest; returns the shell exit status
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    # commands aimed at the host box go straight through the TestSsh helper
    return self.test_ssh.run_in_buildname(command)
188 #command gets run in the vserver
def host_to_guest(self,command):
    # a command gets wrapped with 'vserver <name> exec' so it runs in the guest
    wrapper = "vserver %s exec %s"
    return wrapper % (self.vservername, command)
192 #start/stop the vserver
def start_guest_in_host(self):
    # host-side command that boots the vserver
    return "vserver %s start" % self.vservername
def stop_guest_in_host(self):
    # host-side command that shuts the vserver down
    return "vserver %s stop" % self.vservername
def run_in_guest_piped (self,local,remote):
    # run 'local' on this box and pipe its output into 'remote' run in the guest
    remote_command = self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True)
    return utils.system(local+" | "+remote_command)
203 def auth_root (self):
204 return {'Username':self.plc_spec['PLC_ROOT_USER'],
205 'AuthMethod':'password',
206 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
207 'Role' : self.plc_spec['role']
209 def locate_site (self,sitename):
210 for site in self.plc_spec['sites']:
211 if site['site_fields']['name'] == sitename:
213 if site['site_fields']['login_base'] == sitename:
215 raise Exception,"Cannot locate site %s"%sitename
217 def locate_node (self,nodename):
218 for site in self.plc_spec['sites']:
219 for node in site['nodes']:
220 if node['name'] == nodename:
222 raise Exception,"Cannot locate node %s"%nodename
224 def locate_hostname (self,hostname):
225 for site in self.plc_spec['sites']:
226 for node in site['nodes']:
227 if node['node_fields']['hostname'] == hostname:
229 raise Exception,"Cannot locate hostname %s"%hostname
231 def locate_key (self,keyname):
232 for key in self.plc_spec['keys']:
233 if key['name'] == keyname:
235 raise Exception,"Cannot locate key %s"%keyname
237 def locate_slice (self, slicename):
238 for slice in self.plc_spec['slices']:
239 if slice['slice_fields']['name'] == slicename:
241 raise Exception,"Cannot locate slice %s"%slicename
243 def all_sliver_objs (self):
245 for slice_spec in self.plc_spec['slices']:
246 slicename = slice_spec['slice_fields']['name']
247 for nodename in slice_spec['nodenames']:
248 result.append(self.locate_sliver_obj (nodename,slicename))
251 def locate_sliver_obj (self,nodename,slicename):
252 (site,node) = self.locate_node(nodename)
253 slice = self.locate_slice (slicename)
255 test_site = TestSite (self, site)
256 test_node = TestNode (self, test_site,node)
257 # xxx the slice site is assumed to be the node site - mhh - probably harmless
258 test_slice = TestSlice (self, test_site, slice)
259 return TestSliver (self, test_node, test_slice)
261 def locate_first_node(self):
262 nodename=self.plc_spec['slices'][0]['nodenames'][0]
263 (site,node) = self.locate_node(nodename)
264 test_site = TestSite (self, site)
265 test_node = TestNode (self, test_site,node)
def locate_first_sliver (self):
    # convenience: the sliver for the first node of the first slice in the spec
    first_slice = self.plc_spec['slices'][0]
    nodename = first_slice['nodenames'][0]
    return self.locate_sliver_obj(nodename, first_slice['slice_fields']['name'])
274 # all different hostboxes used in this plc
275 def gather_hostBoxes(self):
276 # maps on sites and nodes, return [ (host_box,test_node) ]
278 for site_spec in self.plc_spec['sites']:
279 test_site = TestSite (self,site_spec)
280 for node_spec in site_spec['nodes']:
281 test_node = TestNode (self, test_site, node_spec)
282 if not test_node.is_real():
283 tuples.append( (test_node.host_box(),test_node) )
284 # transform into a dict { 'host_box' -> [ test_node .. ] }
286 for (box,node) in tuples:
287 if not result.has_key(box):
290 result[box].append(node)
293 # a step for checking this stuff
294 def show_boxes (self):
295 'print summary of nodes location'
296 for (box,nodes) in self.gather_hostBoxes().iteritems():
297 print box,":"," + ".join( [ node.name() for node in nodes ] )
300 # make this a valid step
301 def qemu_kill_all(self):
302 'kill all qemu instances on the qemu boxes involved by this setup'
303 # this is the brute force version, kill all qemus on that host box
304 for (box,nodes) in self.gather_hostBoxes().iteritems():
305 # pass the first nodename, as we don't push template-qemu on testboxes
306 nodedir=nodes[0].nodedir()
307 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
310 # make this a valid step
311 def qemu_list_all(self):
312 'list all qemu instances on the qemu boxes involved by this setup'
313 for (box,nodes) in self.gather_hostBoxes().iteritems():
314 # this is the brute force version, kill all qemus on that host box
315 TestBoxQemu(box,self.options.buildname).qemu_list_all()
318 # kill only the right qemus
319 def qemu_list_mine(self):
320 'list qemu instances for our nodes'
321 for (box,nodes) in self.gather_hostBoxes().iteritems():
322 # the fine-grain version
327 # kill only the right qemus
328 def qemu_kill_mine(self):
329 'kill the qemu instances for our nodes'
330 for (box,nodes) in self.gather_hostBoxes().iteritems():
331 # the fine-grain version
336 #################### display config
338 "show test configuration after localization"
339 self.display_pass (1)
340 self.display_pass (2)
344 "print cut'n paste-able stuff to export env variables to your shell"
345 # these work but the shell prompt does not get displayed..
346 command1="ssh %s vserver %s enter"%(self.plc_spec['host_box'],self.plc_spec['vservername'])
347 command2="ssh root@%s %s"%(socket.gethostname(),command1)
348 # guess local domain from hostname
349 domain=socket.gethostname().split('.',1)[1]
350 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
351 print "export BUILD=%s"%self.options.buildname
352 print "export PLCHOST=%s"%fqdn
353 print "export GUEST=%s"%self.plc_spec['vservername']
354 # find hostname of first node
355 (hostname,qemubox) = self.all_node_infos()[0]
356 print "export KVMHOST=%s.%s"%(qemubox,domain)
357 print "export NODE=%s"%(hostname)
361 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
362 def display_pass (self,passno):
363 for (key,val) in self.plc_spec.iteritems():
364 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
368 self.display_site_spec(site)
369 for node in site['nodes']:
370 self.display_node_spec(node)
371 elif key=='initscripts':
372 for initscript in val:
373 self.display_initscript_spec (initscript)
376 self.display_slice_spec (slice)
379 self.display_key_spec (key)
381 if key not in ['sites','initscripts','slices','keys', 'sfa']:
382 print '+ ',key,':',val
384 def display_site_spec (self,site):
385 print '+ ======== site',site['site_fields']['name']
386 for (k,v) in site.iteritems():
387 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
390 print '+ ','nodes : ',
392 print node['node_fields']['hostname'],'',
398 print user['name'],'',
400 elif k == 'site_fields':
401 print '+ login_base',':',v['login_base']
402 elif k == 'address_fields':
408 def display_initscript_spec (self,initscript):
409 print '+ ======== initscript',initscript['initscript_fields']['name']
411 def display_key_spec (self,key):
412 print '+ ======== key',key['name']
414 def display_slice_spec (self,slice):
415 print '+ ======== slice',slice['slice_fields']['name']
416 for (k,v) in slice.iteritems():
429 elif k=='slice_fields':
430 print '+ fields',':',
431 print 'max_nodes=',v['max_nodes'],
436 def display_node_spec (self,node):
437 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
438 print "hostname=",node['node_fields']['hostname'],
439 print "ip=",node['interface_fields']['ip']
440 if self.options.verbose:
441 utils.pprint("node details",node,depth=3)
443 # another entry point for just showing the boxes involved
444 def display_mapping (self):
445 TestPlc.display_mapping_plc(self.plc_spec)
449 def display_mapping_plc (plc_spec):
450 print '+ MyPLC',plc_spec['name']
451 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
452 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
453 for site_spec in plc_spec['sites']:
454 for node_spec in site_spec['nodes']:
455 TestPlc.display_mapping_node(node_spec)
458 def display_mapping_node (node_spec):
459 print '+ NODE %s'%(node_spec['name'])
460 print '+\tqemu box %s'%node_spec['host_box']
461 print '+\thostname=%s'%node_spec['node_fields']['hostname']
463 # write a timestamp in /vservers/<>.timestamp
464 # cannot be inside the vserver, that causes vserver .. build to cough
465 def timestamp_vs (self):
467 return utils.system(self.test_ssh.actual_command("echo %d > /vservers/%s.timestamp"%(now,self.vservername)))==0
469 def local_pre (self):
470 "run site-dependant pre-test script as defined in LocalTestResources"
471 from LocalTestResources import local_resources
472 return local_resources.step_pre(self)
474 def local_post (self):
475 "run site-dependant post-test script as defined in LocalTestResources"
476 from LocalTestResources import local_resources
477 return local_resources.step_post(self)
479 def local_list (self):
480 "run site-dependant list script as defined in LocalTestResources"
481 from LocalTestResources import local_resources
482 return local_resources.step_list(self)
484 def local_rel (self):
485 "run site-dependant release script as defined in LocalTestResources"
486 from LocalTestResources import local_resources
487 return local_resources.step_release(self)
489 def local_rel_plc (self):
490 "run site-dependant release script as defined in LocalTestResources"
491 from LocalTestResources import local_resources
492 return local_resources.step_release_plc(self)
494 def local_rel_qemu (self):
495 "run site-dependant release script as defined in LocalTestResources"
496 from LocalTestResources import local_resources
497 return local_resources.step_release_qemu(self)
500 "vserver delete the test myplc"
501 self.run_in_host("vserver --silent %s delete"%self.vservername)
502 self.run_in_host("rm -f /vservers/%s.timestamp"%self.vservername)
506 # historically the build was being fetched by the tests
507 # now the build pushes itself as a subdir of the tests workdir
508 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
509 def vs_create (self):
510 "vserver creation (no install done)"
511 # push the local build/ dir to the testplc box
513 # a full path for the local calls
514 build_dir=os.path.dirname(sys.argv[0])
515 # sometimes this is empty - set to "." in such a case
516 if not build_dir: build_dir="."
517 build_dir += "/build"
519 # use a standard name - will be relative to remote buildname
521 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
522 self.test_ssh.rmdir(build_dir)
523 self.test_ssh.copy(build_dir,recursive=True)
524 # the repo url is taken from arch-rpms-url
525 # with the last step (i386) removed
526 repo_url = self.options.arch_rpms_url
527 for level in [ 'arch' ]:
528 repo_url = os.path.dirname(repo_url)
529 # pass the vbuild-nightly options to vtest-init-vserver
531 test_env_options += " -p %s"%self.options.personality
532 test_env_options += " -d %s"%self.options.pldistro
533 test_env_options += " -f %s"%self.options.fcdistro
534 script="vtest-init-vserver.sh"
535 vserver_name = self.vservername
536 vserver_options="--netdev eth0 --interface %s"%self.vserverip
538 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
539 vserver_options += " --hostname %s"%vserver_hostname
541 print "Cannot reverse lookup %s"%self.vserverip
542 print "This is considered fatal, as this might pollute the test results"
544 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
545 return self.run_in_host(create_vserver) == 0
548 def plc_install(self):
549 "yum install myplc, noderepo, and the plain bootstrapfs"
551 # workaround for getting pgsql8.2 on centos5
552 if self.options.fcdistro == "centos5":
553 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
556 if self.options.personality == "linux32":
558 elif self.options.personality == "linux64":
561 raise Exception, "Unsupported personality %r"%self.options.personality
562 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
565 pkgs_list.append ("slicerepo-%s"%nodefamily)
566 pkgs_list.append ("myplc")
567 pkgs_list.append ("noderepo-%s"%nodefamily)
568 pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
569 pkgs_string=" ".join(pkgs_list)
570 self.run_in_guest("yum -y install %s"%pkgs_string)
571 return self.run_in_guest("rpm -q %s"%pkgs_string)==0
574 def plc_configure(self):
576 tmpname='%s.plc-config-tty'%(self.name())
577 fileconf=open(tmpname,'w')
578 for var in [ 'PLC_NAME',
583 'PLC_MAIL_SUPPORT_ADDRESS',
586 # Above line was added for integrating SFA Testing
592 'PLC_RESERVATION_GRANULARITY',
594 'PLC_OMF_XMPP_SERVER',
596 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
597 fileconf.write('w\n')
598 fileconf.write('q\n')
600 utils.system('cat %s'%tmpname)
601 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
602 utils.system('rm %s'%tmpname)
607 self.run_in_guest('service plc start')
612 self.run_in_guest('service plc stop')
616 "start the PLC vserver"
621 "stop the PLC vserver"
625 # stores the keys from the config for further use
626 def keys_store(self):
627 "stores test users ssh keys in keys/"
628 for key_spec in self.plc_spec['keys']:
629 TestKey(self,key_spec).store_key()
632 def keys_clean(self):
633 "removes keys cached in keys/"
634 utils.system("rm -rf ./keys")
637 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
638 # for later direct access to the nodes
639 def keys_fetch(self):
640 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
642 if not os.path.isdir(dir):
644 vservername=self.vservername
646 prefix = 'debug_ssh_key'
647 for ext in [ 'pub', 'rsa' ] :
648 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
649 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
650 if self.test_ssh.fetch(src,dst) != 0: overall=False
654 "create sites with PLCAPI"
655 return self.do_sites()
def delete_sites (self):
    "delete sites with PLCAPI"
    # same worker as the creation step, driven in delete mode
    return self.do_sites(action="delete")
661 def do_sites (self,action="add"):
662 for site_spec in self.plc_spec['sites']:
663 test_site = TestSite (self,site_spec)
664 if (action != "add"):
665 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
666 test_site.delete_site()
667 # deleted with the site
668 #test_site.delete_users()
671 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
672 test_site.create_site()
673 test_site.create_users()
676 def delete_all_sites (self):
677 "Delete all sites in PLC, and related objects"
678 print 'auth_root',self.auth_root()
679 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
680 for site_id in site_ids:
681 print 'Deleting site_id',site_id
682 self.apiserver.DeleteSite(self.auth_root(),site_id)
686 "create nodes with PLCAPI"
687 return self.do_nodes()
def delete_nodes (self):
    "delete nodes with PLCAPI"
    # same worker as the creation step, driven in delete mode
    return self.do_nodes(action="delete")
692 def do_nodes (self,action="add"):
693 for site_spec in self.plc_spec['sites']:
694 test_site = TestSite (self,site_spec)
696 utils.header("Deleting nodes in site %s"%test_site.name())
697 for node_spec in site_spec['nodes']:
698 test_node=TestNode(self,test_site,node_spec)
699 utils.header("Deleting %s"%test_node.name())
700 test_node.delete_node()
702 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
703 for node_spec in site_spec['nodes']:
704 utils.pprint('Creating node %s'%node_spec,node_spec)
705 test_node = TestNode (self,test_site,node_spec)
706 test_node.create_node ()
def nodegroups (self):
    "create nodegroups with PLCAPI"
    # thin wrapper over the shared add/delete worker
    return self.do_nodegroups("add")
def delete_nodegroups (self):
    "delete nodegroups with PLCAPI"
    # thin wrapper over the shared add/delete worker
    return self.do_nodegroups("delete")
def translate_timestamp (start,grain,timestamp):
    # small values are interpreted as relative offsets (in grains) from 'start';
    # anything >= TestPlc.YEAR is taken to already be an absolute epoch timestamp
    return start + timestamp*grain if timestamp < TestPlc.YEAR else timestamp
def timestamp_printable (timestamp):
    # human-readable UTC rendering of an epoch timestamp
    layout = '%m-%d %H:%M:%S UTC'
    return time.strftime(layout, time.gmtime(timestamp))
727 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
729 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
730 print 'API answered grain=',grain
731 start=(now/grain)*grain
733 # find out all nodes that are reservable
734 nodes=self.all_reservable_nodenames()
736 utils.header ("No reservable node found - proceeding without leases")
739 # attach them to the leases as specified in plc_specs
740 # this is where the 'leases' field gets interpreted as relative of absolute
741 for lease_spec in self.plc_spec['leases']:
742 # skip the ones that come with a null slice id
743 if not lease_spec['slice']: continue
744 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
745 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
746 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
747 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
748 if lease_addition['errors']:
749 utils.header("Cannot create leases, %s"%lease_addition['errors'])
752 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
753 (nodes,lease_spec['slice'],
754 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
755 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
759 def delete_leases (self):
760 "remove all leases in the myplc side"
761 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
762 utils.header("Cleaning leases %r"%lease_ids)
763 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
766 def list_leases (self):
767 "list all leases known to the myplc"
768 leases = self.apiserver.GetLeases(self.auth_root())
771 current=l['t_until']>=now
772 if self.options.verbose or current:
773 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
774 TestPlc.timestamp_printable(l['t_from']),
775 TestPlc.timestamp_printable(l['t_until'])))
778 # create nodegroups if needed, and populate
779 def do_nodegroups (self, action="add"):
780 # 1st pass to scan contents
782 for site_spec in self.plc_spec['sites']:
783 test_site = TestSite (self,site_spec)
784 for node_spec in site_spec['nodes']:
785 test_node=TestNode (self,test_site,node_spec)
786 if node_spec.has_key('nodegroups'):
787 nodegroupnames=node_spec['nodegroups']
788 if isinstance(nodegroupnames,StringTypes):
789 nodegroupnames = [ nodegroupnames ]
790 for nodegroupname in nodegroupnames:
791 if not groups_dict.has_key(nodegroupname):
792 groups_dict[nodegroupname]=[]
793 groups_dict[nodegroupname].append(test_node.name())
794 auth=self.auth_root()
796 for (nodegroupname,group_nodes) in groups_dict.iteritems():
798 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
799 # first, check if the nodetagtype is here
800 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
802 tag_type_id = tag_types[0]['tag_type_id']
804 tag_type_id = self.apiserver.AddTagType(auth,
805 {'tagname':nodegroupname,
806 'description': 'for nodegroup %s'%nodegroupname,
808 print 'located tag (type)',nodegroupname,'as',tag_type_id
810 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
812 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
813 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
814 # set node tag on all nodes, value='yes'
815 for nodename in group_nodes:
817 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
819 traceback.print_exc()
820 print 'node',nodename,'seems to already have tag',nodegroupname
823 expect_yes = self.apiserver.GetNodeTags(auth,
824 {'hostname':nodename,
825 'tagname':nodegroupname},
826 ['value'])[0]['value']
827 if expect_yes != "yes":
828 print 'Mismatch node tag on node',nodename,'got',expect_yes
831 if not self.options.dry_run:
832 print 'Cannot find tag',nodegroupname,'on node',nodename
836 print 'cleaning nodegroup',nodegroupname
837 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
839 traceback.print_exc()
843 # return a list of tuples (nodename,qemuname)
844 def all_node_infos (self) :
846 for site_spec in self.plc_spec['sites']:
847 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
848 for node_spec in site_spec['nodes'] ]
851 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
852 def all_reservable_nodenames (self):
854 for site_spec in self.plc_spec['sites']:
855 for node_spec in site_spec['nodes']:
856 node_fields=node_spec['node_fields']
857 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
858 res.append(node_fields['hostname'])
861 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
862 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
863 if self.options.dry_run:
867 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
868 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
869 # the nodes that haven't checked yet - start with a full list and shrink over time
870 tocheck = self.all_hostnames()
871 utils.header("checking nodes %r"%tocheck)
872 # create a dict hostname -> status
873 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
876 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
878 for array in tocheck_status:
879 hostname=array['hostname']
880 boot_state=array['boot_state']
881 if boot_state == target_boot_state:
882 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
884 # if it's a real node, never mind
885 (site_spec,node_spec)=self.locate_hostname(hostname)
886 if TestNode.is_real_model(node_spec['node_fields']['model']):
887 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
889 boot_state = target_boot_state
890 elif datetime.datetime.now() > graceout:
891 utils.header ("%s still in '%s' state"%(hostname,boot_state))
892 graceout=datetime.datetime.now()+datetime.timedelta(1)
893 status[hostname] = boot_state
895 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
898 if datetime.datetime.now() > timeout:
899 for hostname in tocheck:
900 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
902 # otherwise, sleep for a while
904 # only useful in empty plcs
def nodes_booted(self):
    # shorthand: wait for every node to reach the 'boot' state
    return self.nodes_check_boot_state('boot', timeout_minutes=30, silent_minutes=20)
910 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
912 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
913 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
914 vservername=self.vservername
917 local_key = "keys/%(vservername)s-debug.rsa"%locals()
920 local_key = "keys/key1.rsa"
921 node_infos = self.all_node_infos()
922 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
923 for (nodename,qemuname) in node_infos:
924 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
925 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
926 (timeout_minutes,silent_minutes,period))
928 for node_info in node_infos:
929 (hostname,qemuname) = node_info
930 # try to run 'hostname' in the node
931 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
932 # don't spam logs - show the command only after the grace period
933 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
935 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
937 node_infos.remove(node_info)
939 # we will have tried real nodes once, in case they're up - but if not, just skip
940 (site_spec,node_spec)=self.locate_hostname(hostname)
941 if TestNode.is_real_model(node_spec['node_fields']['model']):
942 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
943 node_infos.remove(node_info)
946 if datetime.datetime.now() > timeout:
947 for (hostname,qemuname) in node_infos:
948 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
950 # otherwise, sleep for a while
952 # only useful in empty plcs
def ssh_node_debug(self):
    "Tries to ssh into nodes in debug mode with the debug ssh key"
    # short timeout: debug mode is expected to come up quickly
    return self.check_nodes_ssh(debug=True,
                                timeout_minutes=10,
                                silent_minutes=5)
def ssh_node_boot(self):
    "Tries to ssh into nodes in production mode with the root ssh key"
    # longer timeout: a full install/boot cycle can take a while
    return self.check_nodes_ssh(debug=False,
                                timeout_minutes=40,
                                silent_minutes=15)
964 def qemu_local_init (self):
965 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
969 "all nodes: invoke GetBootMedium and store result locally"
972 def qemu_local_config (self):
973 "all nodes: compute qemu config qemu.conf and store it locally"
976 def nodestate_reinstall (self):
977 "all nodes: mark PLCAPI boot_state as reinstall"
980 def nodestate_safeboot (self):
981 "all nodes: mark PLCAPI boot_state as safeboot"
984 def nodestate_boot (self):
985 "all nodes: mark PLCAPI boot_state as boot"
988 def nodestate_show (self):
989 "all nodes: show PLCAPI boot_state"
992 def qemu_export (self):
993 "all nodes: push local node-dep directory on the qemu box"
996 ### check hooks : invoke scripts from hooks/{node,slice}
def check_hooks_node (self):
    # run the hooks/node unit tests on the first node of the spec
    first_node = self.locate_first_node()
    return first_node.check_hooks()
def check_hooks_sliver (self) :
    # run the hooks/slice unit tests in the first sliver of the spec
    first_sliver = self.locate_first_sliver()
    return first_sliver.check_hooks()
def check_hooks (self):
    "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
    # short-circuits: the sliver tests only run if the node tests pass
    node_result = self.check_hooks_node()
    return node_result and self.check_hooks_sliver()
1007 def do_check_initscripts(self):
1009 for slice_spec in self.plc_spec['slices']:
1010 if not slice_spec.has_key('initscriptstamp'):
1012 stamp=slice_spec['initscriptstamp']
1013 for nodename in slice_spec['nodenames']:
1014 (site,node) = self.locate_node (nodename)
1015 # xxx - passing the wrong site - probably harmless
1016 test_site = TestSite (self,site)
1017 test_slice = TestSlice (self,test_site,slice_spec)
1018 test_node = TestNode (self,test_site,node)
1019 test_sliver = TestSliver (self, test_node, test_slice)
1020 if not test_sliver.check_initscript_stamp(stamp):
def check_initscripts(self):
    "check that the initscripts have triggered"
    # thin wrapper around the actual worker
    outcome = self.do_check_initscripts()
    return outcome
def initscripts (self):
    "create initscripts with PLCAPI"
    # push every initscript declared in the plc spec through AddInitScript
    # NOTE(review): the trailing 'return True' usual for a step method is
    # among the lines elided from this chunk - confirm against the full file
    for initscript in self.plc_spec['initscripts']:
        utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
        self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
def delete_initscripts (self):
    "delete initscripts with PLCAPI"
    for initscript in self.plc_spec['initscripts']:
        initscript_name = initscript['initscript_fields']['name']
        print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
        # NOTE(review): the try/except lines that make the two prints below
        # mutually exclusive are elided from this chunk - confirm against
        # the full file
        self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
        print initscript_name,'deleted'
        print 'deletion went wrong - probably did not exist'
1049 "create slices with PLCAPI"
1050 return self.do_slices()
def delete_slices (self):
    "delete slices with PLCAPI"
    # same worker as the 'slices' step, switched to the delete action
    return self.do_slices(action="delete")
def do_slices (self, action="add"):
    # shared worker behind the 'slices' and 'delete_slices' steps
    # NOTE(review): the 'if action != "add":' / 'else:' branch lines and the
    # final return are elided from this chunk - confirm against the full file
    for slice in self.plc_spec['slices']:
        site_spec = self.locate_site (slice['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice)
        utils.header("Deleting slices in site %s"%test_site.name())
        test_slice.delete_slice()
        utils.pprint("Creating slice",slice)
        test_slice.create_slice()
        utils.header('Created Slice %s'%slice['slice_fields']['name'])
### slice- and key-oriented step stubs
# NOTE(review): decorator and body lines for these steps are elided from
# this chunk - confirm against the full file
def ssh_slice(self):
    "tries to ssh-enter the slice with the user key, to ensure slice creation"
def keys_clear_known_hosts (self):
    "remove test nodes entries from the local known_hosts file"
def qemu_start (self) :
    "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
def timestamp_qemu (self) :
    # NOTE(review): docstring was a verbatim copy of qemu_start's; reworded
    # to match the method name - confirm intended wording against full file
    "all nodes: record the qemu-start timestamp"
def check_tcp (self):
    "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
    # NOTE(review): the loop over the 'specs' entries and the 'port'/result
    # bookkeeping lines are elided from this chunk - confirm against full file
    specs = self.plc_spec['tcp_test']
    s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
    if not s_test_sliver.run_tcp_server(port,timeout=10):
    # idem for the client side
    # NOTE(review): BUG? the client sliver is looked up with the *server*
    # spec keys ('server_node'/'server_slice'); 'client_node'/'client_slice'
    # would be expected here - confirm against the full file before fixing
    c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
    if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
def plcsh_stress_test (self):
    "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # NOTE(review): the line initializing 'command' (presumably from
    # 'location') is elided from this chunk - confirm against the full file
    command += " -- --check"
    if self.options.size == 1:
        command += " --tiny"
    return ( self.run_in_guest(command) == 0)
# populate runs the same utility with slightly different options
1121 # in particular runs with --preserve (dont cleanup) and without --check
1122 # also it gets run twice, once with the --foreign option for creating fake foreign entries
def sfa_install(self):
    "yum install sfa, sfa-plc and sfa-client"
    packages = "sfa sfa-client sfa-plc sfa-sfatables"
    self.run_in_guest("yum -y install %s" % packages)
    # installation is deemed successful when rpm reports every package present
    return self.run_in_guest("rpm -q %s" % packages) == 0
def sfa_dbclean(self):
    "thoroughly wipes off the SFA database"
    # BUGFIX: the '==0' comparison result used to be computed and discarded,
    # so this step implicitly returned None; return the success boolean so
    # the step framework can tell success from failure
    return self.run_in_guest("sfa-nuke-plc.py")==0
def sfa_plcclean(self):
    "cleans the PLC entries that were created as a side effect of running the script"
    # NOTE(review): several lines (blanks and presumably a trailing
    # 'return True') are elided from this chunk - confirm against full file
    sfa_spec=self.plc_spec['sfa']
    # for each sfa slice spec: remove the slice, then its regular user
    for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
        slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
        # NOTE(review): bare 'except' also swallows unrelated API errors
        try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
        except: print "Slice %s already absent from PLC db"%slicename
        username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
        try: self.apiserver.DeletePerson(self.auth_root(),username)
        except: print "User %s already absent from PLC db"%username
    print "REMEMBER TO RUN sfa_import AGAIN"
def sfa_uninstall(self):
    "uses rpm to uninstall sfa - ignore result"
    # best-effort teardown: every command's exit status is deliberately ignored
    cleanup_commands = [
        "rpm -e sfa sfa-sfatables sfa-client sfa-plc",
        "rm -rf /var/lib/sfa",
        "rm -rf /etc/sfa",
        "rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon",
        "rpm -e --noscripts sfa-plc",
    ]
    for command in cleanup_commands:
        self.run_in_guest(command)
1164 ### run unit tests for SFA
1165 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1166 # Running Transaction
1167 # Transaction couldn't start:
1168 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1169 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1170 # no matter how many Gbs are available on the testplc
1171 # could not figure out what's wrong, so...
1172 # if the yum install phase fails, consider the test is successful
1173 # other combinations will eventually run it hopefully
def sfa_utest(self):
    "yum install sfa-tests and run SFA unittests"
    self.run_in_guest("yum -y install sfa-tests")
    # the yum install phase is known to fail for spurious reasons (see the
    # comment block above); in that case consider the step successful.
    # BUGFIX: this path used to fall through and run (and fail) the unit
    # tests even when the package never installed
    if self.run_in_guest("rpm -q sfa-tests")!=0:
        utils.header("WARNING: SFA unit tests failed to install, ignoring")
        return True
    return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1185 dirname="conf.%s"%self.plc_spec['name']
1186 if not os.path.isdir(dirname):
1187 utils.system("mkdir -p %s"%dirname)
1188 if not os.path.isdir(dirname):
1189 raise "Cannot create config dir for plc %s"%self.name()
def conffile(self,filename):
    # path of <filename> inside this plc's private config directory
    directory = self.confdir()
    return "%s/%s" % (directory, filename)
def confsubdir(self,dirname,clean,dry_run=False):
    # create (and optionally wipe first) a subdir of the config dir
    # NOTE(review): lines are elided from this chunk (presumably the
    # 'if clean:' guard and a final 'return subdirname') - confirm against
    # the full file
    subdirname="%s/%s"%(self.confdir(),dirname)
        utils.system("rm -rf %s"%subdirname)
    if not os.path.isdir(subdirname):
        utils.system("mkdir -p %s"%subdirname)
    if not dry_run and not os.path.isdir(subdirname):
        # NOTE(review): raising a plain string is invalid in python >= 2.6;
        # should raise an Exception subclass instead
        raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
def conffile_clean (self,filename):
    # remove <filename> from the config dir; True when the rm succeeded
    path = self.conffile(filename)
    status = utils.system("rm -rf %s" % path)
    return status == 0
def sfa_configure(self):
    "run sfa-config-tty"
    # write an answers file locally, then pipe it into sfa-config-tty
    # inside the guest
    # NOTE(review): several entries of the variable list below are elided
    # from this chunk - confirm against the full file
    tmpname=self.conffile("sfa-config-tty")
    fileconf=open(tmpname,'w')
    for var in [ 'SFA_REGISTRY_ROOT_AUTH',
         'SFA_INTERFACE_HRN',
#        'SFA_REGISTRY_LEVEL1_AUTH',
         'SFA_REGISTRY_HOST',
         'SFA_AGGREGATE_HOST',
         'SFA_PLC_DB_PASSWORD',
        fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
    # the way plc_config handles booleans just sucks..
    for var in ['SFA_API_DEBUG']:
        # NOTE(review): the 'val' fallback branch is elided from this chunk
        if self.plc_spec['sfa'][var]: val='true'
        fileconf.write ('e %s\n%s\n'%(var,val))
    fileconf.write('w\n')
    fileconf.write('R\n')
    fileconf.write('q\n')
    # NOTE(review): fileconf is apparently read back below without an
    # explicit close(); the close() may be among the elided lines - verify
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
def aggregate_xml_line(self):
    # one <aggregate/> element advertising this plc to its peers
    sfa_spec = self.plc_spec['sfa']
    neighbours_port = sfa_spec['neighbours-port']
    return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
        (self.vserverip, sfa_spec['SFA_REGISTRY_ROOT_AUTH'], neighbours_port)
def registry_xml_line(self):
    # registry entries use a hard-wired port, unlike aggregate_xml_line
    registry_auth = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (self.vserverip, registry_auth)
# a cross step that takes all other plcs in argument
def cross_sfa_configure(self, other_plcs):
    "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
    # of course with a single plc, other_plcs is an empty list
    # NOTE(review): a couple of lines are elided from this chunk right here.
    # Also uses the py2 'file()' builtin; the handles are never explicitly
    # closed (CPython refcounting flushes them immediately, but it is fragile)
    agg_fname=self.conffile("agg.xml")
    file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
        " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%agg_fname)
    reg_fname=self.conffile("reg.xml")
    file(reg_fname,"w").write("<registries>%s</registries>\n" % \
        " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%reg_fname)
    # both pushes into the guest must succeed for the step to pass
    return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
       and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
def sfa_import(self):
    "runs sfa-import-plc.py in the guest to seed the SFA registry from the PLC db"
    # CLEANUP: dropped the unused local 'auth' - it was only needed by the
    # commented-out server.key copy that is no longer performed
    return self.run_in_guest('sfa-import-plc.py')==0
def sfa_start(self):
    "service sfa start"
    # the step succeeds when the init script exits 0
    started = self.run_in_guest('service sfa start')
    return started == 0
def sfi_configure(self):
    "Create /root/sfi on the plc side for sfi client configuration"
    if self.options.dry_run:
        utils.header("DRY RUN - skipping step")
        # NOTE(review): the early 'return True' expected here is elided from
        # this chunk - without it a dry run would fall through and do work
    sfa_spec=self.plc_spec['sfa']
    # cannot use sfa_slice_mapper to pass dir_name
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSliceSfa(self,test_site,slice_spec)
        # one local config dir per sfa slice, wiped clean each time
        dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
        test_slice.sfi_config(dir_name)
        # push into the remote /root/sfi area
        location = test_slice.sfi_path()
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.mkdir(remote,abs=True)
        # need to strip last level or remote otherwise we get an extra dir level
        self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
def sfi_clean (self):
    "clean up /root/sfi on the plc side"
    # BUGFIX: propagate the command's success instead of implicitly
    # returning None, so the step framework sees a proper boolean
    return self.run_in_guest("rm -rf /root/sfi")==0
### sfi/sfa step stubs
# NOTE(review): decorator and body lines for the steps below are elided from
# this chunk (presumably slice_sfa_mapper-driven, see file head) - confirm
# against the full file
def sfa_add_user(self):
def sfa_update_user(self):
def sfa_add_slice(self):
    "run sfi.py add (on Registry) from slice.xml"
def sfa_discover(self):
    "discover resources into resouces_in.rspec"
def sfa_create_slice(self):
    "run sfi.py create (on SM) - 1st time"
def sfa_check_slice_plc(self):
    "check sfa_create_slice at the plcs - all local nodes should be in slice"
def sfa_update_slice(self):
    "run sfi.py create (on SM) on existing object"
# NOTE(review): stray docstring - its enclosing def is not visible here
"various registry-related calls"
def ssh_slice_sfa(self):
    "tries to ssh-enter the SFA slice"
def sfa_delete_user(self):
def sfa_delete_slice(self):
    "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
# NOTE(review): orphan statement - its enclosing def (sfa_stop?) is elided
self.run_in_guest('service sfa stop')==0
def populate (self):
    "creates random entries in the PLCAPI"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # NOTE(review): the line initializing 'command' (presumably from
    # 'location') is elided from this chunk - confirm against full file
    command += " -- --preserve --short-names"
    local = (self.run_in_guest(command) == 0);
    # second run with --foreign
    command += ' --foreign'
    # NOTE(review): 'remote' is re-used here as a boolean, clobbering the
    # path computed above - works, but misleading naming
    remote = (self.run_in_guest(command) == 0);
    return ( local and remote)
def gather_logs (self):
    "gets all possible logs from plc's/qemu node's/slice's for future reference"
    # best-effort collection; each helper below shells out via utils.system
    # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
    # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
    # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
    # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
    # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
    # (1.a)
    print "-------------------- TestPlc.gather_logs : PLC's /var/log"
    self.gather_var_logs ()
    # (1.b)
    print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
    self.gather_pgsql_logs ()
    # (2)
    print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            test_node.gather_qemu_logs()
    # (3)
    print "-------------------- TestPlc.gather_logs : nodes's /var/log"
    self.gather_nodes_var_logs()
    # (4)
    print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
    self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    # fetch each sliver's /var/log into logs/sliver.var-log.<sliver>/
    for sliver in self.all_sliver_objs():
        remote_tar = sliver.tar_var_logs()
        local_dir = "logs/sliver.var-log.%s" % sliver.name()
        utils.system("mkdir -p %s" % local_dir)
        utils.system(remote_tar + " | tar -C %s -xf -" % local_dir)
def gather_var_logs (self):
    # plc's /var/log -> logs/myplc.var-log.<plc>/, then open up the httpd subdir
    target = "logs/myplc.var-log.%s" % self.name()
    utils.system("mkdir -p %s" % target)
    tar_out = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -" % target)
    # make the httpd logs readable by regular users
    utils.system("chmod a+r,a+x %s/httpd" % target)
def gather_pgsql_logs (self):
    # plc's postgres logs -> logs/myplc.pgsql-log.<plc>/
    target = "logs/myplc.pgsql-log.%s" % self.name()
    utils.system("mkdir -p %s" % target)
    tar_out = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -" % target)
def gather_nodes_var_logs (self):
    # every node's /var/log -> logs/node.var-log.<node>/, fetched over ssh
    for site_spec in self.plc_spec['sites']:
        site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode(self,site,node_spec)
            node_ssh = TestSsh (node.name(),key="keys/key1.rsa")
            target = "logs/node.var-log.%s" % node.name()
            fetch = node_ssh.actual_command("tar -C /var/log -cf - .")
            fetch = fetch + "| tar -C %s -xf -" % target
            utils.system("mkdir -p %s" % target)
            utils.system(fetch)
# returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    # uses options.dbname if it is found
    # NOTE(review): lines are elided from this chunk (presumably a
    # try/except around the options.dbname lookup and the timestamp-derived
    # fallback for 'name') - confirm against the full file
    name=self.options.dbname
    if not isinstance(name,StringTypes):
        t=datetime.datetime.now()
    return "/root/%s-%s.sql"%(database,name)
def plc_db_dump(self):
    'dump the planetlab5 DB in /root in the PLC - filename has time'
    # NOTE(review): "planetab5" looks like a typo for "planetlab5", but
    # plc_db_restore relies on the same name - change both together if fixing
    dump_path = self.dbfile("planetab5")
    self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump_path)
    utils.header('Dumped planetlab5 database in %s'%dump_path)
def plc_db_restore(self):
    'restore the planetlab5 DB - looks broken, but run -n might help'
    # NOTE(review): "planetab5" matches the typo in plc_db_dump on purpose -
    # both must name the same file
    dump_path = self.dbfile("planetab5")
    # httpd is kept down while the database gets swapped out
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
    self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump_path)
    self.run_in_guest('service httpd start')
    utils.header('Database restored from ' + dump_path)
# dummy wait steps; the standby_generic decorator (see file head) derives the
# sleep duration in minutes from each method name
# NOTE(review): the decorator lines themselves appear to be elided from this
# chunk - confirm against the full file
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass