1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
24 # step methods must take (self) and return a boolean (options is a member of the class)
# Utility between steps: just wait for a number of minutes.
def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    # NOTE(review): dry_run handling (lines elided from this view) presumably
    # skips the sleep when dry_run is set - confirm against the full source
    time.sleep(60*minutes)
# Factory for the standby_<n>_mn steps: parses <n> out of the step name
# and delegates to standby().
def standby_generic (func):
    # NOTE(review): an enclosing 'def actual(self):' wrapper is elided from
    # this view - that is why 'self' is in scope in the lines below
        minutes=int(func.__name__.split("_")[1])
        return standby(minutes,self.options.dry_run)
# Step factory: lifts a TestNode method into a TestPlc step that runs it
# on every node of every site, and-ing the results.
def node_mapper (method):
    # NOTE(review): the 'def actual(self):' wrapper, 'overall=True' init and
    # the 'return overall' / 'return actual' lines are elided from this view
    node_method = TestNode.__dict__[method.__name__]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self,test_site,node_spec)
            if not node_method(test_node): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
# Step factory: lifts a TestSlice method into a TestPlc step that runs it
# on every slice of the spec, and-ing the results.
def slice_mapper (method):
    # NOTE(review): the 'def actual(self):' wrapper, 'overall=True' init and
    # the return statements are elided from this view
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
# Same as slice_mapper, but maps over the SFA slice specs / TestSliceSfa.
def slice_sfa_mapper (method):
    # NOTE(review): the 'def actual(self):' wrapper, 'overall=True' init and
    # the return statements are elided from this view
    slice_method = TestSliceSfa.__dict__[method.__name__]
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSliceSfa(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
89 'vs_delete','timestamp_vs','vs_create', SEP,
90 'plc_install', 'plc_configure', 'plc_start', SEP,
91 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
92 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
93 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
94 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
95 'sfa_install', 'sfa_configure', 'cross_sfa_configure', 'sfa_import', 'sfa_start', SEPSFA,
96 'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
97 'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
98 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
99 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
100 # but as the stress test might take a while, we sometimes missed the debug mode..
101 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
102 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
103 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
105 'force_gather_logs', SEP,
111 'show_boxes', 'local_list','local_rel','local_rel_plc','local_rel_qemu',SEP,
112 'plc_stop', 'vs_start', 'vs_stop', SEP,
113 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
114 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
115 'delete_leases', 'list_leases', SEP,
117 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
118 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
119 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEP,
120 'plc_db_dump' , 'plc_db_restore', SEP,
121 'standby_1 through 20',SEP,
def printable_steps (list):
    """Join step names into a single printable string, breaking the line
    at each separator pseudo-step."""
    rendered = " ".join(list) + " "
    for marker in (SEP, SEPSFA):
        rendered = rendered.replace(" " + marker + " ", " \\\n")
    return rendered
def valid_step (step):
    """A step name is valid unless it is one of the separator pseudo-steps."""
    return step not in (SEP, SEPSFA)
132 # turn off the sfa-related steps when build has skipped SFA
133 # this is originally for centos5 as recent SFAs won't build on this platform
# Probe the build's rpm repo for sfa packages; when absent, demote every
# sfa-related step from default_steps to other_steps.
def check_whether_build_has_sfa (rpms_url):
    # warning, we're now building 'sface' so let's be a bit more picky
    retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
    # full builds are expected to return with 0 here
    # NOTE(review): the early 'return' taken when retcod==0 is elided here
    # move all steps containing 'sfa' from default_steps to other_steps
    sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
    TestPlc.other_steps += sfa_steps
    for step in sfa_steps: TestPlc.default_steps.remove(step)
def __init__ (self,plc_spec,options):
    """Bind a plc spec and command-line options; set up ssh and API helpers.

    NOTE(review): 'self.options=options' and the vserver-vs-chroot
    conditional are elided from this view; the 'raise' below sits on the
    (deprecated) chroot branch.
    """
    self.plc_spec=plc_spec
    self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
    self.vserverip=plc_spec['vserverip']
    self.vservername=plc_spec['vservername']
    # the guest's PLCAPI endpoint, reached through its vserver IP
    self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
    raise Exception,'chroot-based myplc testing is deprecated'
    self.apiserver=TestApiserver(self.url,options.dry_run)
159 name=self.plc_spec['name']
160 return "%s.%s"%(name,self.vservername)
163 return self.plc_spec['host_box']
166 return self.test_ssh.is_local()
168 # define the API methods on this object through xmlrpc
169 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    """Turn <command> into the full ssh command line that runs it inside the guest."""
    in_guest = self.host_to_guest(command)
    return self.test_ssh.actual_command(in_guest)
def start_guest (self):
    """Start the guest vserver by driving the host box through ssh."""
    host_side = self.start_guest_in_host()
    return utils.system(self.test_ssh.actual_command(host_side))
def stop_guest (self):
    """Stop the guest vserver by driving the host box through ssh."""
    host_side = self.stop_guest_in_host()
    return utils.system(self.test_ssh.actual_command(host_side))
def run_in_guest (self,command):
    """Run <command> inside the guest vserver; return the utils.system status."""
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    """Run <command> on the host box (outside the guest), in the build dir."""
    ssh = self.test_ssh
    return ssh.run_in_buildname(command)
#command gets run in the vserver
def host_to_guest(self,command):
    """Wrap <command> so that the host executes it inside the vserver guest."""
    return "vserver " + self.vservername + " exec " + command
#start/stop the vserver
def start_guest_in_host(self):
    """Host-side command string that starts the guest vserver."""
    return "vserver " + self.vservername + " start"
def stop_guest_in_host(self):
    """Host-side command string that stops the guest vserver."""
    return "vserver " + self.vservername + " stop"
def run_in_guest_piped (self,local,remote):
    """Run <local> here and pipe its stdout into <remote> executed in the guest."""
    guest_side = self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True)
    return utils.system(local + " | " + guest_side)
def auth_root (self):
    """Build the PLCAPI 'auth' struct for the root account, from the plc spec."""
    return {'Username':self.plc_spec['PLC_ROOT_USER'],
            'AuthMethod':'password',
            'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
            'Role' : self.plc_spec['role']
            # (closing brace elided from this view)
def locate_site (self,sitename):
    """Return the site spec whose name or login_base matches <sitename>; raise otherwise."""
    # NOTE(review): the 'return site' bodies of both tests are elided from this view
    for site in self.plc_spec['sites']:
        if site['site_fields']['name'] == sitename:
        if site['site_fields']['login_base'] == sitename:
    raise Exception,"Cannot locate site %s"%sitename
def locate_node (self,nodename):
    """Locate a node by its spec name; callers unpack the result as (site,node)."""
    # NOTE(review): the return statement inside the test is elided from this view
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['name'] == nodename:
    raise Exception,"Cannot locate node %s"%nodename
def locate_hostname (self,hostname):
    """Locate a node by hostname; callers unpack the result as (site_spec,node_spec)."""
    # NOTE(review): the return statement inside the test is elided from this view
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['node_fields']['hostname'] == hostname:
    raise Exception,"Cannot locate hostname %s"%hostname
def locate_key (self,keyname):
    """Return the key spec named <keyname>; raise when absent."""
    # NOTE(review): the return statement inside the test is elided from this view
    for key in self.plc_spec['keys']:
        if key['name'] == keyname:
    raise Exception,"Cannot locate key %s"%keyname
def locate_slice (self, slicename):
    """Return the slice spec named <slicename>; raise when absent."""
    # NOTE(review): the return statement inside the test is elided from this view
    for slice in self.plc_spec['slices']:
        if slice['slice_fields']['name'] == slicename:
    raise Exception,"Cannot locate slice %s"%slicename
def all_sliver_objs (self):
    """Build a TestSliver for every (slice,node) pair in the spec."""
    # NOTE(review): 'result=[]' init and the final return are elided from this view
    for slice_spec in self.plc_spec['slices']:
        slicename = slice_spec['slice_fields']['name']
        for nodename in slice_spec['nodenames']:
            result.append(self.locate_sliver_obj (nodename,slicename))
def locate_sliver_obj (self,nodename,slicename):
    """Build the TestSliver object for the (nodename,slicename) pair."""
    (site,node) = self.locate_node(nodename)
    slice = self.locate_slice (slicename)
    # wrap the raw specs in their Test* helpers
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
    # xxx the slice site is assumed to be the node site - mhh - probably harmless
    test_slice = TestSlice (self, test_site, slice)
    return TestSliver (self, test_node, test_slice)
def locate_first_node(self):
    """Build a TestNode for the first node of the first slice in the spec."""
    nodename=self.plc_spec['slices'][0]['nodenames'][0]
    (site,node) = self.locate_node(nodename)
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
    # NOTE(review): the 'return test_node' line appears elided from this view
def locate_first_sliver (self):
    """Return the TestSliver for the first node of the first slice in the spec."""
    first_slice = self.plc_spec['slices'][0]
    return self.locate_sliver_obj(first_slice['nodenames'][0],
                                  first_slice['slice_fields']['name'])
# all different hostboxes used in this plc
def gather_hostBoxes(self):
    """Group the (qemu) nodes by host box: { host_box : [ test_node, ... ] }."""
    # maps on sites and nodes, return [ (host_box,test_node) ]
    # NOTE(review): 'tuples=[]' / 'result={}' inits, the new-box branch and
    # the final 'return result' are elided from this view
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self, test_site, node_spec)
            # real (non-qemu) nodes have no host box to track
            if not test_node.is_real():
                tuples.append( (test_node.host_box(),test_node) )
    # transform into a dict { 'host_box' -> [ test_node .. ] }
    for (box,node) in tuples:
        if not result.has_key(box):
        result[box].append(node)
# a step for checking this stuff
def show_boxes (self):
    'print summary of nodes location'
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        print box,":"," + ".join( [ node.name() for node in nodes ] )
    # NOTE(review): a trailing 'return True' appears elided from this view
# make this a valid step
def qemu_kill_all(self):
    'kill all qemu instances on the qemu boxes involved by this setup'
    # this is the brute force version, kill all qemus on that host box
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # pass the first nodename, as we don't push template-qemu on testboxes
        nodedir=nodes[0].nodedir()
        TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
    # NOTE(review): a trailing 'return True' appears elided from this view
# make this a valid step
def qemu_list_all(self):
    'list all qemu instances on the qemu boxes involved by this setup'
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # this is the brute force version, kill all qemus on that host box
        TestBoxQemu(box,self.options.buildname).qemu_list_all()
    # NOTE(review): a trailing 'return True' appears elided from this view
# kill only the right qemus
def qemu_list_mine(self):
    'list qemu instances for our nodes'
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version
        # NOTE(review): the per-box listing call and trailing return are
        # elided from this view
# kill only the right qemus
def qemu_kill_mine(self):
    'kill the qemu instances for our nodes'
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version
        # NOTE(review): the per-box kill call and trailing return are
        # elided from this view
336 #################### display config
338 "show test configuration after localization"
339 self.display_pass (1)
340 self.display_pass (2)
344 "print cut'n paste-able stuff to export env variables to your shell"
345 # these work but the shell prompt does not get displayed..
346 command1="ssh %s vserver %s enter"%(self.plc_spec['host_box'],self.plc_spec['vservername'])
347 command2="ssh root@%s %s"%(socket.gethostname(),command1)
348 # guess local domain from hostname
349 domain=socket.gethostname().split('.',1)[1]
350 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
351 print "export BUILD=%s"%self.options.buildname
352 print "export PLCHOST=%s"%fqdn
353 print "export GUEST=%s"%self.plc_spec['vservername']
354 # find hostname of first node
355 (hostname,_) = self.all_node_infos()[0]
356 print "export NODE=%s"%(hostname)
360 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
def display_pass (self,passno):
    """One display pass over the plc spec; pass 1 and 2 show different keys.

    NOTE(review): the pass-number tests and the 'sites'/'slices'/'keys'
    branch headers are elided from this view - indentation below is a guess.
    """
    for (key,val) in self.plc_spec.iteritems():
        # terse by default; --verbose shows everything
        if not self.options.verbose and key not in TestPlc.always_display_keys: continue
            self.display_site_spec(site)
            for node in site['nodes']:
                self.display_node_spec(node)
        elif key=='initscripts':
            for initscript in val:
                self.display_initscript_spec (initscript)
                self.display_slice_spec (slice)
                self.display_key_spec (key)
            if key not in ['sites','initscripts','slices','keys', 'sfa']:
                print '+ ',key,':',val
def display_site_spec (self,site):
    """Print a per-site summary: nodes, users, login_base, address.

    NOTE(review): several branch headers ('nodes', 'users', ...) and their
    closing prints are elided from this view - indentation is a guess.
    """
    print '+ ======== site',site['site_fields']['name']
    for (k,v) in site.iteritems():
        if not self.options.verbose and k not in TestPlc.always_display_keys: continue
            print '+ ','nodes : ',
                print node['node_fields']['hostname'],'',
                print user['name'],'',
        elif k == 'site_fields':
            print '+ login_base',':',v['login_base']
        elif k == 'address_fields':
407 def display_initscript_spec (self,initscript):
408 print '+ ======== initscript',initscript['initscript_fields']['name']
410 def display_key_spec (self,key):
411 print '+ ======== key',key['name']
def display_slice_spec (self,slice):
    """Print a per-slice summary (fields, max_nodes, ...).

    NOTE(review): most of the key-dispatch branches are elided from this view.
    """
    print '+ ======== slice',slice['slice_fields']['name']
    for (k,v) in slice.iteritems():
        elif k=='slice_fields':
            print '+ fields',':',
            print 'max_nodes=',v['max_nodes'],
435 def display_node_spec (self,node):
436 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
437 print "hostname=",node['node_fields']['hostname'],
438 print "ip=",node['interface_fields']['ip']
439 if self.options.verbose:
440 utils.pprint("node details",node,depth=3)
# another entry point for just showing the boxes involved
def display_mapping (self):
    """Step: show where the plc and its nodes are mapped."""
    TestPlc.display_mapping_plc(self.plc_spec)
    # NOTE(review): a trailing 'return True' appears elided from this view
448 def display_mapping_plc (plc_spec):
449 print '+ MyPLC',plc_spec['name']
450 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
451 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
452 for site_spec in plc_spec['sites']:
453 for node_spec in site_spec['nodes']:
454 TestPlc.display_mapping_node(node_spec)
457 def display_mapping_node (node_spec):
458 print '+ NODE %s'%(node_spec['name'])
459 print '+\tqemu box %s'%node_spec['host_box']
460 print '+\thostname=%s'%node_spec['node_fields']['hostname']
# write a timestamp in /vservers/<>.timestamp
# cannot be inside the vserver, that causes vserver .. build to cough
def timestamp_vs (self):
    # NOTE(review): the 'now=...' assignment appears elided from this view
    return utils.system(self.test_ssh.actual_command("echo %d > /vservers/%s.timestamp"%(now,self.vservername)))==0
def local_pre (self):
    "run site-dependant pre-test script as defined in LocalTestResources"
    # deferred import: LocalTestResources is site-provided
    import LocalTestResources
    return LocalTestResources.local_resources.step_pre(self)
def local_post (self):
    "run site-dependant post-test script as defined in LocalTestResources"
    # deferred import: LocalTestResources is site-provided
    import LocalTestResources
    return LocalTestResources.local_resources.step_post(self)
def local_list (self):
    "run site-dependant list script as defined in LocalTestResources"
    # deferred import: LocalTestResources is site-provided
    import LocalTestResources
    return LocalTestResources.local_resources.step_list(self)
def local_rel (self):
    "run site-dependant release script as defined in LocalTestResources"
    # deferred import: LocalTestResources is site-provided
    import LocalTestResources
    return LocalTestResources.local_resources.step_release(self)
def local_rel_plc (self):
    "run site-dependant release script as defined in LocalTestResources"
    # deferred import: LocalTestResources is site-provided
    import LocalTestResources
    return LocalTestResources.local_resources.step_release_plc(self)
def local_rel_qemu (self):
    "run site-dependant release script as defined in LocalTestResources"
    # deferred import: LocalTestResources is site-provided
    import LocalTestResources
    return LocalTestResources.local_resources.step_release_qemu(self)
499 "vserver delete the test myplc"
500 self.run_in_host("vserver --silent %s delete"%self.vservername)
501 self.run_in_host("rm -f /vservers/%s.timestamp"%self.vservername)
# historically the build was being fetched by the tests
# now the build pushes itself as a subdir of the tests workdir
# so that the tests do not have to worry about extracting the build (svn, git, or whatever)
def vs_create (self):
    "vserver creation (no install done)"
    # push the local build/ dir to the testplc box
    # a full path for the local calls
    build_dir=os.path.dirname(sys.argv[0])
    # sometimes this is empty - set to "." in such a case
    if not build_dir: build_dir="."
    build_dir += "/build"
    # use a standard name - will be relative to remote buildname
    # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
    self.test_ssh.rmdir(build_dir)
    self.test_ssh.copy(build_dir,recursive=True)
    # the repo url is taken from arch-rpms-url
    # with the last step (i386) removed
    repo_url = self.options.arch_rpms_url
    for level in [ 'arch' ]:
        repo_url = os.path.dirname(repo_url)
    # pass the vbuild-nightly options to vtest-init-vserver
    # NOTE(review): the 'test_env_options=""' init appears elided from this view
    test_env_options += " -p %s"%self.options.personality
    test_env_options += " -d %s"%self.options.pldistro
    test_env_options += " -f %s"%self.options.fcdistro
    script="vtest-init-vserver.sh"
    vserver_name = self.vservername
    vserver_options="--netdev eth0 --interface %s"%self.vserverip
    # NOTE(review): a try/except around the reverse DNS lookup appears elided;
    # the two prints below sit on the failure branch
    vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
    vserver_options += " --hostname %s"%vserver_hostname
    print "Cannot reverse lookup %s"%self.vserverip
    print "This is considered fatal, as this might pollute the test results"
    create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
    return self.run_in_host(create_vserver) == 0
def plc_install(self):
    "yum install myplc, noderepo, and the plain bootstrapfs"
    # NOTE(review): the 'arch=' assignments on the personality branches and
    # the 'pkgs_list=[]' init are elided from this view
    # workaround for getting pgsql8.2 on centos5
    if self.options.fcdistro == "centos5":
        self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
    if self.options.personality == "linux32":
    elif self.options.personality == "linux64":
        raise Exception, "Unsupported personality %r"%self.options.personality
    # the node family ties the noderepo/bootstrapfs packages to this build
    nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
    pkgs_list.append ("slicerepo-%s"%nodefamily)
    pkgs_list.append ("myplc")
    pkgs_list.append ("noderepo-%s"%nodefamily)
    pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
    pkgs_string=" ".join(pkgs_list)
    self.run_in_guest("yum -y install %s"%pkgs_string)
    # success is judged by every package actually being installed
    return self.run_in_guest("rpm -q %s"%pkgs_string)==0
def plc_configure(self):
    """Drive plc-config-tty in the guest: write an 'e <var>\\n<value>' script
    for selected PLC_* settings from the spec, pipe it in, then clean up.

    NOTE(review): most of the variable list, the 'fileconf.close()' and the
    final 'return True' are elided from this view.
    """
    tmpname='%s.plc-config-tty'%(self.name())
    fileconf=open(tmpname,'w')
    for var in [ 'PLC_NAME',
                 'PLC_MAIL_SUPPORT_ADDRESS',
                 # Above line was added for integrating SFA Testing
                 'PLC_RESERVATION_GRANULARITY',
                 'PLC_OMF_XMPP_SERVER',
        fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
    # 'w' writes the config, 'q' quits plc-config-tty
    fileconf.write('w\n')
    fileconf.write('q\n')
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
    utils.system('rm %s'%tmpname)
606 self.run_in_guest('service plc start')
611 self.run_in_guest('service plc stop')
615 "start the PLC vserver"
620 "stop the PLC vserver"
# stores the keys from the config for further use
def keys_store(self):
    "stores test users ssh keys in keys/"
    for key_spec in self.plc_spec['keys']:
        TestKey(self,key_spec).store_key()
    # NOTE(review): a trailing 'return True' appears elided from this view
def keys_clean(self):
    "removes keys cached in keys/"
    utils.system("rm -rf ./keys")
    # NOTE(review): a trailing 'return True' appears elided from this view
# fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
# for later direct access to the nodes
def keys_fetch(self):
    "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
    # NOTE(review): the 'dir=...' assignment, the mkdir on the branch below,
    # the 'overall=True' init and the final return are elided from this view
    if not os.path.isdir(dir):
    vservername=self.vservername
    prefix = 'debug_ssh_key'
    # fetch both halves of the debug key pair
    for ext in [ 'pub', 'rsa' ] :
        src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
        dst="keys/%(vservername)s-debug.%(ext)s"%locals()
        if self.test_ssh.fetch(src,dst) != 0: overall=False
653 "create sites with PLCAPI"
654 return self.do_sites()
def delete_sites (self):
    "delete sites with PLCAPI"
    # delegate to the generic worker, in delete mode
    return self.do_sites("delete")
def do_sites (self,action="add"):
    """Worker for sites/delete_sites: create or delete all sites (and users)."""
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        if (action != "add"):
            utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
            test_site.delete_site()
            # deleted with the site
            #test_site.delete_users()
            # NOTE(review): the 'else:' header for the creation branch and
            # the final 'return True' are elided from this view
            utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
            test_site.create_site()
            test_site.create_users()
def delete_all_sites (self):
    "Delete all sites in PLC, and related objects"
    print 'auth_root',self.auth_root()
    # fetch only the ids, that's all DeleteSite needs
    site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
    for site_id in site_ids:
        print 'Deleting site_id',site_id
        self.apiserver.DeleteSite(self.auth_root(),site_id)
    # NOTE(review): a trailing 'return True' appears elided from this view
685 "create nodes with PLCAPI"
686 return self.do_nodes()
def delete_nodes (self):
    "delete nodes with PLCAPI"
    # delegate to the generic worker, in delete mode
    return self.do_nodes("delete")
def do_nodes (self,action="add"):
    """Worker for nodes/delete_nodes: create or delete every node, site by site."""
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        # NOTE(review): the 'if action != "add":' header, the 'else:' marker
        # and the final 'return True' are elided from this view
        utils.header("Deleting nodes in site %s"%test_site.name())
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            utils.header("Deleting %s"%test_node.name())
            test_node.delete_node()
        utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
        for node_spec in site_spec['nodes']:
            utils.pprint('Creating node %s'%node_spec,node_spec)
            test_node = TestNode (self,test_site,node_spec)
            test_node.create_node ()
def nodegroups (self):
    "create nodegroups with PLCAPI"
    # delegate to the generic worker, in add mode
    return self.do_nodegroups(action="add")
def delete_nodegroups (self):
    "delete nodegroups with PLCAPI"
    # delegate to the generic worker, in delete mode
    return self.do_nodegroups(action="delete")
def translate_timestamp (start,grain,timestamp):
    # small values are relative: a number of <grain>s counted from <start>;
    # values of a year or more are taken as absolute timestamps
    if timestamp >= TestPlc.YEAR:
        return timestamp
    return start + timestamp*grain
def timestamp_printable (timestamp):
    # render a UNIX timestamp in UTC as 'MM-DD HH:MM:SS UTC'
    broken_down = time.gmtime(timestamp)
    return time.strftime('%m-%d %H:%M:%S UTC',broken_down)
726 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
728 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
729 print 'API answered grain=',grain
730 start=(now/grain)*grain
732 # find out all nodes that are reservable
733 nodes=self.all_reservable_nodenames()
735 utils.header ("No reservable node found - proceeding without leases")
738 # attach them to the leases as specified in plc_specs
739 # this is where the 'leases' field gets interpreted as relative of absolute
740 for lease_spec in self.plc_spec['leases']:
741 # skip the ones that come with a null slice id
742 if not lease_spec['slice']: continue
743 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
744 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
745 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
746 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
747 if lease_addition['errors']:
748 utils.header("Cannot create leases, %s"%lease_addition['errors'])
751 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
752 (nodes,lease_spec['slice'],
753 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
754 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
def delete_leases (self):
    "remove all leases in the myplc side"
    lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
    utils.header("Cleaning leases %r"%lease_ids)
    self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
    # NOTE(review): a trailing 'return True' appears elided from this view
def list_leases (self):
    "list all leases known to the myplc"
    leases = self.apiserver.GetLeases(self.auth_root())
    # NOTE(review): the 'now=...' assignment and the 'for l in leases:'
    # header are elided from this view
        current=l['t_until']>=now
        # expired leases are only shown with --verbose
        if self.options.verbose or current:
            utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                                   TestPlc.timestamp_printable(l['t_from']),
                                                   TestPlc.timestamp_printable(l['t_until'])))
# create nodegroups if needed, and populate
def do_nodegroups (self, action="add"):
    """Worker for nodegroups/delete_nodegroups.

    Nodegroups are implemented as a (tag type, node tag) pair plus a
    NodeGroup object keyed on value 'yes'.
    NOTE(review): several branch headers (the action tests, the tag-type
    lookup branches, try/except markers) and the 'groups_dict={}' init
    are elided from this view - indentation below is a best guess.
    """
    # 1st pass to scan contents
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode (self,test_site,node_spec)
            if node_spec.has_key('nodegroups'):
                nodegroupnames=node_spec['nodegroups']
                # a single name is accepted as shorthand for a one-item list
                if isinstance(nodegroupnames,StringTypes):
                    nodegroupnames = [ nodegroupnames ]
                for nodegroupname in nodegroupnames:
                    if not groups_dict.has_key(nodegroupname):
                        groups_dict[nodegroupname]=[]
                    groups_dict[nodegroupname].append(test_node.name())
    auth=self.auth_root()
    for (nodegroupname,group_nodes) in groups_dict.iteritems():
            print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
            # first, check if the nodetagtype is here
            tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
                tag_type_id = tag_types[0]['tag_type_id']
                tag_type_id = self.apiserver.AddTagType(auth,
                                                        {'tagname':nodegroupname,
                                                         'description': 'for nodegroup %s'%nodegroupname,
            print 'located tag (type)',nodegroupname,'as',tag_type_id
            nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
                self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
            # set node tag on all nodes, value='yes'
            for nodename in group_nodes:
                    self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    traceback.print_exc()
                    print 'node',nodename,'seems to already have tag',nodegroupname
                    # check anyway that the tag really carries 'yes'
                    expect_yes = self.apiserver.GetNodeTags(auth,
                                                            {'hostname':nodename,
                                                             'tagname':nodegroupname},
                                                            ['value'])[0]['value']
                    if expect_yes != "yes":
                        print 'Mismatch node tag on node',nodename,'got',expect_yes
                if not self.options.dry_run:
                    print 'Cannot find tag',nodegroupname,'on node',nodename
            # delete branch: tear the nodegroup down
            print 'cleaning nodegroup',nodegroupname
            self.apiserver.DeleteNodeGroup(auth,nodegroupname)
            traceback.print_exc()
# return a list of tuples (nodename,qemuname)
def all_node_infos (self) :
    """Collect (hostname, qemu host box) for every node in the spec."""
    # NOTE(review): the accumulator init and final return are elided from this view
    for site_spec in self.plc_spec['sites']:
        node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                        for node_spec in site_spec['nodes'] ]
def all_nodenames (self):
    # hostnames of every node in the spec
    return [ hostname for (hostname,qemuname) in self.all_node_infos() ]
def all_reservable_nodenames (self):
    """Hostnames of the nodes whose node_type is 'reservable'."""
    # NOTE(review): the 'res=[]' init and 'return res' are elided from this view
    for site_spec in self.plc_spec['sites']:
        for node_spec in site_spec['nodes']:
            node_fields=node_spec['node_fields']
            if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                res.append(node_fields['hostname'])
# silent_minutes : during the first <silent_minutes> minutes nothing gets printed
def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
    """Poll PLCAPI until all nodes reach <target_boot_state> or timeout expires.

    NOTE(review): the dry_run early return, the polling loop header, the
    sleep and the success/failure returns are elided from this view -
    indentation below is a best guess.
    """
    if self.options.dry_run:
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
    # the nodes that haven't checked yet - start with a full list and shrink over time
    tocheck = self.all_hostnames()
    utils.header("checking nodes %r"%tocheck)
    # create a dict hostname -> status
    status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
        for array in tocheck_status:
            hostname=array['hostname']
            boot_state=array['boot_state']
            if boot_state == target_boot_state:
                utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                # if it's a real node, never mind
                (site_spec,node_spec)=self.locate_hostname(hostname)
                if TestNode.is_real_model(node_spec['node_fields']['model']):
                    utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                    # pretend it reached the target so it drops off the list
                    boot_state = target_boot_state
                elif datetime.datetime.now() > graceout:
                    utils.header ("%s still in '%s' state"%(hostname,boot_state))
                    # push graceout out so the message is not repeated every period
                    graceout=datetime.datetime.now()+datetime.timedelta(1)
            status[hostname] = boot_state
        tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
        if datetime.datetime.now() > timeout:
            for hostname in tocheck:
                utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
        # otherwise, sleep for a while
    # only useful in empty plcs
def nodes_booted(self):
    # wait for every node to reach 'boot'; stay quiet for the first 20 minutes
    target = 'boot'
    return self.nodes_check_boot_state(target,silent_minutes=20,timeout_minutes=30)
def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
    """Try to ssh into every node until success or timeout.

    In debug mode the locally-fetched debug key is used, otherwise key1.
    NOTE(review): the 'message' assignments, the polling loop header, the
    success test, the sleep and the returns are elided from this view -
    indentation below is a best guess.
    """
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
    vservername=self.vservername
    local_key = "keys/%(vservername)s-debug.rsa"%locals()
    local_key = "keys/key1.rsa"
    node_infos = self.all_node_infos()
    utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
    for (nodename,qemuname) in node_infos:
        utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
    utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                 (timeout_minutes,silent_minutes,period))
        for node_info in node_infos:
            (hostname,qemuname) = node_info
            # try to run 'hostname' in the node
            command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
            # don't spam logs - show the command only after the grace period
            success = utils.system ( command, silent=datetime.datetime.now() < graceout)
                utils.header('Successfully entered root@%s (%s)'%(hostname,message))
                node_infos.remove(node_info)
                # we will have tried real nodes once, in case they're up - but if not, just skip
                (site_spec,node_spec)=self.locate_hostname(hostname)
                if TestNode.is_real_model(node_spec['node_fields']['model']):
                    utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                    node_infos.remove(node_info)
        if datetime.datetime.now() > timeout:
            for (hostname,qemuname) in node_infos:
                utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
        # otherwise, sleep for a while
    # only useful in empty plcs
def ssh_node_debug(self):
    "Tries to ssh into nodes in debug mode with the debug ssh key"
    # short timeouts: debug mode is expected early in the node's life cycle
    return self.check_nodes_ssh(debug=True,silent_minutes=5,timeout_minutes=10)
def ssh_node_boot(self):
    "Tries to ssh into nodes in production mode with the root ssh key"
    # longer timeouts: nodes go through a full install before reaching boot
    return self.check_nodes_ssh(debug=False,silent_minutes=15,timeout_minutes=40)
def qemu_local_init (self):
    "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
    # NOTE(review): body elided from this view - presumably a node_mapper-decorated stub
968 "all nodes: invoke GetBootMedium and store result locally"
def qemu_local_config (self):
    "all nodes: compute qemu config qemu.conf and store it locally"
    # NOTE(review): body elided from this view - presumably a node_mapper-decorated stub
def nodestate_reinstall (self):
    "all nodes: mark PLCAPI boot_state as reinstall"
    # NOTE(review): body elided from this view - presumably a node_mapper-decorated stub
def nodestate_safeboot (self):
    "all nodes: mark PLCAPI boot_state as safeboot"
    # NOTE(review): body elided from this view - presumably a node_mapper-decorated stub
def nodestate_boot (self):
    "all nodes: mark PLCAPI boot_state as boot"
    # NOTE(review): body elided from this view - presumably a node_mapper-decorated stub
def nodestate_show (self):
    "all nodes: show PLCAPI boot_state"
    # NOTE(review): body elided from this view - presumably a node_mapper-decorated stub
def qemu_export (self):
    "all nodes: push local node-dep directory on the qemu box"
    # NOTE(review): body elided from this view - presumably a node_mapper-decorated stub
995 ### check hooks : invoke scripts from hooks/{node,slice}
def check_hooks_node (self):
    # run the node-side hook tests on the first node only
    first_node = self.locate_first_node()
    return first_node.check_hooks()
def check_hooks_sliver (self) :
    # run the slice-side hook tests on the first sliver only
    first_sliver = self.locate_first_sliver()
    return first_sliver.check_hooks()
def check_hooks (self):
    "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
    # short-circuit: skip the sliver hooks when the node hooks already failed
    node_result = self.check_hooks_node()
    return node_result and self.check_hooks_sliver()
def do_check_initscripts(self):
    """Check, on every sliver, that the initscript stamps announced in the
    spec have actually been dropped.

    NOTE(review): the 'overall=True' init, the 'continue' on the branch
    below, the failure assignment and the final return are elided from
    this view.
    """
    for slice_spec in self.plc_spec['slices']:
        if not slice_spec.has_key('initscriptstamp'):
        stamp=slice_spec['initscriptstamp']
        for nodename in slice_spec['nodenames']:
            (site,node) = self.locate_node (nodename)
            # xxx - passing the wrong site - probably harmless
            test_site = TestSite (self,site)
            test_slice = TestSlice (self,test_site,slice_spec)
            test_node = TestNode (self,test_site,node)
            test_sliver = TestSliver (self, test_node, test_slice)
            if not test_sliver.check_initscript_stamp(stamp):
def check_initscripts(self):
    "check that the initscripts have triggered"
    # thin step wrapper around the real worker
    result = self.do_check_initscripts()
    return result
    def initscripts (self):
        "create initscripts with PLCAPI"
        # registers every initscript from the spec through the API
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
        # NOTE(review): a trailing 'return True' appears elided in this extract
        # (step methods are expected to return a boolean, per the file header)
    def delete_initscripts (self):
        "delete initscripts with PLCAPI"
        # best-effort deletion of every initscript declared in the spec
        # NOTE(review): the try/except wrapping around DeleteInitScript appears
        # elided in this extract (python-2 print statements throughout)
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
                self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
                print initscript_name,'deleted'
            print 'deletion went wrong - probably did not exist'
1048 "create slices with PLCAPI"
1049 return self.do_slices()
1051 def delete_slices (self):
1052 "delete slices with PLCAPI"
1053 return self.do_slices("delete")
    def do_slices (self, action="add"):
        # creates or deletes (per 'action') every slice declared in the spec
        # NOTE(review): the 'if action != "add":' / 'else:' branch lines and a
        # trailing 'return True' appear elided in this extract
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
                utils.header("Deleting slices in site %s"%test_site.name())
                test_slice.delete_slice()
                utils.pprint("Creating slice",slice)
                test_slice.create_slice()
                utils.header('Created Slice %s'%slice['slice_fields']['name'])
    # NOTE(review): the bodies of the four steps below appear elided in this
    # extract (presumably mapper-wrapped, see the mappers at the top of the file)
    def ssh_slice(self):
        "tries to ssh-enter the slice with the user key, to ensure slice creation"
    def keys_clear_known_hosts (self):
        "remove test nodes entries from the local known_hosts file"
    def qemu_start (self) :
        "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
    def timestamp_qemu (self) :
        "all nodes: timestamp the qemu instance"
        # NOTE(review): the original docstring here was a verbatim copy-paste of
        # qemu_start's; reworded to match the method name -- presumably this
        # records a timestamp on the qemu box, confirm against the full file
    def check_tcp (self):
        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        specs = self.plc_spec['tcp_test']
        # NOTE(review): several lines are elided in this extract (the overall
        # flag, the per-spec loop header, the port setup and the failure paths)
            s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
            if not s_test_sliver.run_tcp_server(port,timeout=10):
        # idem for the client side
            # NOTE(review): this looks up the *server* node/slice for the client
            # sliver -- likely should be spec['client_node'],spec['client_slice'];
            # as written the "client" always runs from the server sliver. Confirm.
            c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
            if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
    def plcsh_stress_test (self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        # NOTE(review): the initial 'command = ...' assignment is elided in this
        # extract -- 'command' is undefined at this point as shown
        command += " -- --check"
        if self.options.size == 1:
            command += " --tiny"
        return ( self.run_in_guest(command) == 0)
1119 # populate runs the same utility with slightly different options
1120 # in particular runs with --preserve (dont cleanup) and without --check
1121 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1124 def sfa_install(self):
1125 "yum install sfa, sfa-plc and sfa-client"
1127 self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")
1128 return self.run_in_guest("rpm -q sfa sfa-client sfa-plc sfa-sfatables")==0
1131 def sfa_dbclean(self):
1132 "thoroughly wipes off the SFA database"
1133 self.run_in_guest("sfa-nuke-plc.py")==0
    def sfa_plcclean(self):
        "cleans the PLC entries that were created as a side effect of running the script"
        # remove the slices and users the SFA scenario created on the PLC side
        sfa_spec=self.plc_spec['sfa']
        for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
            slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
            # best-effort deletions: absence is not an error (bare except, py2 prints)
            try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
            except: print "Slice %s already absent from PLC db"%slicename
            username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
            try: self.apiserver.DeletePerson(self.auth_root(),username)
            except: print "User %s already absent from PLC db"%username
        print "REMEMBER TO RUN sfa_import AGAIN"
        # NOTE(review): a trailing 'return True' appears elided in this extract
    def sfa_uninstall(self):
        "uses rpm to uninstall sfa - ignore result"
        # results deliberately ignored, per the docstring
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
        self.run_in_guest("rm -rf /etc/sfa")
        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
        # force-remove sfa-plc even if its uninstall scripts fail
        self.run_in_guest("rpm -e --noscripts sfa-plc")
        # NOTE(review): a trailing 'return True' appears elided in this extract
1163 ### run unit tests for SFA
1164 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1165 # Running Transaction
1166 # Transaction couldn't start:
1167 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1168 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1169 # no matter how many Gbs are available on the testplc
1170 # could not figure out what's wrong, so...
1171 # if the yum install phase fails, consider the test is successful
1172 # other combinations will eventually run it hopefully
    def sfa_utest(self):
        "yum install sfa-tests and run SFA unittests"
        self.run_in_guest("yum -y install sfa-tests")
        # failed to install - forget it
        if self.run_in_guest("rpm -q sfa-tests")!=0:
            utils.header("WARNING: SFA unit tests failed to install, ignoring")
            # NOTE(review): a 'return True' in this branch appears elided in this
            # extract -- as shown, control would fall through to testAll anyway
        return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1184 dirname="conf.%s"%self.plc_spec['name']
1185 if not os.path.isdir(dirname):
1186 utils.system("mkdir -p %s"%dirname)
1187 if not os.path.isdir(dirname):
1188 raise "Cannot create config dir for plc %s"%self.name()
1191 def conffile(self,filename):
1192 return "%s/%s"%(self.confdir(),filename)
    def confsubdir(self,dirname,clean,dry_run=False):
        # ensure (and optionally wipe first) a subdir under confdir()
        # NOTE(review): a 'return subdirname' appears elided in this extract --
        # sfi_configure below uses the return value as dir_name
        subdirname="%s/%s"%(self.confdir(),dirname)
        # NOTE(review): the 'if clean:' guard above this rm appears elided
            utils.system("rm -rf %s"%subdirname)
        if not os.path.isdir(subdirname):
            utils.system("mkdir -p %s"%subdirname)
        if not dry_run and not os.path.isdir(subdirname):
            # NOTE(review): string raise -- invalid in python >= 2.6
            raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1203 def conffile_clean (self,filename):
1204 filename=self.conffile(filename)
1205 return utils.system("rm -rf %s"%filename)==0
    def sfa_configure(self):
        "run sfa-config-tty"
        # generate an answer file locally, then pipe it into sfa-config-tty
        # running in the guest
        tmpname=self.conffile("sfa-config-tty")
        fileconf=open(tmpname,'w')
        # NOTE(review): several entries of this variable list are elided in
        # this extract
        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                     'SFA_INTERFACE_HRN',
                     # 'SFA_REGISTRY_LEVEL1_AUTH',
                     'SFA_REGISTRY_HOST',
                     'SFA_AGGREGATE_HOST',
                     'SFA_PLC_DB_PASSWORD',
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
        # the way plc_config handles booleans just sucks..
        for var in ['SFA_API_DEBUG']:
            # NOTE(review): the "val='false'" default branch appears elided here
            if self.plc_spec['sfa'][var]: val='true'
            fileconf.write ('e %s\n%s\n'%(var,val))
        # 'w'rite, 'R'estart, 'q'uit -- sfa-config-tty commands
        fileconf.write('w\n')
        fileconf.write('R\n')
        fileconf.write('q\n')
        # NOTE(review): a fileconf.close() appears elided in this extract
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1239 def aggregate_xml_line(self):
1240 port=self.plc_spec['sfa']['neighbours-port']
1241 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1242 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
1244 def registry_xml_line(self):
1245 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1246 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
    # a cross step that takes all other plcs in argument
    def cross_sfa_configure(self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        agg_fname=self.conffile("agg.xml")
        # python-2 'file' builtin; the handles are never explicitly closed
        file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
            " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%agg_fname)
        reg_fname=self.conffile("reg.xml")
        file(reg_fname,"w").write("<registries>%s</registries>\n" % \
            " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%reg_fname)
        # push both files into the guest's /etc/sfa; True only if both copies succeed
        return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
           and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
    def sfa_import(self):
        # NOTE(review): the docstring line appears elided in this extract
        # 'auth' is computed but unused in live code -- it only feeds the
        # commented-out legacy command below
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        return self.run_in_guest('sfa-import-plc.py')==0
        # not needed anymore
        # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
    def sfa_start(self):
        # NOTE(review): the docstring line appears elided in this extract --
        # presumably "service sfa start", matching the command below
        return self.run_in_guest('service sfa start')==0
    def sfi_configure(self):
        "Create /root/sfi on the plc side for sfi client configuration"
        if self.options.dry_run:
            utils.header("DRY RUN - skipping step")
            # NOTE(review): a 'return True' in this branch appears elided
        sfa_spec=self.plc_spec['sfa']
        # cannot use sfa_slice_mapper to pass dir_name
        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSliceSfa(self,test_site,slice_spec)
            # build the per-slice sfi config locally under conf.<plc>/dot-sfi/<slice>
            dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
            test_slice.sfi_config(dir_name)
            # push into the remote /root/sfi area
            location = test_slice.sfi_path()
            remote="/vservers/%s/%s"%(self.vservername,location)
            self.test_ssh.mkdir(remote,abs=True)
            # need to strip last level or remote otherwise we get an extra dir level
            self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
        # NOTE(review): a trailing 'return True' appears elided in this extract
    def sfi_clean (self):
        "clean up /root/sfi on the plc side"
        self.run_in_guest("rm -rf /root/sfi")
        # NOTE(review): no boolean is returned as shown -- a 'return True' may
        # be elided in this extract (step methods should return a boolean)
    # NOTE(review): the bodies of the SFA steps below appear elided in this
    # extract -- presumably @slice_sfa_mapper-wrapped (see mapper at file top)
    def sfa_add_user(self):
        # NOTE(review): docstring elided in this extract
    def sfa_update_user(self):
        # NOTE(review): docstring elided in this extract
    def sfa_add_slice(self):
        "run sfi.py add (on Registry) from slice.xml"
    def sfa_discover(self):
        "discover resources into resources_in.rspec"
        # (docstring typo fixed: 'resouces' -> 'resources')
    def sfa_create_slice(self):
        "run sfi.py create (on SM) - 1st time"
    def sfa_check_slice_plc(self):
        "check sfa_create_slice at the plcs - all local nodes should be in slice"
    def sfa_update_slice(self):
        "run sfi.py create (on SM) on existing object"
    # NOTE(review): orphan docstring -- its enclosing def (a registry-related
    # step) is elided in this extract
        "various registry-related calls"
    def ssh_slice_sfa(self):
        "tries to ssh-enter the SFA slice"
    def sfa_delete_user(self):
        # NOTE(review): docstring elided in this extract
    def sfa_delete_slice(self):
        "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
    # NOTE(review): the enclosing 'def sfa_stop(self):' header is elided; as
    # shown, the '== 0' result is discarded -- it should be 'return ... == 0'
        self.run_in_guest('service sfa stop')==0
    def populate (self):
        "creates random entries in the PLCAPI"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        # NOTE(review): the initial 'command = ...' assignment is elided in this
        # extract -- 'command' is undefined at this point as shown
        command += " -- --preserve --short-names"
        # first run: preserve contents, no --check
        local = (self.run_in_guest(command) == 0);
        # second run with --foreign
        command += ' --foreign'
        # NOTE(review): 'remote' is rebound here from a path string to a boolean
        remote = (self.run_in_guest(command) == 0);
        return ( local and remote)
    def gather_logs (self):
        "gets all possible logs from plc's/qemu node's/slice's for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        # (1.a) -- python-2 print statements throughout
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        # (1.b)
        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
        self.gather_pgsql_logs ()
        # (2)
        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        # (3)
        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()
        # (4)
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
        # NOTE(review): a trailing 'return True' appears elided in this extract
1405 def gather_slivers_var_logs(self):
1406 for test_sliver in self.all_sliver_objs():
1407 remote = test_sliver.tar_var_logs()
1408 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1409 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1410 utils.system(command)
1413 def gather_var_logs (self):
1414 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1415 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1416 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1417 utils.system(command)
1418 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1419 utils.system(command)
1421 def gather_pgsql_logs (self):
1422 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1423 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1424 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1425 utils.system(command)
1427 def gather_nodes_var_logs (self):
1428 for site_spec in self.plc_spec['sites']:
1429 test_site = TestSite (self,site_spec)
1430 for node_spec in site_spec['nodes']:
1431 test_node=TestNode(self,test_site,node_spec)
1432 test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
1433 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1434 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1435 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1436 utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        # NOTE(review): the try/except around options.dbname and the
        # fallback-to-timestamp branch appear elided in this extract
            name=self.options.dbname
            if not isinstance(name,StringTypes):
        t=datetime.datetime.now()
        # NOTE(review): the strftime formatting of 't' into 'name' is elided
        return "/root/%s-%s.sql"%(database,name)
    def plc_db_dump(self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        # NOTE(review): 'planetab5' (filename stem) vs 'planetlab5' (db name)
        # looks like a typo, but plc_db_restore uses the same stem so the
        # dump/restore pair is self-consistent -- fix both together if at all
        dump=self.dbfile("planetab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
        utils.header('Dumped planetlab5 database in %s'%dump)
        # NOTE(review): a trailing 'return True' appears elided in this extract
    def plc_db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        # NOTE(review): same 'planetab5' filename stem as plc_db_dump -- the
        # pair is self-consistent, so fix both together or not at all
        dump=self.dbfile("planetab5")
        ##stop httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
        ##starting httpd service
        self.run_in_guest('service httpd start')
        utils.header('Database restored from ' + dump)
        # NOTE(review): no boolean is returned as shown; a 'return True' may be
        # elided in this extract
    # twenty placeholder steps that simply wait <n> minutes each; the
    # '@standby_generic' decorator line wrapping each def appears elided in
    # this extract (see standby_generic at the top of the file, which derives
    # the minute count from the method name)
    def standby_1(): pass
    def standby_2(): pass
    def standby_3(): pass
    def standby_4(): pass
    def standby_5(): pass
    def standby_6(): pass
    def standby_7(): pass
    def standby_8(): pass
    def standby_9(): pass
    def standby_10(): pass
    def standby_11(): pass
    def standby_12(): pass
    def standby_13(): pass
    def standby_14(): pass
    def standby_15(): pass
    def standby_16(): pass
    def standby_17(): pass
    def standby_18(): pass
    def standby_19(): pass
    def standby_20(): pass