1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
24 # step methods must take (self) and return a boolean (options is a member of the class)
26 def standby(minutes,dry_run):
27 utils.header('Entering StandBy for %d mn'%minutes)
31 time.sleep(60*minutes)
34 def standby_generic (func):
36 minutes=int(func.__name__.split("_")[1])
37 return standby(minutes,self.options.dry_run)
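# a minimal sketch of the protocol described above (hypothetical step, not
# part of any step list): a step is an instance method that takes only self,
# may look at self.options, and returns a boolean
# def example_step (self):
#     "one-line description used when listing steps"
#     if self.options.dry_run: return True
#     return self.run_in_guest("service plc status") == 0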
40 def node_mapper (method):
43 node_method = TestNode.__dict__[method.__name__]
44 for site_spec in self.plc_spec['sites']:
45 test_site = TestSite (self,site_spec)
46 for node_spec in site_spec['nodes']:
47 test_node = TestNode (self,test_site,node_spec)
48 if not node_method(test_node): overall=False
50 # restore the doc text
51 actual.__doc__=method.__doc__
54 def slice_mapper (method):
57 slice_method = TestSlice.__dict__[method.__name__]
58 for slice_spec in self.plc_spec['slices']:
59 site_spec = self.locate_site (slice_spec['sitename'])
60 test_site = TestSite(self,site_spec)
61 test_slice=TestSlice(self,test_site,slice_spec)
62 if not slice_method(test_slice,self.options): overall=False
64 # restore the doc text
65 actual.__doc__=method.__doc__
68 def slice_sfa_mapper (method):
71 slice_method = TestSliceSfa.__dict__[method.__name__]
72 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
73 site_spec = self.locate_site (slice_spec['sitename'])
74 test_site = TestSite(self,site_spec)
75 test_slice=TestSliceSfa(self,test_site,slice_spec)
76 if not slice_method(test_slice,self.options): overall=False
78 # restore the doc text
79 actual.__doc__=method.__doc__
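# to illustrate the mapper pattern above (hypothetical, not an actual step):
# a TestPlc method decorated with node_mapper must bear the name of an existing
# TestNode method; the decorator replaces its body with a loop over every node
# declared in plc_spec and reports failure if any of them fails
# @node_mapper
# def example_node_step (self):
#     "the doc string gets copied back onto the wrapper, as coded above"
#     pass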
89 'vs_delete','timestamp_vs','vs_create', SEP,
90 'plc_install', 'plc_configure', 'plc_start', SEP,
91 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
92 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
93 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
94 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
95 'sfa_install', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
96 'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
97 'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
98 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
99 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
100 # but as the stress test might take a while, we sometimes missed the debug mode..
101 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
102 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
103 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
105 'force_gather_logs', SEP,
108 'export', 'show_boxes', SEP,
109 'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
110 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
111 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
112 'delete_leases', 'list_leases', SEP,
114 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
115 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
116 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEP,
117 'plc_db_dump' , 'plc_db_restore', SEP,
118 'standby_1_through_20',SEP,
122 def printable_steps (list):
123 single_line=" ".join(list)+" "
124 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
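# as a worked example - assuming SEP and SEPSFA are plain marker strings -
# printable_steps([ 'vs_delete', 'vs_create', SEP, 'plc_install' ]) renders as
#   vs_delete vs_create \
#   plc_install
# i.e. the separators only show up as backslash-continued line breaks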
126 def valid_step (step):
127 return step != SEP and step != SEPSFA
129 # turn off the sfa-related steps when build has skipped SFA
130 # this was originally introduced for centos5, as recent SFAs won't build on that platform
132 def check_whether_build_has_sfa (rpms_url):
133 # warning, we're now building 'sface' so let's be a bit more picky
134 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
135 # full builds are expected to return with 0 here
137 # move all steps containing 'sfa' from default_steps to other_steps
138 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
139 TestPlc.other_steps += sfa_steps
140 for step in sfa_steps: TestPlc.default_steps.remove(step)
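# as an illustration, with a hypothetical rpms_url like
#   http://build.example.org/2011.01.01--mytag/f14/x86_64
# the check above amounts to running
#   curl --silent http://build.example.org/2011.01.01--mytag/f14/x86_64/ | grep -q sfa-
# a zero exit status - expected from a full build - indicates sfa- packages are
# present; otherwise the sfa-related steps get moved out of default_steps into
# other_steps as coded above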
142 def __init__ (self,plc_spec,options):
143 self.plc_spec=plc_spec
145 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
147 self.vserverip=plc_spec['vserverip']
148 self.vservername=plc_spec['vservername']
149 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
152 raise Exception,'chroot-based myplc testing is deprecated'
153 self.apiserver=TestApiserver(self.url,options.dry_run)
156 name=self.plc_spec['name']
157 return "%s.%s"%(name,self.vservername)
160 return self.plc_spec['host_box']
163 return self.test_ssh.is_local()
165 # defining the API methods on this object through xmlrpc
166 # would help, but is not strictly necessary
170 def actual_command_in_guest (self,command):
171 return self.test_ssh.actual_command(self.host_to_guest(command))
173 def start_guest (self):
174 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
176 def stop_guest (self):
177 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))
179 def run_in_guest (self,command):
180 return utils.system(self.actual_command_in_guest(command))
182 def run_in_host (self,command):
183 return self.test_ssh.run_in_buildname(command)
185 #command gets run in the vserver
186 def host_to_guest(self,command):
187 return "vserver %s exec %s"%(self.vservername,command)
189 #start/stop the vserver
190 def start_guest_in_host(self):
191 return "vserver %s start"%(self.vservername)
193 def stop_guest_in_host(self):
194 return "vserver %s stop"%(self.vservername)
197 def run_in_guest_piped (self,local,remote):
198 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
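# to summarize how commands get composed (a sketch - the exact ssh invocation is
# delegated to TestSsh.actual_command): a call such as
#   self.run_in_guest("service plc status")
# roughly amounts to running, from the test master:
#   ssh root@<host_box> vserver <vservername> exec service plc status
# while run_in_guest_piped("cat foo.conf","plc-config-tty") pipes the output of
# the local command into the same kind of wrapped remote command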
200 def auth_root (self):
201 return {'Username':self.plc_spec['PLC_ROOT_USER'],
202 'AuthMethod':'password',
203 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
204 'Role' : self.plc_spec['role']
206 def locate_site (self,sitename):
207 for site in self.plc_spec['sites']:
208 if site['site_fields']['name'] == sitename:
210 if site['site_fields']['login_base'] == sitename:
212 raise Exception,"Cannot locate site %s"%sitename
214 def locate_node (self,nodename):
215 for site in self.plc_spec['sites']:
216 for node in site['nodes']:
217 if node['name'] == nodename:
219 raise Exception,"Cannot locate node %s"%nodename
221 def locate_hostname (self,hostname):
222 for site in self.plc_spec['sites']:
223 for node in site['nodes']:
224 if node['node_fields']['hostname'] == hostname:
226 raise Exception,"Cannot locate hostname %s"%hostname
228 def locate_key (self,keyname):
229 for key in self.plc_spec['keys']:
230 if key['name'] == keyname:
232 raise Exception,"Cannot locate key %s"%keyname
234 def locate_slice (self, slicename):
235 for slice in self.plc_spec['slices']:
236 if slice['slice_fields']['name'] == slicename:
238 raise Exception,"Cannot locate slice %s"%slicename
240 def all_sliver_objs (self):
242 for slice_spec in self.plc_spec['slices']:
243 slicename = slice_spec['slice_fields']['name']
244 for nodename in slice_spec['nodenames']:
245 result.append(self.locate_sliver_obj (nodename,slicename))
248 def locate_sliver_obj (self,nodename,slicename):
249 (site,node) = self.locate_node(nodename)
250 slice = self.locate_slice (slicename)
252 test_site = TestSite (self, site)
253 test_node = TestNode (self, test_site,node)
254 # xxx the slice site is assumed to be the node site - mhh - probably harmless
255 test_slice = TestSlice (self, test_site, slice)
256 return TestSliver (self, test_node, test_slice)
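# typical use (see check_tcp further down): starting from a 'tcp_test' spec entry,
#   self.locate_sliver_obj(spec['server_node'], spec['server_slice'])
# returns a TestSliver bound to the matching TestNode and TestSlice objects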
258 def locate_first_node(self):
259 nodename=self.plc_spec['slices'][0]['nodenames'][0]
260 (site,node) = self.locate_node(nodename)
261 test_site = TestSite (self, site)
262 test_node = TestNode (self, test_site,node)
265 def locate_first_sliver (self):
266 slice_spec=self.plc_spec['slices'][0]
267 slicename=slice_spec['slice_fields']['name']
268 nodename=slice_spec['nodenames'][0]
269 return self.locate_sliver_obj(nodename,slicename)
271 # all different hostboxes used in this plc
272 def gather_hostBoxes(self):
273 # maps on sites and nodes, return [ (host_box,test_node) ]
275 for site_spec in self.plc_spec['sites']:
276 test_site = TestSite (self,site_spec)
277 for node_spec in site_spec['nodes']:
278 test_node = TestNode (self, test_site, node_spec)
279 if not test_node.is_real():
280 tuples.append( (test_node.host_box(),test_node) )
281 # transform into a dict { 'host_box' -> [ test_node .. ] }
283 for (box,node) in tuples:
284 if not result.has_key(box):
287 result[box].append(node)
290 # a step for checking this stuff
291 def show_boxes (self):
292 'print summary of nodes location'
293 for (box,nodes) in self.gather_hostBoxes().iteritems():
294 print box,":"," + ".join( [ node.name() for node in nodes ] )
297 # make this a valid step
298 def qemu_kill_all(self):
299 'kill all qemu instances on the qemu boxes involved in this setup'
300 # this is the brute force version, kill all qemus on that host box
301 for (box,nodes) in self.gather_hostBoxes().iteritems():
302 # pass the first nodename, as we don't push template-qemu on testboxes
303 nodedir=nodes[0].nodedir()
304 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
307 # make this a valid step
308 def qemu_list_all(self):
309 'list all qemu instances on the qemu boxes involved in this setup'
310 for (box,nodes) in self.gather_hostBoxes().iteritems():
311 # this is the brute force version, list all qemus on that host box
312 TestBoxQemu(box,self.options.buildname).qemu_list_all()
315 # kill only the right qemus
316 def qemu_list_mine(self):
317 'list qemu instances for our nodes'
318 for (box,nodes) in self.gather_hostBoxes().iteritems():
319 # the fine-grain version
324 # kill only the right qemus
325 def qemu_kill_mine(self):
326 'kill the qemu instances for our nodes'
327 for (box,nodes) in self.gather_hostBoxes().iteritems():
328 # the fine-grain version
333 #################### display config
335 "show test configuration after localization"
336 self.display_pass (1)
337 self.display_pass (2)
341 "print cut'n paste-able stuff to export env variables to your shell"
342 # these work but the shell prompt does not get displayed..
343 command1="ssh %s vserver %s enter"%(self.plc_spec['host_box'],self.plc_spec['vservername'])
344 command2="ssh root@%s %s"%(socket.gethostname(),command1)
345 # guess local domain from hostname
346 domain=socket.gethostname().split('.',1)[1]
347 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
348 print "export BUILD=%s"%self.options.buildname
349 print "export PLCHOST=%s"%fqdn
350 print "export GUEST=%s"%self.plc_spec['vservername']
351 # find hostname of first node
352 (hostname,qemubox) = self.all_node_infos()[0]
353 print "export KVMHOST=%s.%s"%(qemubox,domain)
354 print "export NODE=%s"%(hostname)
358 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
359 def display_pass (self,passno):
360 for (key,val) in self.plc_spec.iteritems():
361 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
365 self.display_site_spec(site)
366 for node in site['nodes']:
367 self.display_node_spec(node)
368 elif key=='initscripts':
369 for initscript in val:
370 self.display_initscript_spec (initscript)
373 self.display_slice_spec (slice)
376 self.display_key_spec (key)
378 if key not in ['sites','initscripts','slices','keys', 'sfa']:
379 print '+ ',key,':',val
381 def display_site_spec (self,site):
382 print '+ ======== site',site['site_fields']['name']
383 for (k,v) in site.iteritems():
384 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
387 print '+ ','nodes : ',
389 print node['node_fields']['hostname'],'',
395 print user['name'],'',
397 elif k == 'site_fields':
398 print '+ login_base',':',v['login_base']
399 elif k == 'address_fields':
405 def display_initscript_spec (self,initscript):
406 print '+ ======== initscript',initscript['initscript_fields']['name']
408 def display_key_spec (self,key):
409 print '+ ======== key',key['name']
411 def display_slice_spec (self,slice):
412 print '+ ======== slice',slice['slice_fields']['name']
413 for (k,v) in slice.iteritems():
426 elif k=='slice_fields':
427 print '+ fields',':',
428 print 'max_nodes=',v['max_nodes'],
433 def display_node_spec (self,node):
434 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
435 print "hostname=",node['node_fields']['hostname'],
436 print "ip=",node['interface_fields']['ip']
437 if self.options.verbose:
438 utils.pprint("node details",node,depth=3)
440 # another entry point for just showing the boxes involved
441 def display_mapping (self):
442 TestPlc.display_mapping_plc(self.plc_spec)
446 def display_mapping_plc (plc_spec):
447 print '+ MyPLC',plc_spec['name']
448 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
449 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
450 for site_spec in plc_spec['sites']:
451 for node_spec in site_spec['nodes']:
452 TestPlc.display_mapping_node(node_spec)
455 def display_mapping_node (node_spec):
456 print '+ NODE %s'%(node_spec['name'])
457 print '+\tqemu box %s'%node_spec['host_box']
458 print '+\thostname=%s'%node_spec['node_fields']['hostname']
460 # write a timestamp in /vservers/<vservername>.timestamp
461 # it cannot live inside the vserver, as that would make 'vserver .. build' cough
462 def timestamp_vs (self):
464 return utils.system(self.test_ssh.actual_command("echo %d > /vservers/%s.timestamp"%(now,self.vservername)))==0
466 # def local_pre (self):
467 # "run site-dependent pre-test script as defined in LocalTestResources"
468 # from LocalTestResources import local_resources
469 # return local_resources.step_pre(self)
471 # def local_post (self):
472 # "run site-dependent post-test script as defined in LocalTestResources"
473 # from LocalTestResources import local_resources
474 # return local_resources.step_post(self)
476 # def local_list (self):
477 # "run site-dependent list script as defined in LocalTestResources"
478 # from LocalTestResources import local_resources
479 # return local_resources.step_list(self)
481 # def local_rel (self):
482 # "run site-dependent release script as defined in LocalTestResources"
483 # from LocalTestResources import local_resources
484 # return local_resources.step_release(self)
486 # def local_rel_plc (self):
487 # "run site-dependent release script as defined in LocalTestResources"
488 # from LocalTestResources import local_resources
489 # return local_resources.step_release_plc(self)
491 # def local_rel_qemu (self):
492 # "run site-dependent release script as defined in LocalTestResources"
493 # from LocalTestResources import local_resources
494 # return local_resources.step_release_qemu(self)
497 "vserver delete the test myplc"
498 self.run_in_host("vserver --silent %s delete"%self.vservername)
499 self.run_in_host("rm -f /vservers/%s.timestamp"%self.vservername)
503 # historically the build was being fetched by the tests
504 # now the build pushes itself as a subdir of the tests workdir
505 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
506 def vs_create (self):
507 "vserver creation (no install done)"
508 # push the local build/ dir to the testplc box
510 # a full path for the local calls
511 build_dir=os.path.dirname(sys.argv[0])
512 # sometimes this is empty - set to "." in such a case
513 if not build_dir: build_dir="."
514 build_dir += "/build"
516 # use a standard name - will be relative to remote buildname
518 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
519 self.test_ssh.rmdir(build_dir)
520 self.test_ssh.copy(build_dir,recursive=True)
521 # the repo url is taken from arch-rpms-url
522 # with the last step (i386) removed
523 repo_url = self.options.arch_rpms_url
524 for level in [ 'arch' ]:
525 repo_url = os.path.dirname(repo_url)
526 # pass the vbuild-nightly options to vtest-init-vserver
528 test_env_options += " -p %s"%self.options.personality
529 test_env_options += " -d %s"%self.options.pldistro
530 test_env_options += " -f %s"%self.options.fcdistro
531 script="vtest-init-vserver.sh"
532 vserver_name = self.vservername
533 vserver_options="--netdev eth0 --interface %s"%self.vserverip
535 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
536 vserver_options += " --hostname %s"%vserver_hostname
538 print "Cannot reverse lookup %s"%self.vserverip
539 print "This is considered fatal, as this might pollute the test results"
541 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
542 return self.run_in_host(create_vserver) == 0
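# for illustration only - with hypothetical option values, the command assembled
# above could look like
#   build/vtest-init-vserver.sh -p linux64 -d planetlab -f f14 \
#       <vservername> http://build.example.org/2011.01.01--mytag/f14 \
#       -- --netdev eth0 --interface <vserverip> --hostname <reverse-lookup-name>
# the --hostname part is only appended when the reverse lookup of vserverip succeeds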
545 def plc_install(self):
546 "yum install myplc, noderepo, and the plain bootstrapfs"
548 # workaround for getting pgsql8.2 on centos5
549 if self.options.fcdistro == "centos5":
550 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
553 if self.options.personality == "linux32":
555 elif self.options.personality == "linux64":
558 raise Exception, "Unsupported personality %r"%self.options.personality
559 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
562 pkgs_list.append ("slicerepo-%s"%nodefamily)
563 pkgs_list.append ("myplc")
564 pkgs_list.append ("noderepo-%s"%nodefamily)
565 pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
566 pkgs_string=" ".join(pkgs_list)
567 self.run_in_guest("yum -y install %s"%pkgs_string)
568 return self.run_in_guest("rpm -q %s"%pkgs_string)==0
571 def plc_configure(self):
573 tmpname='%s.plc-config-tty'%(self.name())
574 fileconf=open(tmpname,'w')
575 for var in [ 'PLC_NAME',
580 'PLC_MAIL_SUPPORT_ADDRESS',
583 # Above line was added for integrating SFA Testing
589 'PLC_RESERVATION_GRANULARITY',
591 'PLC_OMF_XMPP_SERVER',
593 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
594 fileconf.write('w\n')
595 fileconf.write('q\n')
597 utils.system('cat %s'%tmpname)
598 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
599 utils.system('rm %s'%tmpname)
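# the temporary file drives plc-config-tty's prompt; its contents look like
# (values coming from plc_spec):
#   e PLC_NAME
#   <value for PLC_NAME>
#   e PLC_MAIL_SUPPORT_ADDRESS
#   <value for PLC_MAIL_SUPPORT_ADDRESS>
#   ...
#   w
#   q
# where 'e <var>' edits one variable, 'w' writes the config and 'q' quits;
# sfa_configure below drives sfa-config-tty the same way, with an extra 'R' line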
604 self.run_in_guest('service plc start')
609 self.run_in_guest('service plc stop')
613 "start the PLC vserver"
618 "stop the PLC vserver"
622 # stores the keys from the config for further use
623 def keys_store(self):
624 "stores test users ssh keys in keys/"
625 for key_spec in self.plc_spec['keys']:
626 TestKey(self,key_spec).store_key()
629 def keys_clean(self):
630 "removes keys cached in keys/"
631 utils.system("rm -rf ./keys")
634 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
635 # for later direct access to the nodes
636 def keys_fetch(self):
637 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
639 if not os.path.isdir(dir):
641 vservername=self.vservername
643 prefix = 'debug_ssh_key'
644 for ext in [ 'pub', 'rsa' ] :
645 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
646 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
647 if self.test_ssh.fetch(src,dst) != 0: overall=False
651 "create sites with PLCAPI"
652 return self.do_sites()
654 def delete_sites (self):
655 "delete sites with PLCAPI"
656 return self.do_sites(action="delete")
658 def do_sites (self,action="add"):
659 for site_spec in self.plc_spec['sites']:
660 test_site = TestSite (self,site_spec)
661 if (action != "add"):
662 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
663 test_site.delete_site()
664 # deleted with the site
665 #test_site.delete_users()
668 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
669 test_site.create_site()
670 test_site.create_users()
673 def delete_all_sites (self):
674 "Delete all sites in PLC, and related objects"
675 print 'auth_root',self.auth_root()
676 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
677 for site_id in site_ids:
678 print 'Deleting site_id',site_id
679 self.apiserver.DeleteSite(self.auth_root(),site_id)
683 "create nodes with PLCAPI"
684 return self.do_nodes()
685 def delete_nodes (self):
686 "delete nodes with PLCAPI"
687 return self.do_nodes(action="delete")
689 def do_nodes (self,action="add"):
690 for site_spec in self.plc_spec['sites']:
691 test_site = TestSite (self,site_spec)
693 utils.header("Deleting nodes in site %s"%test_site.name())
694 for node_spec in site_spec['nodes']:
695 test_node=TestNode(self,test_site,node_spec)
696 utils.header("Deleting %s"%test_node.name())
697 test_node.delete_node()
699 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
700 for node_spec in site_spec['nodes']:
701 utils.pprint('Creating node %s'%node_spec,node_spec)
702 test_node = TestNode (self,test_site,node_spec)
703 test_node.create_node ()
706 def nodegroups (self):
707 "create nodegroups with PLCAPI"
708 return self.do_nodegroups("add")
709 def delete_nodegroups (self):
710 "delete nodegroups with PLCAPI"
711 return self.do_nodegroups("delete")
715 def translate_timestamp (start,grain,timestamp):
716 if timestamp < TestPlc.YEAR: return start+timestamp*grain
717 else: return timestamp
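# worked example - assuming TestPlc.YEAR is roughly one year in seconds, and with
# grain the lease granularity returned by the API (say 1800):
#   translate_timestamp(start, 1800, 2)          -> start + 2*1800  (small value: relative, in grains)
#   translate_timestamp(start, 1800, 1300000000) -> 1300000000      (epoch-like value: kept as absolute)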
720 def timestamp_printable (timestamp):
721 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
724 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
726 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
727 print 'API answered grain=',grain
728 start=(now/grain)*grain
730 # find out all nodes that are reservable
731 nodes=self.all_reservable_nodenames()
733 utils.header ("No reservable node found - proceeding without leases")
736 # attach them to the leases as specified in plc_specs
737 # this is where the 'leases' field gets interpreted as relative or absolute
738 for lease_spec in self.plc_spec['leases']:
739 # skip the ones that come with a null slice id
740 if not lease_spec['slice']: continue
741 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
742 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
743 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
744 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
745 if lease_addition['errors']:
746 utils.header("Cannot create leases, %s"%lease_addition['errors'])
749 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
750 (nodes,lease_spec['slice'],
751 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
752 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
756 def delete_leases (self):
757 "remove all leases in the myplc side"
758 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
759 utils.header("Cleaning leases %r"%lease_ids)
760 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
763 def list_leases (self):
764 "list all leases known to the myplc"
765 leases = self.apiserver.GetLeases(self.auth_root())
768 current=l['t_until']>=now
769 if self.options.verbose or current:
770 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
771 TestPlc.timestamp_printable(l['t_from']),
772 TestPlc.timestamp_printable(l['t_until'])))
775 # create nodegroups if needed, and populate
776 def do_nodegroups (self, action="add"):
777 # 1st pass to scan contents
779 for site_spec in self.plc_spec['sites']:
780 test_site = TestSite (self,site_spec)
781 for node_spec in site_spec['nodes']:
782 test_node=TestNode (self,test_site,node_spec)
783 if node_spec.has_key('nodegroups'):
784 nodegroupnames=node_spec['nodegroups']
785 if isinstance(nodegroupnames,StringTypes):
786 nodegroupnames = [ nodegroupnames ]
787 for nodegroupname in nodegroupnames:
788 if not groups_dict.has_key(nodegroupname):
789 groups_dict[nodegroupname]=[]
790 groups_dict[nodegroupname].append(test_node.name())
791 auth=self.auth_root()
793 for (nodegroupname,group_nodes) in groups_dict.iteritems():
795 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
796 # first, check if the nodetagtype is here
797 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
799 tag_type_id = tag_types[0]['tag_type_id']
801 tag_type_id = self.apiserver.AddTagType(auth,
802 {'tagname':nodegroupname,
803 'description': 'for nodegroup %s'%nodegroupname,
805 print 'located tag (type)',nodegroupname,'as',tag_type_id
807 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
809 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
810 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
811 # set node tag on all nodes, value='yes'
812 for nodename in group_nodes:
814 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
816 traceback.print_exc()
817 print 'node',nodename,'seems to already have tag',nodegroupname
820 expect_yes = self.apiserver.GetNodeTags(auth,
821 {'hostname':nodename,
822 'tagname':nodegroupname},
823 ['value'])[0]['value']
824 if expect_yes != "yes":
825 print 'Mismatch node tag on node',nodename,'got',expect_yes
828 if not self.options.dry_run:
829 print 'Cannot find tag',nodegroupname,'on node',nodename
833 print 'cleaning nodegroup',nodegroupname
834 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
836 traceback.print_exc()
840 # return a list of tuples (nodename,qemuname)
841 def all_node_infos (self) :
843 for site_spec in self.plc_spec['sites']:
844 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
845 for node_spec in site_spec['nodes'] ]
848 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
849 def all_reservable_nodenames (self):
851 for site_spec in self.plc_spec['sites']:
852 for node_spec in site_spec['nodes']:
853 node_fields=node_spec['node_fields']
854 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
855 res.append(node_fields['hostname'])
858 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
859 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
860 if self.options.dry_run:
864 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
865 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
866 # the nodes that have not been checked yet - start with a full list and shrink over time
867 tocheck = self.all_hostnames()
868 utils.header("checking nodes %r"%tocheck)
869 # create a dict hostname -> status
870 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
873 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
875 for array in tocheck_status:
876 hostname=array['hostname']
877 boot_state=array['boot_state']
878 if boot_state == target_boot_state:
879 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
881 # if it's a real node, never mind
882 (site_spec,node_spec)=self.locate_hostname(hostname)
883 if TestNode.is_real_model(node_spec['node_fields']['model']):
884 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
886 boot_state = target_boot_state
887 elif datetime.datetime.now() > graceout:
888 utils.header ("%s still in '%s' state"%(hostname,boot_state))
889 graceout=datetime.datetime.now()+datetime.timedelta(1)
890 status[hostname] = boot_state
892 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
895 if datetime.datetime.now() > timeout:
896 for hostname in tocheck:
897 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
899 # otherwise, sleep for a while
901 # only useful in empty plcs
904 def nodes_booted(self):
905 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
907 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
909 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
910 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
911 vservername=self.vservername
914 local_key = "keys/%(vservername)s-debug.rsa"%locals()
917 local_key = "keys/key1.rsa"
918 node_infos = self.all_node_infos()
919 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
920 for (nodename,qemuname) in node_infos:
921 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
922 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
923 (timeout_minutes,silent_minutes,period))
925 for node_info in node_infos:
926 (hostname,qemuname) = node_info
927 # try to run 'hostname' in the node
928 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
929 # don't spam logs - show the command only after the grace period
930 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
932 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
934 node_infos.remove(node_info)
936 # we will have tried real nodes once, in case they're up - but if not, just skip
937 (site_spec,node_spec)=self.locate_hostname(hostname)
938 if TestNode.is_real_model(node_spec['node_fields']['model']):
939 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
940 node_infos.remove(node_info)
943 if datetime.datetime.now() > timeout:
944 for (hostname,qemuname) in node_infos:
945 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
947 # otherwise, sleep for a while
949 # only useful in empty plcs
952 def ssh_node_debug(self):
953 "Tries to ssh into nodes in debug mode with the debug ssh key"
954 return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)
956 def ssh_node_boot(self):
957 "Tries to ssh into nodes in production mode with the root ssh key"
958 return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
961 def qemu_local_init (self):
962 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
966 "all nodes: invoke GetBootMedium and store result locally"
969 def qemu_local_config (self):
970 "all nodes: compute qemu config qemu.conf and store it locally"
973 def nodestate_reinstall (self):
974 "all nodes: mark PLCAPI boot_state as reinstall"
977 def nodestate_safeboot (self):
978 "all nodes: mark PLCAPI boot_state as safeboot"
981 def nodestate_boot (self):
982 "all nodes: mark PLCAPI boot_state as boot"
985 def nodestate_show (self):
986 "all nodes: show PLCAPI boot_state"
989 def qemu_export (self):
990 "all nodes: push local node-dep directory on the qemu box"
993 ### check hooks : invoke scripts from hooks/{node,slice}
994 def check_hooks_node (self):
995 return self.locate_first_node().check_hooks()
996 def check_hooks_sliver (self) :
997 return self.locate_first_sliver().check_hooks()
999 def check_hooks (self):
1000 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1001 return self.check_hooks_node() and self.check_hooks_sliver()
1004 def do_check_initscripts(self):
1006 for slice_spec in self.plc_spec['slices']:
1007 if not slice_spec.has_key('initscriptstamp'):
1009 stamp=slice_spec['initscriptstamp']
1010 for nodename in slice_spec['nodenames']:
1011 (site,node) = self.locate_node (nodename)
1012 # xxx - passing the wrong site - probably harmless
1013 test_site = TestSite (self,site)
1014 test_slice = TestSlice (self,test_site,slice_spec)
1015 test_node = TestNode (self,test_site,node)
1016 test_sliver = TestSliver (self, test_node, test_slice)
1017 if not test_sliver.check_initscript_stamp(stamp):
1021 def check_initscripts(self):
1022 "check that the initscripts have triggered"
1023 return self.do_check_initscripts()
1025 def initscripts (self):
1026 "create initscripts with PLCAPI"
1027 for initscript in self.plc_spec['initscripts']:
1028 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1029 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1032 def delete_initscripts (self):
1033 "delete initscripts with PLCAPI"
1034 for initscript in self.plc_spec['initscripts']:
1035 initscript_name = initscript['initscript_fields']['name']
1036 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1038 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1039 print initscript_name,'deleted'
1041 print 'deletion went wrong - probably did not exist'
1046 "create slices with PLCAPI"
1047 return self.do_slices()
1049 def delete_slices (self):
1050 "delete slices with PLCAPI"
1051 return self.do_slices("delete")
1053 def do_slices (self, action="add"):
1054 for slice in self.plc_spec['slices']:
1055 site_spec = self.locate_site (slice['sitename'])
1056 test_site = TestSite(self,site_spec)
1057 test_slice=TestSlice(self,test_site,slice)
1059 utils.header("Deleting slices in site %s"%test_site.name())
1060 test_slice.delete_slice()
1062 utils.pprint("Creating slice",slice)
1063 test_slice.create_slice()
1064 utils.header('Created Slice %s'%slice['slice_fields']['name'])
1068 def ssh_slice(self):
1069 "tries to ssh-enter the slice with the user key, to ensure slice creation"
1073 def keys_clear_known_hosts (self):
1074 "remove test nodes entries from the local known_hosts file"
1078 def qemu_start (self) :
1079 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
1083 def timestamp_qemu (self) :
1084 "all nodes: write a timestamp for the qemu instance (counterpart of timestamp_vs)"
1087 def check_tcp (self):
1088 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1089 specs = self.plc_spec['tcp_test']
1094 s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
1095 if not s_test_sliver.run_tcp_server(port,timeout=10):
1099 # idem for the client side
1100 c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
1101 if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
1105 def plcsh_stress_test (self):
1106 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1107 # install the stress-test in the plc image
1108 location = "/usr/share/plc_api/plcsh_stress_test.py"
1109 remote="/vservers/%s/%s"%(self.vservername,location)
1110 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1112 command += " -- --check"
1113 if self.options.size == 1:
1114 command += " --tiny"
1115 return ( self.run_in_guest(command) == 0)
1117 # populate runs the same utility with slightly different options
1118 # in particular it runs with --preserve (don't cleanup) and without --check
1119 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1122 def sfa_install(self):
1123 "yum install sfa, sfa-plc and sfa-client"
1125 self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")
1126 return self.run_in_guest("rpm -q sfa sfa-client sfa-plc sfa-sfatables")==0
1129 def sfa_dbclean(self):
1130 "thoroughly wipes off the SFA database"
1131 self.run_in_guest("sfa-nuke-plc.py")==0
1134 def sfa_plcclean(self):
1135 "cleans the PLC entries that were created as a side effect of running the script"
1137 sfa_spec=self.plc_spec['sfa']
1139 for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
1140 slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
1141 try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
1142 except: print "Slice %s already absent from PLC db"%slicename
1144 username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
1145 try: self.apiserver.DeletePerson(self.auth_root(),username)
1146 except: print "User %s already absent from PLC db"%username
1148 print "REMEMBER TO RUN sfa_import AGAIN"
1151 def sfa_uninstall(self):
1152 "uses rpm to uninstall sfa - ignore result"
1153 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1154 self.run_in_guest("rm -rf /var/lib/sfa")
1155 self.run_in_guest("rm -rf /etc/sfa")
1156 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1158 self.run_in_guest("rpm -e --noscripts sfa-plc")
1161 ### run unit tests for SFA
1162 # NOTE: on f14/i386, yum install sfa-tests fails for no apparent reason
1163 # Running Transaction
1164 # Transaction couldn't start:
1165 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1166 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1167 # no matter how many Gbs are available on the testplc
1168 # could not figure out what's wrong, so...
1169 # if the yum install phase fails, consider the test successful
1170 # other combinations will eventually run it hopefully
1171 def sfa_utest(self):
1172 "yum install sfa-tests and run SFA unittests"
1173 self.run_in_guest("yum -y install sfa-tests")
1174 # failed to install - forget it
1175 if self.run_in_guest("rpm -q sfa-tests")!=0:
1176 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1178 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1182 dirname="conf.%s"%self.plc_spec['name']
1183 if not os.path.isdir(dirname):
1184 utils.system("mkdir -p %s"%dirname)
1185 if not os.path.isdir(dirname):
1186 raise Exception,"Cannot create config dir for plc %s"%self.name()
1189 def conffile(self,filename):
1190 return "%s/%s"%(self.confdir(),filename)
1191 def confsubdir(self,dirname,clean,dry_run=False):
1192 subdirname="%s/%s"%(self.confdir(),dirname)
1194 utils.system("rm -rf %s"%subdirname)
1195 if not os.path.isdir(subdirname):
1196 utils.system("mkdir -p %s"%subdirname)
1197 if not dry_run and not os.path.isdir(subdirname):
1198 raise Exception,"Cannot create config subdir %s for plc %s"%(dirname,self.name())
1201 def conffile_clean (self,filename):
1202 filename=self.conffile(filename)
1203 return utils.system("rm -rf %s"%filename)==0
1206 def sfa_configure(self):
1207 "run sfa-config-tty"
1208 tmpname=self.conffile("sfa-config-tty")
1209 fileconf=open(tmpname,'w')
1210 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1211 'SFA_INTERFACE_HRN',
1212 # 'SFA_REGISTRY_LEVEL1_AUTH',
1213 'SFA_REGISTRY_HOST',
1214 'SFA_AGGREGATE_HOST',
1220 'SFA_PLC_DB_PASSWORD',
1223 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1224 # the way plc_config handles booleans just sucks..
1225 for var in ['SFA_API_DEBUG']:
1227 if self.plc_spec['sfa'][var]: val='true'
1228 fileconf.write ('e %s\n%s\n'%(var,val))
1229 fileconf.write('w\n')
1230 fileconf.write('R\n')
1231 fileconf.write('q\n')
1233 utils.system('cat %s'%tmpname)
1234 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1237 def aggregate_xml_line(self):
1238 port=self.plc_spec['sfa']['neighbours-port']
1239 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1240 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
1242 def registry_xml_line(self):
1243 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1244 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
1247 # a cross step that takes all other plcs in argument
1248 def cross_sfa_configure(self, other_plcs):
1249 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1250 # of course with a single plc, other_plcs is an empty list
1253 agg_fname=self.conffile("agg.xml")
1254 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1255 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1256 utils.header ("(Over)wrote %s"%agg_fname)
1257 reg_fname=self.conffile("reg.xml")
1258 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1259 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1260 utils.header ("(Over)wrote %s"%reg_fname)
1261 return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
1262 and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
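# with a single plc under test, other_plcs is empty and both files only contain
# the enclosing tag; with peers, each entry comes from aggregate_xml_line /
# registry_xml_line above, i.e. something like
#   <aggregates><aggregate addr="<peer vserverip>" hrn="<peer SFA_REGISTRY_ROOT_AUTH>" port=.../></aggregates>
# and the resulting files land under /etc/sfa/ inside the myplc vserver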
1264 def sfa_import(self):
1266 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1267 return self.run_in_guest('sfa-import-plc.py')==0
1268 # not needed anymore
1269 # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
1271 def sfa_start(self):
1273 return self.run_in_guest('service sfa start')==0
1275 def sfi_configure(self):
1276 "Create /root/sfi on the plc side for sfi client configuration"
1277 if self.options.dry_run:
1278 utils.header("DRY RUN - skipping step")
1280 sfa_spec=self.plc_spec['sfa']
1281 # cannot use sfa_slice_mapper to pass dir_name
1282 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
1283 site_spec = self.locate_site (slice_spec['sitename'])
1284 test_site = TestSite(self,site_spec)
1285 test_slice=TestSliceSfa(self,test_site,slice_spec)
1286 dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
1287 test_slice.sfi_config(dir_name)
1288 # push into the remote /root/sfi area
1289 location = test_slice.sfi_path()
1290 remote="/vservers/%s/%s"%(self.vservername,location)
1291 self.test_ssh.mkdir(remote,abs=True)
1292 # need to strip the last level of remote, otherwise we get an extra dir level
1293 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1297 def sfi_clean (self):
1298 "clean up /root/sfi on the plc side"
1299 self.run_in_guest("rm -rf /root/sfi")
1303 def sfa_add_user(self):
1308 def sfa_update_user(self):
1312 def sfa_add_slice(self):
1313 "run sfi.py add (on Registry) from slice.xml"
1317 def sfa_discover(self):
1318 "discover resources into resources_in.rspec"
1322 def sfa_create_slice(self):
1323 "run sfi.py create (on SM) - 1st time"
1327 def sfa_check_slice_plc(self):
1328 "check sfa_create_slice at the plcs - all local nodes should be in slice"
1332 def sfa_update_slice(self):
1333 "run sfi.py create (on SM) on existing object"
1338 "various registry-related calls"
1342 def ssh_slice_sfa(self):
1343 "tries to ssh-enter the SFA slice"
1347 def sfa_delete_user(self):
1352 def sfa_delete_slice(self):
1353 "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
1358 self.run_in_guest('service sfa stop')==0
1361 def populate (self):
1362 "creates random entries in the PLCAPI"
1363 # install the stress-test in the plc image
1364 location = "/usr/share/plc_api/plcsh_stress_test.py"
1365 remote="/vservers/%s/%s"%(self.vservername,location)
1366 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1368 command += " -- --preserve --short-names"
1369 local = (self.run_in_guest(command) == 0);
1370 # second run with --foreign
1371 command += ' --foreign'
1372 remote = (self.run_in_guest(command) == 0);
1373 return ( local and remote)
1375 def gather_logs (self):
1376 "gets all possible logs from the plc, the qemu nodes and the slices, for future reference"
1377 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1378 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1379 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1380 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1381 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1383 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1384 self.gather_var_logs ()
1386 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/pgsql/data/pg_log/"
1387 self.gather_pgsql_logs ()
1389 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1390 for site_spec in self.plc_spec['sites']:
1391 test_site = TestSite (self,site_spec)
1392 for node_spec in site_spec['nodes']:
1393 test_node=TestNode(self,test_site,node_spec)
1394 test_node.gather_qemu_logs()
1396 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1397 self.gather_nodes_var_logs()
1399 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1400 self.gather_slivers_var_logs()
1403 def gather_slivers_var_logs(self):
1404 for test_sliver in self.all_sliver_objs():
1405 remote = test_sliver.tar_var_logs()
1406 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1407 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1408 utils.system(command)
1411 def gather_var_logs (self):
1412 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1413 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1414 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1415 utils.system(command)
1416 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1417 utils.system(command)
1419 def gather_pgsql_logs (self):
1420 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1421 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1422 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1423 utils.system(command)
1425 def gather_nodes_var_logs (self):
1426 for site_spec in self.plc_spec['sites']:
1427 test_site = TestSite (self,site_spec)
1428 for node_spec in site_spec['nodes']:
1429 test_node=TestNode(self,test_site,node_spec)
1430 test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
1431 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1432 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1433 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1434 utils.system(command)
1437 # returns the filename to use for sql dump/restore, using options.dbname if set
1438 def dbfile (self, database):
1439 # uses options.dbname if it is found
1441 name=self.options.dbname
1442 if not isinstance(name,StringTypes):
1445 t=datetime.datetime.now()
1448 return "/root/%s-%s.sql"%(database,name)
1450 def plc_db_dump(self):
1451 'dump the planetlab5 DB in /root in the PLC - filename includes a timestamp'
1452 dump=self.dbfile("planetlab5")
1453 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1454 utils.header('Dumped planetlab5 database in %s'%dump)
1457 def plc_db_restore(self):
1458 'restore the planetlab5 DB - looks broken, but run -n might help'
1459 dump=self.dbfile("planetlab5")
1460 # stop httpd service
1461 self.run_in_guest('service httpd stop')
1462 # xxx - need another wrapper
1463 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1464 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1465 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1466 # start httpd service again
1467 self.run_in_guest('service httpd start')
1469 utils.header('Database restored from ' + dump)
1471 def standby_1_through_20(self):
1472 """convenience function to wait for a specified number of minutes"""
1475 def standby_1(): pass
1477 def standby_2(): pass
1479 def standby_3(): pass
1481 def standby_4(): pass
1483 def standby_5(): pass
1485 def standby_6(): pass
1487 def standby_7(): pass
1489 def standby_8(): pass
1491 def standby_9(): pass
1493 def standby_10(): pass
1495 def standby_11(): pass
1497 def standby_12(): pass
1499 def standby_13(): pass
1501 def standby_14(): pass
1503 def standby_15(): pass
1505 def standby_16(): pass
1507 def standby_17(): pass
1509 def standby_18(): pass
1511 def standby_19(): pass
1513 def standby_20(): pass