1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
24 # step methods must take (self) and return a boolean (options is a member of the class)
26 def standby(minutes,dry_run):
27 utils.header('Entering StandBy for %d mn'%minutes)
31 time.sleep(60*minutes)
34 def standby_generic (func):
36 minutes=int(func.__name__.split("_")[1])
37 return standby(minutes,self.options.dry_run)
40 def node_mapper (method):
43 node_method = TestNode.__dict__[method.__name__]
44 for site_spec in self.plc_spec['sites']:
45 test_site = TestSite (self,site_spec)
46 for node_spec in site_spec['nodes']:
47 test_node = TestNode (self,test_site,node_spec)
48 if not node_method(test_node): overall=False
50 # restore the doc text
51 actual.__doc__=method.__doc__
54 def slice_mapper (method):
57 slice_method = TestSlice.__dict__[method.__name__]
58 for slice_spec in self.plc_spec['slices']:
59 site_spec = self.locate_site (slice_spec['sitename'])
60 test_site = TestSite(self,site_spec)
61 test_slice=TestSlice(self,test_site,slice_spec)
62 if not slice_method(test_slice,self.options): overall=False
64 # restore the doc text
65 actual.__doc__=method.__doc__
68 def slice_sfa_mapper (method):
71 slice_method = TestSliceSfa.__dict__[method.__name__]
72 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
73 site_spec = self.locate_site (slice_spec['sitename'])
74 test_site = TestSite(self,site_spec)
75 test_slice=TestSliceSfa(self,test_site,slice_spec)
76 if not slice_method(test_slice,self.options): overall=False
78 # restore the doc text
79 actual.__doc__=method.__doc__
89 'vs_delete','timestamp_vs','vs_create', SEP,
90 'plc_install', 'plc_configure', 'plc_start', SEP,
91 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
92 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
93 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
94 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
95 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
96 'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
97 'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
98 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
99 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
100 # but as the stress test might take a while, we sometimes missed the debug mode..
101 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
102 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
103 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
105 'force_gather_logs', SEP,
108 'export', 'show_boxes', SEP,
109 'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
110 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
111 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
112 'delete_leases', 'list_leases', SEP,
114 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
115 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
116 'sfa_install', 'sfa_tables_install', 'sfa_plc_install', 'sfa_client_install', SEPSFA,
117 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
118 'plc_db_dump' , 'plc_db_restore', SEP,
119 'standby_1_through_20',SEP,
def printable_steps (list):
    # NOTE(review): the parameter shadows the builtin 'list'; kept as-is so
    # existing callers (incl. keyword calls) are not broken.
    # Lay all step names on one line, then break the line at each separator marker.
    single_line=" ".join(list)+" "
    for marker in (SEP,SEPSFA):
        single_line=single_line.replace(" "+marker+" "," \\\n")
    return single_line
def valid_step (step):
    # separators are layout markers in the step lists, not runnable steps
    return step not in (SEP,SEPSFA)
130 # turn off the sfa-related steps when build has skipped SFA
131 # this is originally for centos5 as recent SFAs won't build on this platform
133 def check_whether_build_has_sfa (rpms_url):
134 # warning, we're now building 'sface' so let's be a bit more picky
135 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
136 # full builds are expected to return with 0 here
138 # move all steps containing 'sfa' from default_steps to other_steps
139 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
140 TestPlc.other_steps += sfa_steps
141 for step in sfa_steps: TestPlc.default_steps.remove(step)
143 def __init__ (self,plc_spec,options):
144 self.plc_spec=plc_spec
146 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
148 self.vserverip=plc_spec['vserverip']
149 self.vservername=plc_spec['vservername']
150 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
153 raise Exception,'chroot-based myplc testing is deprecated'
154 self.apiserver=TestApiserver(self.url,options.dry_run)
157 name=self.plc_spec['name']
158 return "%s.%s"%(name,self.vservername)
161 return self.plc_spec['host_box']
164 return self.test_ssh.is_local()
166 # define the API methods on this object through xmlrpc
167 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    # wrap <command> so it runs inside the vserver guest, then wrap that
    # again for execution on the host box through the test ssh helper
    guest_command = self.host_to_guest(command)
    return self.test_ssh.actual_command(guest_command)
def start_guest (self):
    # run the host-side 'vserver ... start' command through the ssh wrapper
    # and return the exit status of utils.system
    command = self.start_guest_in_host()
    return utils.system(self.test_ssh.actual_command(command))
def stop_guest (self):
    # run the host-side 'vserver ... stop' command through the ssh wrapper
    # and return the exit status of utils.system
    command = self.stop_guest_in_host()
    return utils.system(self.test_ssh.actual_command(command))
def run_in_guest (self,command):
    # execute <command> inside the vserver guest; returns the exit status
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    # execute <command> on the host box itself (not inside the vserver)
    return self.test_ssh.run_in_buildname(command)
186 #command gets run in the vserver
def host_to_guest(self,command):
    # build the host-side command line that executes <command> inside our vserver
    parts = (self.vservername, command)
    return "vserver %s exec %s" % parts
190 #start/stop the vserver
def start_guest_in_host(self):
    # host-side command that boots our vserver
    return "vserver %s start" % self.vservername
def stop_guest_in_host(self):
    # host-side command that shuts our vserver down
    return "vserver %s stop" % self.vservername
def run_in_guest_piped (self,local,remote):
    # pipe the output of the <local> command into <remote> running inside
    # the vserver; keep_stdin so the remote side reads the piped data
    remote_command = self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True)
    return utils.system(local+" | "+remote_command)
201 # does a yum install in the vs, ignore yum retcod, check with rpm
202 def yum_install (self, rpms):
203 if isinstance (rpms, list):
205 self.run_in_guest("yum -y install %s"%rpms)
206 self.run_in_guest("yum-complete-transaction")
207 return self.run_in_guest("rpm -q %s"%rpms)==0
209 def auth_root (self):
210 return {'Username':self.plc_spec['PLC_ROOT_USER'],
211 'AuthMethod':'password',
212 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
213 'Role' : self.plc_spec['role']
215 def locate_site (self,sitename):
216 for site in self.plc_spec['sites']:
217 if site['site_fields']['name'] == sitename:
219 if site['site_fields']['login_base'] == sitename:
221 raise Exception,"Cannot locate site %s"%sitename
223 def locate_node (self,nodename):
224 for site in self.plc_spec['sites']:
225 for node in site['nodes']:
226 if node['name'] == nodename:
228 raise Exception,"Cannot locate node %s"%nodename
230 def locate_hostname (self,hostname):
231 for site in self.plc_spec['sites']:
232 for node in site['nodes']:
233 if node['node_fields']['hostname'] == hostname:
235 raise Exception,"Cannot locate hostname %s"%hostname
237 def locate_key (self,keyname):
238 for key in self.plc_spec['keys']:
239 if key['name'] == keyname:
241 raise Exception,"Cannot locate key %s"%keyname
243 def locate_slice (self, slicename):
244 for slice in self.plc_spec['slices']:
245 if slice['slice_fields']['name'] == slicename:
247 raise Exception,"Cannot locate slice %s"%slicename
249 def all_sliver_objs (self):
251 for slice_spec in self.plc_spec['slices']:
252 slicename = slice_spec['slice_fields']['name']
253 for nodename in slice_spec['nodenames']:
254 result.append(self.locate_sliver_obj (nodename,slicename))
257 def locate_sliver_obj (self,nodename,slicename):
258 (site,node) = self.locate_node(nodename)
259 slice = self.locate_slice (slicename)
261 test_site = TestSite (self, site)
262 test_node = TestNode (self, test_site,node)
263 # xxx the slice site is assumed to be the node site - mhh - probably harmless
264 test_slice = TestSlice (self, test_site, slice)
265 return TestSliver (self, test_node, test_slice)
267 def locate_first_node(self):
268 nodename=self.plc_spec['slices'][0]['nodenames'][0]
269 (site,node) = self.locate_node(nodename)
270 test_site = TestSite (self, site)
271 test_node = TestNode (self, test_site,node)
274 def locate_first_sliver (self):
275 slice_spec=self.plc_spec['slices'][0]
276 slicename=slice_spec['slice_fields']['name']
277 nodename=slice_spec['nodenames'][0]
278 return self.locate_sliver_obj(nodename,slicename)
280 # all different hostboxes used in this plc
281 def gather_hostBoxes(self):
282 # maps on sites and nodes, return [ (host_box,test_node) ]
284 for site_spec in self.plc_spec['sites']:
285 test_site = TestSite (self,site_spec)
286 for node_spec in site_spec['nodes']:
287 test_node = TestNode (self, test_site, node_spec)
288 if not test_node.is_real():
289 tuples.append( (test_node.host_box(),test_node) )
290 # transform into a dict { 'host_box' -> [ test_node .. ] }
292 for (box,node) in tuples:
293 if not result.has_key(box):
296 result[box].append(node)
299 # a step for checking this stuff
300 def show_boxes (self):
301 'print summary of nodes location'
302 for (box,nodes) in self.gather_hostBoxes().iteritems():
303 print box,":"," + ".join( [ node.name() for node in nodes ] )
306 # make this a valid step
307 def qemu_kill_all(self):
308 'kill all qemu instances on the qemu boxes involved by this setup'
309 # this is the brute force version, kill all qemus on that host box
310 for (box,nodes) in self.gather_hostBoxes().iteritems():
311 # pass the first nodename, as we don't push template-qemu on testboxes
312 nodedir=nodes[0].nodedir()
313 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
316 # make this a valid step
317 def qemu_list_all(self):
318 'list all qemu instances on the qemu boxes involved by this setup'
319 for (box,nodes) in self.gather_hostBoxes().iteritems():
320 # this is the brute force version, kill all qemus on that host box
321 TestBoxQemu(box,self.options.buildname).qemu_list_all()
324 # kill only the right qemus
325 def qemu_list_mine(self):
326 'list qemu instances for our nodes'
327 for (box,nodes) in self.gather_hostBoxes().iteritems():
328 # the fine-grain version
333 # kill only the right qemus
334 def qemu_kill_mine(self):
335 'kill the qemu instances for our nodes'
336 for (box,nodes) in self.gather_hostBoxes().iteritems():
337 # the fine-grain version
342 #################### display config
344 "show test configuration after localization"
345 self.display_pass (1)
346 self.display_pass (2)
350 "print cut'n paste-able stuff to export env variables to your shell"
351 # these work but the shell prompt does not get displayed..
352 command1="ssh %s vserver %s enter"%(self.plc_spec['host_box'],self.plc_spec['vservername'])
353 command2="ssh root@%s %s"%(socket.gethostname(),command1)
354 # guess local domain from hostname
355 domain=socket.gethostname().split('.',1)[1]
356 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
357 print "export BUILD=%s"%self.options.buildname
358 print "export PLCHOST=%s"%fqdn
359 print "export GUEST=%s"%self.plc_spec['vservername']
360 # find hostname of first node
361 (hostname,qemubox) = self.all_node_infos()[0]
362 print "export KVMHOST=%s.%s"%(qemubox,domain)
363 print "export NODE=%s"%(hostname)
367 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
368 def display_pass (self,passno):
369 for (key,val) in self.plc_spec.iteritems():
370 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
374 self.display_site_spec(site)
375 for node in site['nodes']:
376 self.display_node_spec(node)
377 elif key=='initscripts':
378 for initscript in val:
379 self.display_initscript_spec (initscript)
382 self.display_slice_spec (slice)
385 self.display_key_spec (key)
387 if key not in ['sites','initscripts','slices','keys', 'sfa']:
388 print '+ ',key,':',val
390 def display_site_spec (self,site):
391 print '+ ======== site',site['site_fields']['name']
392 for (k,v) in site.iteritems():
393 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
396 print '+ ','nodes : ',
398 print node['node_fields']['hostname'],'',
404 print user['name'],'',
406 elif k == 'site_fields':
407 print '+ login_base',':',v['login_base']
408 elif k == 'address_fields':
414 def display_initscript_spec (self,initscript):
415 print '+ ======== initscript',initscript['initscript_fields']['name']
417 def display_key_spec (self,key):
418 print '+ ======== key',key['name']
420 def display_slice_spec (self,slice):
421 print '+ ======== slice',slice['slice_fields']['name']
422 for (k,v) in slice.iteritems():
435 elif k=='slice_fields':
436 print '+ fields',':',
437 print 'max_nodes=',v['max_nodes'],
442 def display_node_spec (self,node):
443 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
444 print "hostname=",node['node_fields']['hostname'],
445 print "ip=",node['interface_fields']['ip']
446 if self.options.verbose:
447 utils.pprint("node details",node,depth=3)
449 # another entry point for just showing the boxes involved
450 def display_mapping (self):
451 TestPlc.display_mapping_plc(self.plc_spec)
455 def display_mapping_plc (plc_spec):
456 print '+ MyPLC',plc_spec['name']
457 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
458 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
459 for site_spec in plc_spec['sites']:
460 for node_spec in site_spec['nodes']:
461 TestPlc.display_mapping_node(node_spec)
464 def display_mapping_node (node_spec):
465 print '+ NODE %s'%(node_spec['name'])
466 print '+\tqemu box %s'%node_spec['host_box']
467 print '+\thostname=%s'%node_spec['node_fields']['hostname']
469 # write a timestamp in /vservers/<>.timestamp
470 # cannot be inside the vserver, that causes vserver .. build to cough
471 def timestamp_vs (self):
473 return utils.system(self.test_ssh.actual_command("echo %d > /vservers/%s.timestamp"%(now,self.vservername)))==0
475 # def local_pre (self):
476 # "run site-dependant pre-test script as defined in LocalTestResources"
477 # from LocalTestResources import local_resources
478 # return local_resources.step_pre(self)
480 # def local_post (self):
481 # "run site-dependant post-test script as defined in LocalTestResources"
482 # from LocalTestResources import local_resources
483 # return local_resources.step_post(self)
485 # def local_list (self):
486 # "run site-dependant list script as defined in LocalTestResources"
487 # from LocalTestResources import local_resources
488 # return local_resources.step_list(self)
490 # def local_rel (self):
491 # "run site-dependant release script as defined in LocalTestResources"
492 # from LocalTestResources import local_resources
493 # return local_resources.step_release(self)
495 # def local_rel_plc (self):
496 # "run site-dependant release script as defined in LocalTestResources"
497 # from LocalTestResources import local_resources
498 # return local_resources.step_release_plc(self)
500 # def local_rel_qemu (self):
501 # "run site-dependant release script as defined in LocalTestResources"
502 # from LocalTestResources import local_resources
503 # return local_resources.step_release_qemu(self)
506 "vserver delete the test myplc"
507 self.run_in_host("vserver --silent %s delete"%self.vservername)
508 self.run_in_host("rm -f /vservers/%s.timestamp"%self.vservername)
512 # historically the build was being fetched by the tests
513 # now the build pushes itself as a subdir of the tests workdir
514 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
515 def vs_create (self):
516 "vserver creation (no install done)"
517 # push the local build/ dir to the testplc box
519 # a full path for the local calls
520 build_dir=os.path.dirname(sys.argv[0])
521 # sometimes this is empty - set to "." in such a case
522 if not build_dir: build_dir="."
523 build_dir += "/build"
525 # use a standard name - will be relative to remote buildname
527 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
528 self.test_ssh.rmdir(build_dir)
529 self.test_ssh.copy(build_dir,recursive=True)
530 # the repo url is taken from arch-rpms-url
531 # with the last step (i386) removed
532 repo_url = self.options.arch_rpms_url
533 for level in [ 'arch' ]:
534 repo_url = os.path.dirname(repo_url)
535 # pass the vbuild-nightly options to vtest-init-vserver
537 test_env_options += " -p %s"%self.options.personality
538 test_env_options += " -d %s"%self.options.pldistro
539 test_env_options += " -f %s"%self.options.fcdistro
540 script="vtest-init-vserver.sh"
541 vserver_name = self.vservername
542 vserver_options="--netdev eth0 --interface %s"%self.vserverip
544 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
545 vserver_options += " --hostname %s"%vserver_hostname
547 print "Cannot reverse lookup %s"%self.vserverip
548 print "This is considered fatal, as this might pollute the test results"
550 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
551 return self.run_in_host(create_vserver) == 0
554 def plc_install(self):
555 "yum install myplc, noderepo, and the plain bootstrapfs"
557 # workaround for getting pgsql8.2 on centos5
558 if self.options.fcdistro == "centos5":
559 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
562 if self.options.personality == "linux32":
564 elif self.options.personality == "linux64":
567 raise Exception, "Unsupported personality %r"%self.options.personality
568 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
571 pkgs_list.append ("slicerepo-%s"%nodefamily)
572 pkgs_list.append ("myplc")
573 pkgs_list.append ("noderepo-%s"%nodefamily)
574 pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
575 pkgs_string=" ".join(pkgs_list)
576 return self.yum_install (pkgs_list)
579 def plc_configure(self):
581 tmpname='%s.plc-config-tty'%(self.name())
582 fileconf=open(tmpname,'w')
583 for var in [ 'PLC_NAME',
588 'PLC_MAIL_SUPPORT_ADDRESS',
591 # Above line was added for integrating SFA Testing
597 'PLC_RESERVATION_GRANULARITY',
599 'PLC_OMF_XMPP_SERVER',
601 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
602 fileconf.write('w\n')
603 fileconf.write('q\n')
605 utils.system('cat %s'%tmpname)
606 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
607 utils.system('rm %s'%tmpname)
612 self.run_in_guest('service plc start')
617 self.run_in_guest('service plc stop')
621 "start the PLC vserver"
626 "stop the PLC vserver"
630 # stores the keys from the config for further use
631 def keys_store(self):
632 "stores test users ssh keys in keys/"
633 for key_spec in self.plc_spec['keys']:
634 TestKey(self,key_spec).store_key()
637 def keys_clean(self):
638 "removes keys cached in keys/"
639 utils.system("rm -rf ./keys")
642 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
643 # for later direct access to the nodes
644 def keys_fetch(self):
645 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
647 if not os.path.isdir(dir):
649 vservername=self.vservername
651 prefix = 'debug_ssh_key'
652 for ext in [ 'pub', 'rsa' ] :
653 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
654 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
655 if self.test_ssh.fetch(src,dst) != 0: overall=False
659 "create sites with PLCAPI"
660 return self.do_sites()
def delete_sites (self):
    "delete sites with PLCAPI"
    # same traversal as the creation step, with the delete action
    return self.do_sites(action="delete")
666 def do_sites (self,action="add"):
667 for site_spec in self.plc_spec['sites']:
668 test_site = TestSite (self,site_spec)
669 if (action != "add"):
670 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
671 test_site.delete_site()
672 # deleted with the site
673 #test_site.delete_users()
676 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
677 test_site.create_site()
678 test_site.create_users()
681 def delete_all_sites (self):
682 "Delete all sites in PLC, and related objects"
683 print 'auth_root',self.auth_root()
684 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
685 for site_id in site_ids:
686 print 'Deleting site_id',site_id
687 self.apiserver.DeleteSite(self.auth_root(),site_id)
691 "create nodes with PLCAPI"
692 return self.do_nodes()
def delete_nodes (self):
    "delete nodes with PLCAPI"
    # same traversal as the creation step, with the delete action
    return self.do_nodes(action="delete")
697 def do_nodes (self,action="add"):
698 for site_spec in self.plc_spec['sites']:
699 test_site = TestSite (self,site_spec)
701 utils.header("Deleting nodes in site %s"%test_site.name())
702 for node_spec in site_spec['nodes']:
703 test_node=TestNode(self,test_site,node_spec)
704 utils.header("Deleting %s"%test_node.name())
705 test_node.delete_node()
707 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
708 for node_spec in site_spec['nodes']:
709 utils.pprint('Creating node %s'%node_spec,node_spec)
710 test_node = TestNode (self,test_site,node_spec)
711 test_node.create_node ()
def nodegroups (self):
    "create nodegroups with PLCAPI"
    # delegate to the shared add/delete machinery
    return self.do_nodegroups("add")
def delete_nodegroups (self):
    "delete nodegroups with PLCAPI"
    # delegate to the shared add/delete machinery
    return self.do_nodegroups("delete")
def translate_timestamp (start,grain,timestamp):
    # leases may be specified either relative or absolute:
    # values below TestPlc.YEAR are offsets in <grain> units from <start>;
    # anything at least a year's worth of seconds is an absolute epoch time
    if timestamp >= TestPlc.YEAR:
        return timestamp
    return start+timestamp*grain
def timestamp_printable (timestamp):
    # render an epoch timestamp as a short human-readable UTC string
    utc_tuple = time.gmtime(timestamp)
    return time.strftime('%m-%d %H:%M:%S UTC',utc_tuple)
732 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
734 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
735 print 'API answered grain=',grain
736 start=(now/grain)*grain
738 # find out all nodes that are reservable
739 nodes=self.all_reservable_nodenames()
741 utils.header ("No reservable node found - proceeding without leases")
744 # attach them to the leases as specified in plc_specs
745 # this is where the 'leases' field gets interpreted as relative or absolute
746 for lease_spec in self.plc_spec['leases']:
747 # skip the ones that come with a null slice id
748 if not lease_spec['slice']: continue
749 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
750 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
751 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
752 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
753 if lease_addition['errors']:
754 utils.header("Cannot create leases, %s"%lease_addition['errors'])
757 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
758 (nodes,lease_spec['slice'],
759 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
760 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
764 def delete_leases (self):
765 "remove all leases in the myplc side"
766 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
767 utils.header("Cleaning leases %r"%lease_ids)
768 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
771 def list_leases (self):
772 "list all leases known to the myplc"
773 leases = self.apiserver.GetLeases(self.auth_root())
776 current=l['t_until']>=now
777 if self.options.verbose or current:
778 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
779 TestPlc.timestamp_printable(l['t_from']),
780 TestPlc.timestamp_printable(l['t_until'])))
783 # create nodegroups if needed, and populate
784 def do_nodegroups (self, action="add"):
785 # 1st pass to scan contents
787 for site_spec in self.plc_spec['sites']:
788 test_site = TestSite (self,site_spec)
789 for node_spec in site_spec['nodes']:
790 test_node=TestNode (self,test_site,node_spec)
791 if node_spec.has_key('nodegroups'):
792 nodegroupnames=node_spec['nodegroups']
793 if isinstance(nodegroupnames,StringTypes):
794 nodegroupnames = [ nodegroupnames ]
795 for nodegroupname in nodegroupnames:
796 if not groups_dict.has_key(nodegroupname):
797 groups_dict[nodegroupname]=[]
798 groups_dict[nodegroupname].append(test_node.name())
799 auth=self.auth_root()
801 for (nodegroupname,group_nodes) in groups_dict.iteritems():
803 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
804 # first, check if the nodetagtype is here
805 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
807 tag_type_id = tag_types[0]['tag_type_id']
809 tag_type_id = self.apiserver.AddTagType(auth,
810 {'tagname':nodegroupname,
811 'description': 'for nodegroup %s'%nodegroupname,
813 print 'located tag (type)',nodegroupname,'as',tag_type_id
815 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
817 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
818 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
819 # set node tag on all nodes, value='yes'
820 for nodename in group_nodes:
822 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
824 traceback.print_exc()
825 print 'node',nodename,'seems to already have tag',nodegroupname
828 expect_yes = self.apiserver.GetNodeTags(auth,
829 {'hostname':nodename,
830 'tagname':nodegroupname},
831 ['value'])[0]['value']
832 if expect_yes != "yes":
833 print 'Mismatch node tag on node',nodename,'got',expect_yes
836 if not self.options.dry_run:
837 print 'Cannot find tag',nodegroupname,'on node',nodename
841 print 'cleaning nodegroup',nodegroupname
842 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
844 traceback.print_exc()
848 # return a list of tuples (nodename,qemuname)
849 def all_node_infos (self) :
851 for site_spec in self.plc_spec['sites']:
852 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
853 for node_spec in site_spec['nodes'] ]
def all_nodenames (self):
    # hostnames only - drop the qemu box from each (hostname,qemubox) pair
    return [ hostname for (hostname,qemubox) in self.all_node_infos() ]
857 def all_reservable_nodenames (self):
859 for site_spec in self.plc_spec['sites']:
860 for node_spec in site_spec['nodes']:
861 node_fields=node_spec['node_fields']
862 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
863 res.append(node_fields['hostname'])
866 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
867 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
868 if self.options.dry_run:
872 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
873 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
874 # the nodes that haven't checked yet - start with a full list and shrink over time
875 tocheck = self.all_hostnames()
876 utils.header("checking nodes %r"%tocheck)
877 # create a dict hostname -> status
878 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
881 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
883 for array in tocheck_status:
884 hostname=array['hostname']
885 boot_state=array['boot_state']
886 if boot_state == target_boot_state:
887 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
889 # if it's a real node, never mind
890 (site_spec,node_spec)=self.locate_hostname(hostname)
891 if TestNode.is_real_model(node_spec['node_fields']['model']):
892 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
894 boot_state = target_boot_state
895 elif datetime.datetime.now() > graceout:
896 utils.header ("%s still in '%s' state"%(hostname,boot_state))
897 graceout=datetime.datetime.now()+datetime.timedelta(1)
898 status[hostname] = boot_state
900 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
903 if datetime.datetime.now() > timeout:
904 for hostname in tocheck:
905 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
907 # otherwise, sleep for a while
909 # only useful in empty plcs
912 def nodes_booted(self):
913 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
915 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
917 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
918 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
919 vservername=self.vservername
922 local_key = "keys/%(vservername)s-debug.rsa"%locals()
925 local_key = "keys/key1.rsa"
926 node_infos = self.all_node_infos()
927 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
928 for (nodename,qemuname) in node_infos:
929 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
930 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
931 (timeout_minutes,silent_minutes,period))
933 for node_info in node_infos:
934 (hostname,qemuname) = node_info
935 # try to run 'hostname' in the node
936 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
937 # don't spam logs - show the command only after the grace period
938 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
940 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
942 node_infos.remove(node_info)
944 # we will have tried real nodes once, in case they're up - but if not, just skip
945 (site_spec,node_spec)=self.locate_hostname(hostname)
946 if TestNode.is_real_model(node_spec['node_fields']['model']):
947 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
948 node_infos.remove(node_info)
951 if datetime.datetime.now() > timeout:
952 for (hostname,qemuname) in node_infos:
953 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
955 # otherwise, sleep for a while
957 # only useful in empty plcs
def ssh_node_debug(self):
    "Tries to ssh into nodes in debug mode with the debug ssh key"
    # debug mode is expected to come up quickly, hence the short timeouts
    return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)
def ssh_node_boot(self):
    "Tries to ssh into nodes in production mode with the root ssh key"
    # reaching boot state can take much longer, hence the larger timeouts
    return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
969 def qemu_local_init (self):
970 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
974 "all nodes: invoke GetBootMedium and store result locally"
977 def qemu_local_config (self):
978 "all nodes: compute qemu config qemu.conf and store it locally"
981 def nodestate_reinstall (self):
982 "all nodes: mark PLCAPI boot_state as reinstall"
985 def nodestate_safeboot (self):
986 "all nodes: mark PLCAPI boot_state as safeboot"
989 def nodestate_boot (self):
990 "all nodes: mark PLCAPI boot_state as boot"
993 def nodestate_show (self):
994 "all nodes: show PLCAPI boot_state"
997 def qemu_export (self):
998 "all nodes: push local node-dep directory on the qemu box"
1001 ### check hooks : invoke scripts from hooks/{node,slice}
def check_hooks_node (self):
    # run the node-side hook scripts on an arbitrary (the first) node
    first_node = self.locate_first_node()
    return first_node.check_hooks()
def check_hooks_sliver (self) :
    # run the slice-side hook scripts on an arbitrary (the first) sliver
    first_sliver = self.locate_first_sliver()
    return first_sliver.check_hooks()
def check_hooks (self):
    "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
    # short-circuit: the sliver hooks only run if the node hooks passed
    node_ok = self.check_hooks_node()
    return node_ok and self.check_hooks_sliver()
def do_check_initscripts(self):
    # verify that each slice's initscript left its stamp on every node of the slice
    # (lines missing from this excerpt: the initial accumulator, the body of the
    # has_key guard - presumably 'continue' - and the final return)
    for slice_spec in self.plc_spec['slices']:
        if not slice_spec.has_key('initscriptstamp'):
        stamp=slice_spec['initscriptstamp']
        for nodename in slice_spec['nodenames']:
            (site,node) = self.locate_node (nodename)
            # xxx - passing the wrong site - probably harmless
            test_site = TestSite (self,site)
            test_slice = TestSlice (self,test_site,slice_spec)
            test_node = TestNode (self,test_site,node)
            test_sliver = TestSliver (self, test_node, test_slice)
            if not test_sliver.check_initscript_stamp(stamp):
            # (failure handling missing from this excerpt)
def check_initscripts(self):
    "check that the initscripts have triggered"
    # named step wrapping the actual checker
    result = self.do_check_initscripts()
    return result
def initscripts (self):
    "create initscripts with PLCAPI"
    # push every initscript from the spec into the PLCAPI
    for initscript in self.plc_spec['initscripts']:
        utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
        self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
    # (final return missing from this excerpt - presumably 'return True')
def delete_initscripts (self):
    "delete initscripts with PLCAPI"
    for initscript in self.plc_spec['initscripts']:
        initscript_name = initscript['initscript_fields']['name']
        print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
        # ('try:' line missing from this excerpt)
        self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
        print initscript_name,'deleted'
        # ('except:' line missing from this excerpt - failures are tolerated,
        # the script may simply not exist)
        print 'deletion went wrong - probably did not exist'
    # (final return missing from this excerpt)
# NOTE(review): the 'def slices(self):' line is missing from this excerpt
    "create slices with PLCAPI"
    return self.do_slices()
def delete_slices (self):
    "delete slices with PLCAPI"
    # same machinery as slice creation, just with the delete action
    return self.do_slices(action="delete")
def do_slices (self, action="add"):
    # shared implementation behind the slices / delete_slices steps
    for slice in self.plc_spec['slices']:
        site_spec = self.locate_site (slice['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice)
        # (branch line missing from this excerpt - presumably tests 'action')
        utils.header("Deleting slices in site %s"%test_site.name())
        test_slice.delete_slice()
        # (presumably an 'else:' line is missing from this excerpt)
        utils.pprint("Creating slice",slice)
        test_slice.create_slice()
        utils.header('Created Slice %s'%slice['slice_fields']['name'])
    # (final return missing from this excerpt)
def ssh_slice(self):
    "tries to ssh-enter the slice with the user key, to ensure slice creation"
    # (body missing from this excerpt)

def keys_clear_known_hosts (self):
    "remove test nodes entries from the local known_hosts file"
    # (body missing from this excerpt)

def qemu_start (self) :
    "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
    # (body missing from this excerpt)

def timestamp_qemu (self) :
    # NOTE(review): the original docstring here was a verbatim copy of
    # qemu_start's; reworded to match the method's name - confirm intent
    "all nodes: timestamp the qemu instance"
    # (body missing from this excerpt)
def check_tcp (self):
    "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
    specs = self.plc_spec['tcp_test']
    # (lines missing from this excerpt: the loop over specs and the port setup)
    s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
    if not s_test_sliver.run_tcp_server(port,timeout=10):
    # (server-failure handling missing from this excerpt)
    # idem for the client side
    # FIXME(review): this looks up the *server* node/slice again; it almost
    # certainly should use spec['client_node'] / spec['client_slice']
    c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
    if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
    # (client-failure handling and final return missing from this excerpt)
def plcsh_stress_test (self):
    "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # ('command' initialization missing from this excerpt - presumably built from 'location')
    command += " -- --check"
    if self.options.size == 1:
        command += " --tiny"
    return ( self.run_in_guest(command) == 0)
# populate runs the same utility with slightly different options
1126 # in particular runs with --preserve (dont cleanup) and without --check
1127 # also it gets run twice, once with the --foreign option for creating fake foreign entries
def sfa_install_all (self):
    "yum install sfa sfa-plc sfa-sfatables sfa-client"
    # pull in the whole SFA stack in a single yum transaction
    packages = "sfa sfa-plc sfa-sfatables sfa-client"
    return self.yum_install (packages)
def sfa_install(self):
    "yum install sfa"
    return self.yum_install ("sfa")
def sfa_plc_install(self):
    "yum install sfa-plc"
    # the plc-side SFA package only
    package = "sfa-plc"
    return self.yum_install(package)
def sfa_client_install(self):
    "yum install sfa-client"
    # the client-side SFA package only
    package = "sfa-client"
    return self.yum_install(package)
def sfa_tables_install(self):
    "yum install sfa-sfatables"
    # the sfatables package only
    package = "sfa-sfatables"
    return self.yum_install (package)
def sfa_dbclean(self):
    "thoroughly wipes off the SFA database"
    # BUGFIX: the original computed the ==0 comparison but discarded it, so the
    # step always returned None; per this file's convention step methods must
    # return a boolean, so propagate the command's success status
    return self.run_in_guest("sfa-nuke-plc.py")==0
def sfa_plcclean(self):
    "cleans the PLC entries that were created as a side effect of running the script"
    # (a line is missing from this excerpt between the docstring and the next statement)
    sfa_spec=self.plc_spec['sfa']
    # for each sfa slice spec, delete the PLC-side slice and its regular user
    for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
        slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
        # NOTE(review): the bare excepts below also hide genuine API errors,
        # not only the already-deleted case
        try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
        except: print "Slice %s already absent from PLC db"%slicename
        username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
        try: self.apiserver.DeletePerson(self.auth_root(),username)
        except: print "User %s already absent from PLC db"%username
    print "REMEMBER TO RUN sfa_import AGAIN"
    # (final return missing from this excerpt)
def sfa_uninstall(self):
    "uses rpm to uninstall sfa - ignore result"
    # remove the packages, then wipe SFA state, config and logs
    self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
    self.run_in_guest("rm -rf /var/lib/sfa")
    self.run_in_guest("rm -rf /etc/sfa")
    self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
    # (a line is missing from this excerpt before the next command)
    # forced removal of sfa-plc, skipping its rpm scriptlets
    self.run_in_guest("rpm -e --noscripts sfa-plc")
### run unit tests for SFA
# NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
# Running Transaction
# Transaction couldn't start:
# installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
# [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
# no matter how many Gbs are available on the testplc
# could not figure out what's wrong, so...
# if the yum install phase fails, consider the test is successful
# other combinations will eventually run it hopefully
def sfa_utest(self):
    "yum install sfa-tests and run SFA unittests"
    self.run_in_guest("yum -y install sfa-tests")
    # failed to install - forget it
    if self.run_in_guest("rpm -q sfa-tests")!=0:
        utils.header("WARNING: SFA unit tests failed to install, ignoring")
        # (line missing from this excerpt - presumably 'return True',
        # per the rationale in the comment block above)
    return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
# NOTE(review): the enclosing 'def confdir(self):' line is missing from this excerpt
    # local per-plc configuration directory, created on demand
    dirname="conf.%s"%self.plc_spec['name']
    if not os.path.isdir(dirname):
        utils.system("mkdir -p %s"%dirname)
    if not os.path.isdir(dirname):
        # NOTE(review): raising a string is rejected by modern Python 2
        # (TypeError); should raise an Exception instance instead
        raise "Cannot create config dir for plc %s"%self.name()
    # (final 'return dirname' presumably missing from this excerpt)
def conffile(self,filename):
    # local path of a named config file, under this plc's conf directory
    base_dir = self.confdir()
    return base_dir + "/" + filename
def confsubdir(self,dirname,clean,dry_run=False):
    # a subdir of confdir, optionally wiped first, created on demand
    subdirname="%s/%s"%(self.confdir(),dirname)
    # (a guard line is missing from this excerpt - presumably 'if clean:')
    utils.system("rm -rf %s"%subdirname)
    if not os.path.isdir(subdirname):
        utils.system("mkdir -p %s"%subdirname)
    if not dry_run and not os.path.isdir(subdirname):
        # NOTE(review): raising a string is rejected by modern Python 2
        # (TypeError); should raise an Exception instance instead
        raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
    # (final 'return subdirname' presumably missing from this excerpt)
def conffile_clean (self,filename):
    # resolve the name to its full path inside confdir, then remove it
    full_path = self.conffile(filename)
    return utils.system("rm -rf %s"%full_path)==0
def sfa_configure(self):
    "run sfa-config-tty"
    # build a scripted input file for sfa-config-tty locally, then pipe it
    # into the tool inside the guest
    tmpname=self.conffile("sfa-config-tty")
    fileconf=open(tmpname,'w')
    # 'e VAR' followed by the value edits that variable in config-tty
    for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                 'SFA_INTERFACE_HRN',
                 # 'SFA_REGISTRY_LEVEL1_AUTH',
                 'SFA_REGISTRY_HOST',
                 'SFA_AGGREGATE_HOST',
    # (the rest of the variable list and the closing bracket are missing
    # from this excerpt)
        fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
    # the way plc_config handles booleans just sucks..
    for var in ['SFA_API_DEBUG']:
        # (a line is missing from this excerpt - presumably the 'false' default)
        if self.plc_spec['sfa'][var]: val='true'
        fileconf.write ('e %s\n%s\n'%(var,val))
    # w/R/q: assumed to be config-tty's write / restart / quit commands - confirm
    fileconf.write('w\n')
    fileconf.write('R\n')
    fileconf.write('q\n')
    # (a line is missing from this excerpt - presumably fileconf.close())
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
def aggregate_xml_line(self):
    # one <aggregate/> element describing this plc, for use in aggregates.xml
    sfa_spec = self.plc_spec['sfa']
    hrn = sfa_spec['SFA_REGISTRY_ROOT_AUTH']
    port = sfa_spec['neighbours-port']
    return '<aggregate addr="%s" hrn="%s" port="%r"/>' % (self.vserverip, hrn, port)
def registry_xml_line(self):
    # one <registry/> element describing this plc; the registry port is fixed
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (self.vserverip, hrn)
# a cross step that takes all other plcs in argument
def cross_sfa_configure(self, other_plcs):
    "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
    # of course with a single plc, other_plcs is an empty list
    # (a line or two is missing from this excerpt after the comment above)
    agg_fname=self.conffile("agg.xml")
    file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
        " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%agg_fname)
    reg_fname=self.conffile("reg.xml")
    file(reg_fname,"w").write("<registries>%s</registries>\n" % \
        " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%reg_fname)
    # push both files into the guest's /etc/sfa; the step succeeds only if
    # both copies succeed
    return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
       and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
def sfa_import(self):
    "runs sfa-import-plc.py in the guest"
    # NOTE(review): 'auth' is computed but no longer used below (see the
    # commented-out line that used to consume it)
    auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return self.run_in_guest('sfa-import-plc.py')==0
# not needed anymore
#        self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
def sfa_start(self):
    "service sfa start"
    return self.run_in_guest('service sfa start')==0
def sfi_configure(self):
    "Create /root/sfi on the plc side for sfi client configuration"
    if self.options.dry_run:
        utils.header("DRY RUN - skipping step")
        # (an early 'return' is missing from this excerpt)
    sfa_spec=self.plc_spec['sfa']
    # cannot use sfa_slice_mapper to pass dir_name
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSliceSfa(self,test_site,slice_spec)
        # one local dot-sfi/<slicename> config dir per sfa slice, wiped each run
        dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
        test_slice.sfi_config(dir_name)
        # push into the remote /root/sfi area
        location = test_slice.sfi_path()
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.mkdir(remote,abs=True)
        # need to strip last level or remote otherwise we get an extra dir level
        self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
    # (final return missing from this excerpt)
def sfi_clean (self):
    "clean up /root/sfi on the plc side"
    # BUGFIX: return the command's success status instead of always
    # returning None - step methods must return a boolean
    return self.run_in_guest("rm -rf /root/sfi")==0
# the steps below are stubs whose bodies (and presumably mapper decorators,
# cf slice_sfa_mapper at the top of the file) are missing from this excerpt
def sfa_add_user(self):
    # (docstring and body missing from this excerpt)

def sfa_update_user(self):
    # (docstring and body missing from this excerpt)

def sfa_add_slice(self):
    "run sfi.py add (on Registry) from slice.xml"

def sfa_discover(self):
    "discover resources into resouces_in.rspec"

def sfa_create_slice(self):
    "run sfi.py create (on SM) - 1st time"

def sfa_check_slice_plc(self):
    "check sfa_create_slice at the plcs - all local nodes should be in slice"

def sfa_update_slice(self):
    "run sfi.py create (on SM) on existing object"

"various registry-related calls"
# NOTE(review): the 'def' line carrying the docstring above is missing from
# this excerpt

def ssh_slice_sfa(self):
    "tries to ssh-enter the SFA slice"

def sfa_delete_user(self):
    # (docstring and body missing from this excerpt)

def sfa_delete_slice(self):
    "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"

# NOTE(review): the enclosing 'def sfa_stop' line is missing from this excerpt;
# also the ==0 status below is computed but not returned
    self.run_in_guest('service sfa stop')==0
def populate (self):
    "creates random entries in the PLCAPI"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # ('command' initialization missing from this excerpt - presumably
    # built from 'location')
    command += " -- --preserve --short-names"
    local = (self.run_in_guest(command) == 0);
    # second run with --foreign
    command += ' --foreign'
    # NOTE(review): 'remote' is reused here as a boolean, shadowing the
    # path computed above
    remote = (self.run_in_guest(command) == 0);
    return ( local and remote)
def gather_logs (self):
    "gets all possible logs from plc's/qemu node's/slice's for future reference"
    # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
    # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
    # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
    # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
    # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
    # (1.a)
    print "-------------------- TestPlc.gather_logs : PLC's /var/log"
    self.gather_var_logs ()
    # (1.b)
    print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
    self.gather_pgsql_logs ()
    # (2)
    print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            test_node.gather_qemu_logs()
    # (3)
    print "-------------------- TestPlc.gather_logs : nodes's /var/log"
    self.gather_nodes_var_logs()
    # (4)
    print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
    self.gather_slivers_var_logs()
    # (final return missing from this excerpt)
def gather_slivers_var_logs(self):
    # fetch each sliver's /var/log into logs/sliver.var-log.<sliver>/
    for test_sliver in self.all_sliver_objs():
        tar_source = test_sliver.tar_var_logs()
        dest_dir = "logs/sliver.var-log.%s"%test_sliver.name()
        utils.system("mkdir -p %s"%dest_dir)
        # pipe the remote tar stream straight into a local extraction
        utils.system(tar_source + " | tar -C %s -xf -"%dest_dir)
def gather_var_logs (self):
    # mirror the plc guest's /var/log under logs/myplc.var-log.<name>/
    dest_dir = "logs/myplc.var-log.%s"%self.name()
    utils.system("mkdir -p %s"%dest_dir)
    tar_stream = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_stream + "| tar -C %s -xf -"%dest_dir)
    # open up the httpd logs so non-root users can browse them
    utils.system("chmod a+r,a+x %s/httpd"%dest_dir)
def gather_pgsql_logs (self):
    # mirror the guest's postgres logs under logs/myplc.pgsql-log.<name>/
    dest_dir = "logs/myplc.pgsql-log.%s"%self.name()
    utils.system("mkdir -p %s"%dest_dir)
    tar_stream = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_stream + "| tar -C %s -xf -"%dest_dir)
def gather_nodes_var_logs (self):
    # fetch each node's /var/log over ssh (key1) into logs/node.var-log.<node>/
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            node_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
            fetch = node_ssh.actual_command("tar -C /var/log -cf - .")
            fetch = fetch + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
            utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
            utils.system(fetch)
# returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    # uses options.dbname if it is found
    # (a try/except structure around the next lines is missing from this excerpt)
    name=self.options.dbname
    if not isinstance(name,StringTypes):
    # (fallback branch missing from this excerpt - presumably builds a
    # timestamp-based name from 't' below)
    t=datetime.datetime.now()
    # (strftime formatting of 't' into 'name' missing from this excerpt)
    return "/root/%s-%s.sql"%(database,name)
def plc_db_dump(self):
    'dump the planetlab5 DB in /root in the PLC - filename has time'
    # NOTE(review): "planetab5" (missing an 'l') is only the dump *filename*
    # stem, not the db name; plc_db_restore uses the same spelling, so fixing
    # one without the other would break the dump/restore pairing
    dump=self.dbfile("planetab5")
    self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
    utils.header('Dumped planetlab5 database in %s'%dump)
    # (a final 'return True' may be missing from this excerpt)
def plc_db_restore(self):
    'restore the planetlab5 DB - looks broken, but run -n might help'
    # NOTE(review): "planetab5" matches the spelling used by plc_db_dump -
    # keep them in sync if ever renamed
    dump=self.dbfile("planetab5")
    ##stop httpd service
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    # drop and recreate the db, then replay the dump into it
    self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
    self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
    ##starting httpd service
    self.run_in_guest('service httpd start')
    # (a line is missing from this excerpt before the next statement)
    utils.header('Database restored from ' + dump)
def standby_1_through_20(self):
    """convenience function to wait for a specified number of minutes"""
    # marker for the standby_1..standby_20 steps below; their bodies are empty
    # because the actual waiting is presumably done by the standby_generic
    # decorator (see top of file - decorator lines are missing from this
    # excerpt), which derives the minute count from the function name
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass