1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
# step methods must take (self) and return a boolean (options is a member of the class)

def standby(minutes,dry_run):
    # Plain sleep used by the generated standby_* steps.
    # NOTE(review): this extract skips original lines here (likely the
    # dry_run short-circuit and a trailing 'return True') -- confirm in VCS.
    utils.header('Entering StandBy for %d mn'%minutes)
    time.sleep(60*minutes)

def standby_generic (func):
    # Factory: derive the sleep duration from the decorated function's
    # name, e.g. standby_5_... -> 5 minutes.
    # NOTE(review): the inner 'def actual(self):' wrapper line and the
    # final 'return actual' are missing from this extract, which is why
    # 'self' appears unbound below -- confirm against VCS.
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)

def node_mapper (method):
    # Lift a TestNode method into a TestPlc step applied to every node of
    # every site; the step fails if any node fails.
    # NOTE(review): the 'def actual(self):' wrapper, the initialization of
    # 'overall' and the return lines are missing from this extract.
    node_method = TestNode.__dict__[method.__name__]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self,test_site,node_spec)
            if not node_method(test_node): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__

def slice_mapper (method):
    # Same idea as node_mapper, for TestSlice methods: one call per slice.
    # NOTE(review): wrapper definition / 'overall' init / returns are
    # missing from this extract -- confirm against VCS.
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__

def slice_sfa_mapper (method):
    # SFA flavour of slice_mapper: iterates the sfa_slice_specs instead.
    # NOTE(review): wrapper definition / 'overall' init / returns are
    # missing from this extract -- confirm against VCS.
    slice_method = TestSliceSfa.__dict__[method.__name__]
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSliceSfa(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
89 'vs_delete','timestamp_vs','vs_create', SEP,
90 'plc_install', 'plc_configure', 'plc_start', SEP,
91 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
92 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
93 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
94 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
95 'sfa_install', 'sfa_tables_install', 'sfa_plc_install', 'sfa_client_install', SEPSFA,
96 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
97 'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
98 'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
99 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
100 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
101 # but as the stress test might take a while, we sometimes missed the debug mode..
102 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
103 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
104 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
106 'force_gather_logs', SEP,
109 'export', 'show_boxes', SEP,
110 'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
111 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
112 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
113 'delete_leases', 'list_leases', SEP,
115 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
116 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
117 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEP,
118 'plc_db_dump' , 'plc_db_restore', SEP,
119 'standby_1_through_20',SEP,
def printable_steps (list):
    """One-line rendering of a step list, broken at separator markers."""
    rendered = " ".join(list) + " "
    for separator in (SEP, SEPSFA):
        rendered = rendered.replace(" " + separator + " ", " \\\n")
    return rendered

def valid_step (step):
    """A step name is valid when it is not one of the separator markers."""
    return step not in (SEP, SEPSFA)
130 # turn off the sfa-related steps when build has skipped SFA
131 # this is originally for centos5 as recent SFAs won't build on this platform
def check_whether_build_has_sfa (rpms_url):
    # Probe the build's rpm repository for any sfa- package; when absent
    # (e.g. centos5 builds), demote every sfa step into 'other_steps'.
    # NOTE(review): the line that consumes 'retcod' (presumably an early
    # return when the build does ship sfa) is missing from this extract.
    # warning, we're now building 'sface' so let's be a bit more picky
    retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
    # full builds are expected to return with 0 here
    # move all steps containing 'sfa' from default_steps to other_steps
    sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
    TestPlc.other_steps += sfa_steps
    for step in sfa_steps: TestPlc.default_steps.remove(step)
def __init__ (self,plc_spec,options):
    # Bind the plc spec/options and build the ssh and API helper objects.
    # NOTE(review): this extract is missing a few lines here, at least
    # 'self.options=options' (self.options is read just below) and the
    # conditional guarding the 'deprecated' raise -- confirm against VCS.
    self.plc_spec=plc_spec
    self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
    self.vserverip=plc_spec['vserverip']
    self.vservername=plc_spec['vservername']
    self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
    raise Exception,'chroot-based myplc testing is deprecated'
    self.apiserver=TestApiserver(self.url,options.dry_run)
def name(self):
    """Pretty name for this plc instance: <spec name>.<vserver name>."""
    # NOTE(review): the original 'def' headers for these three accessors
    # were lost in the extract; signatures reconstructed -- verify in VCS.
    name=self.plc_spec['name']
    return "%s.%s"%(name,self.vservername)

def hostname(self):
    """The box hosting this plc's vserver."""
    return self.plc_spec['host_box']

def is_local (self):
    """True when the host box is the local machine (per TestSsh)."""
    return self.test_ssh.is_local()
166 # define the API methods on this object through xmlrpc
167 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    """Full ssh command line that runs <command> inside the guest vserver."""
    in_guest = self.host_to_guest(command)
    return self.test_ssh.actual_command(in_guest)

def start_guest (self):
    """Boot the guest vserver (runs the start command on the host box)."""
    start_command = self.start_guest_in_host()
    return utils.system(self.test_ssh.actual_command(start_command))

def stop_guest (self):
    """Shut down the guest vserver (runs the stop command on the host box)."""
    stop_command = self.stop_guest_in_host()
    return utils.system(self.test_ssh.actual_command(stop_command))

def run_in_guest (self,command):
    """Run <command> inside the guest vserver; returns the exit status."""
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)

def run_in_host (self,command):
    """Run <command> directly on the host box; returns the exit status."""
    return self.test_ssh.run_in_buildname(command)
#command gets run in the vserver
def host_to_guest(self,command):
    """Wrap <command> so that it executes inside this plc's vserver."""
    return "vserver %s exec %s" % (self.vservername, command)

#start/stop the vserver
def start_guest_in_host(self):
    """Shell command (for the host box) that starts the vserver."""
    return "vserver %s start" % (self.vservername)

def stop_guest_in_host(self):
    """Shell command (for the host box) that stops the vserver."""
    return "vserver %s stop" % (self.vservername)

def run_in_guest_piped (self,local,remote):
    """Pipe the output of local command <local> into <remote> run in the guest."""
    receiving_end = self.test_ssh.actual_command(self.host_to_guest(remote), keep_stdin=True)
    return utils.system(local + " | " + receiving_end)
# does a yum install in the vs, ignore yum retcod, check with rpm
def yum_install (self, rpms):
    """Install <rpms> (a name or a list of names) in the guest.

    yum's exit code is unreliable, so success is determined by a
    trailing 'rpm -q' check instead of yum's own return code.
    """
    if isinstance (rpms, list):
        # normalize a list to a single space-separated string
        # (restored: this line was missing from the extract)
        rpms=" ".join(rpms)
    self.run_in_guest("yum -y install %s"%rpms)
    # clean up any incomplete transaction left behind by yum
    self.run_in_guest("yum-complete-transaction")
    return self.run_in_guest("rpm -q %s"%rpms)==0
def auth_root (self):
    """PLCAPI password-auth struct for this plc's root account."""
    # restored: the closing line of this dict literal was missing
    # from the extract
    return {'Username':self.plc_spec['PLC_ROOT_USER'],
            'AuthMethod':'password',
            'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
            'Role' : self.plc_spec['role'],
            }
215 def locate_site (self,sitename):
216 for site in self.plc_spec['sites']:
217 if site['site_fields']['name'] == sitename:
219 if site['site_fields']['login_base'] == sitename:
221 raise Exception,"Cannot locate site %s"%sitename
223 def locate_node (self,nodename):
224 for site in self.plc_spec['sites']:
225 for node in site['nodes']:
226 if node['name'] == nodename:
228 raise Exception,"Cannot locate node %s"%nodename
230 def locate_hostname (self,hostname):
231 for site in self.plc_spec['sites']:
232 for node in site['nodes']:
233 if node['node_fields']['hostname'] == hostname:
235 raise Exception,"Cannot locate hostname %s"%hostname
237 def locate_key (self,keyname):
238 for key in self.plc_spec['keys']:
239 if key['name'] == keyname:
241 raise Exception,"Cannot locate key %s"%keyname
243 def locate_slice (self, slicename):
244 for slice in self.plc_spec['slices']:
245 if slice['slice_fields']['name'] == slicename:
247 raise Exception,"Cannot locate slice %s"%slicename
def all_sliver_objs (self):
    """Build a TestSliver for every (slice,node) pair in the spec."""
    # restored: list initialization and the trailing return were
    # missing from the extract
    result=[]
    for slice_spec in self.plc_spec['slices']:
        slicename = slice_spec['slice_fields']['name']
        for nodename in slice_spec['nodenames']:
            result.append(self.locate_sliver_obj (nodename,slicename))
    return result
def locate_sliver_obj (self,nodename,slicename):
    # Build the TestSliver object for a given (node,slice) pair.
    (site,node) = self.locate_node(nodename)
    slice = self.locate_slice (slicename)
    # wrap the raw specs into test objects
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
    # xxx the slice site is assumed to be the node site - mhh - probably harmless
    test_slice = TestSlice (self, test_site, slice)
    return TestSliver (self, test_node, test_slice)

def locate_first_node(self):
    # First node of the first slice in the spec.
    # NOTE(review): the trailing 'return test_node' is missing from this
    # extract -- confirm against VCS.
    nodename=self.plc_spec['slices'][0]['nodenames'][0]
    (site,node) = self.locate_node(nodename)
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
def locate_first_sliver (self):
    """TestSliver for the first node of the first slice in the spec."""
    first_slice = self.plc_spec['slices'][0]
    return self.locate_sliver_obj(first_slice['nodenames'][0],
                                  first_slice['slice_fields']['name'])
# all different hostboxes used in this plc
def gather_hostBoxes(self):
    # maps on sites and nodes, return [ (host_box,test_node) ]
    # NOTE(review): the initializations of 'tuples' and 'result', the
    # creation of the empty list for a new box (presumably
    # 'result[box]=[]' in the if-branch below) and the final
    # 'return result' are missing from this extract -- confirm in VCS.
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self, test_site, node_spec)
            if not test_node.is_real():
                # only qemu-virtualized nodes live on a host box
                tuples.append( (test_node.host_box(),test_node) )
    # transform into a dict { 'host_box' -> [ test_node .. ] }
    for (box,node) in tuples:
        # (the body of this if-branch is missing from the extract)
        if not result.has_key(box):
        result[box].append(node)
# a step for checking this stuff
def show_boxes (self):
    'print summary of nodes location'
    # NOTE(review): step methods return a boolean; the trailing
    # 'return True' is missing from this extract.
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        print box,":"," + ".join( [ node.name() for node in nodes ] )

# make this a valid step
def qemu_kill_all(self):
    'kill all qemu instances on the qemu boxes involved by this setup'
    # this is the brute force version, kill all qemus on that host box
    # NOTE(review): trailing 'return True' missing from this extract.
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # pass the first nodename, as we don't push template-qemu on testboxes
        nodedir=nodes[0].nodedir()
        TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)

# make this a valid step
def qemu_list_all(self):
    'list all qemu instances on the qemu boxes involved by this setup'
    # NOTE(review): trailing 'return True' missing from this extract.
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # this is the brute force version, kill all qemus on that host box
        TestBoxQemu(box,self.options.buildname).qemu_list_all()

# kill only the right qemus
def qemu_list_mine(self):
    'list qemu instances for our nodes'
    # NOTE(review): the loop body (per-node qemu list call) is missing
    # from this extract -- confirm against VCS.
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version

# kill only the right qemus
def qemu_kill_mine(self):
    'kill the qemu instances for our nodes'
    # NOTE(review): the loop body (per-node qemu kill call) is missing
    # from this extract -- confirm against VCS.
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version
#################### display config
# NOTE(review): the 'def' header of this first method (presumably
# 'def show (self):') is missing from this extract.
    "show test configuration after localization"
    self.display_pass (1)
    self.display_pass (2)

# NOTE(review): the 'def' header of this method (presumably
# 'def export (self):') is missing from this extract.
    "print cut'n paste-able stuff to export env variables to your shell"
    # these work but the shell prompt does not get displayed..
    command1="ssh %s vserver %s enter"%(self.plc_spec['host_box'],self.plc_spec['vservername'])
    command2="ssh root@%s %s"%(socket.gethostname(),command1)
    # guess local domain from hostname
    domain=socket.gethostname().split('.',1)[1]
    fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
    print "export BUILD=%s"%self.options.buildname
    print "export PLCHOST=%s"%fqdn
    print "export GUEST=%s"%self.plc_spec['vservername']
    # find hostname of first node
    (hostname,qemubox) = self.all_node_infos()[0]
    print "export KVMHOST=%s.%s"%(qemubox,domain)
    print "export NODE=%s"%(hostname)
# spec keys shown by display_pass even without --verbose
always_display_keys=['PLC_WWW_HOST','nodes','sites',]
def display_pass (self,passno):
    # Dump the plc spec (selected keys only unless --verbose).
    # NOTE(review): several lines are missing from this extract (the
    # passno dispatch and the 'sites' / 'slices' / 'keys' key tests),
    # so the elif ladder below is incomplete -- confirm against VCS.
    for (key,val) in self.plc_spec.iteritems():
        if not self.options.verbose and key not in TestPlc.always_display_keys: continue
            self.display_site_spec(site)
            for node in site['nodes']:
                self.display_node_spec(node)
        elif key=='initscripts':
            for initscript in val:
                self.display_initscript_spec (initscript)
                self.display_slice_spec (slice)
                self.display_key_spec (key)
            if key not in ['sites','initscripts','slices','keys', 'sfa']:
                print '+ ',key,':',val
def display_site_spec (self,site):
    # Dump one site spec.
    # NOTE(review): several elif headers (e.g. the k=='nodes' and
    # k=='users' branches) are missing from this extract -- confirm.
    print '+ ======== site',site['site_fields']['name']
    for (k,v) in site.iteritems():
        if not self.options.verbose and k not in TestPlc.always_display_keys: continue
            print '+ ','nodes : ',
                print node['node_fields']['hostname'],'',
                print user['name'],'',
        elif k == 'site_fields':
            print '+ login_base',':',v['login_base']
        elif k == 'address_fields':

def display_initscript_spec (self,initscript):
    # One-line summary of an initscript spec.
    print '+ ======== initscript',initscript['initscript_fields']['name']

def display_key_spec (self,key):
    # One-line summary of a key spec.
    print '+ ======== key',key['name']
def display_slice_spec (self,slice):
    # Dump one slice spec.
    # NOTE(review): most of the elif ladder over slice keys is missing
    # from this extract -- confirm against VCS.
    print '+ ======== slice',slice['slice_fields']['name']
    for (k,v) in slice.iteritems():
        elif k=='slice_fields':
            print '+ fields',':',
            print 'max_nodes=',v['max_nodes'],

def display_node_spec (self,node):
    # Compact one-line node summary; full dump with --verbose.
    print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
    print "hostname=",node['node_fields']['hostname'],
    print "ip=",node['interface_fields']['ip']
    if self.options.verbose:
        utils.pprint("node details",node,depth=3)
# another entry point for just showing the boxes involved
def display_mapping (self):
    TestPlc.display_mapping_plc(self.plc_spec)
    # NOTE(review): any further statements, and the @staticmethod
    # decorators of the two helpers below, are missing from this extract.

def display_mapping_plc (plc_spec):
    # static helper: print the plc -> host_box/vserver mapping
    print '+ MyPLC',plc_spec['name']
    print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
    print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
    for site_spec in plc_spec['sites']:
        for node_spec in site_spec['nodes']:
            TestPlc.display_mapping_node(node_spec)

def display_mapping_node (node_spec):
    # static helper: print one node -> qemu box mapping
    print '+ NODE %s'%(node_spec['name'])
    print '+\tqemu box %s'%node_spec['host_box']
    print '+\thostname=%s'%node_spec['node_fields']['hostname']
# write a timestamp in /vservers/<>.timestamp
# cannot be inside the vserver, that causes vserver .. build to cough
def timestamp_vs (self):
    """Stamp /vservers/<vservername>.timestamp with the current epoch time."""
    # restored: the computation of 'now' was missing from the extract
    now=int(time.time())
    return utils.system(self.test_ssh.actual_command("echo %d > /vservers/%s.timestamp"%(now,self.vservername)))==0
475 # def local_pre (self):
476 # "run site-dependant pre-test script as defined in LocalTestResources"
477 # from LocalTestResources import local_resources
478 # return local_resources.step_pre(self)
480 # def local_post (self):
481 # "run site-dependant post-test script as defined in LocalTestResources"
482 # from LocalTestResources import local_resources
483 # return local_resources.step_post(self)
485 # def local_list (self):
486 # "run site-dependant list script as defined in LocalTestResources"
487 # from LocalTestResources import local_resources
488 # return local_resources.step_list(self)
490 # def local_rel (self):
491 # "run site-dependant release script as defined in LocalTestResources"
492 # from LocalTestResources import local_resources
493 # return local_resources.step_release(self)
495 # def local_rel_plc (self):
496 # "run site-dependant release script as defined in LocalTestResources"
497 # from LocalTestResources import local_resources
498 # return local_resources.step_release_plc(self)
500 # def local_rel_qemu (self):
501 # "run site-dependant release script as defined in LocalTestResources"
502 # from LocalTestResources import local_resources
503 # return local_resources.step_release_qemu(self)
# NOTE(review): the 'def vs_delete(self):' header (and likely a trailing
# 'return True') are missing from this extract -- confirm against VCS.
    "vserver delete the test myplc"
    self.run_in_host("vserver --silent %s delete"%self.vservername)
    self.run_in_host("rm -f /vservers/%s.timestamp"%self.vservername)
512 # historically the build was being fetched by the tests
513 # now the build pushes itself as a subdir of the tests workdir
514 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
def vs_create (self):
    "vserver creation (no install done)"
    # push the local build/ dir to the testplc box
    # NOTE(review): several lines are missing from this extract (at least
    # the initialization of 'test_env_options', the try/except around the
    # reverse-lookup of the vserver ip, and a failure 'return False'
    # after the two fatal prints) -- confirm against VCS.
    # a full path for the local calls
    build_dir=os.path.dirname(sys.argv[0])
    # sometimes this is empty - set to "." in such a case
    if not build_dir: build_dir="."
    build_dir += "/build"
    # use a standard name - will be relative to remote buildname
    # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
    self.test_ssh.rmdir(build_dir)
    self.test_ssh.copy(build_dir,recursive=True)
    # the repo url is taken from arch-rpms-url
    # with the last step (i386) removed
    repo_url = self.options.arch_rpms_url
    for level in [ 'arch' ]:
        repo_url = os.path.dirname(repo_url)
    # pass the vbuild-nightly options to vtest-init-vserver
    test_env_options += " -p %s"%self.options.personality
    test_env_options += " -d %s"%self.options.pldistro
    test_env_options += " -f %s"%self.options.fcdistro
    script="vtest-init-vserver.sh"
    vserver_name = self.vservername
    vserver_options="--netdev eth0 --interface %s"%self.vserverip
    vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
    vserver_options += " --hostname %s"%vserver_hostname
    print "Cannot reverse lookup %s"%self.vserverip
    print "This is considered fatal, as this might pollute the test results"
    create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
    return self.run_in_host(create_vserver) == 0
def plc_install(self):
    "yum install myplc, noderepo, and the plain bootstrapfs"
    # NOTE(review): this extract skips the initialization of 'pkgs_list',
    # the 'arch' assignments inside the personality dispatch, and the
    # conditional guarding the slicerepo append -- confirm against VCS.
    # workaround for getting pgsql8.2 on centos5
    if self.options.fcdistro == "centos5":
        self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
    if self.options.personality == "linux32":
    elif self.options.personality == "linux64":
        raise Exception, "Unsupported personality %r"%self.options.personality
    nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
        pkgs_list.append ("slicerepo-%s"%nodefamily)
    pkgs_list.append ("myplc")
    pkgs_list.append ("noderepo-%s"%nodefamily)
    pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
    # NOTE(review): pkgs_string is computed but never used -- yum_install
    # already joins lists itself; candidate for removal.
    pkgs_string=" ".join(pkgs_list)
    return self.yum_install (pkgs_list)
def plc_configure(self):
    # Drive plc-config-tty in the guest from a generated answer file.
    # NOTE(review): the step docstring, most of the variable list in the
    # 'for var' loop, and the trailing 'return True' are missing from
    # this extract -- confirm against VCS.
    tmpname='%s.plc-config-tty'%(self.name())
    fileconf=open(tmpname,'w')
    for var in [ 'PLC_NAME',
                 'PLC_MAIL_SUPPORT_ADDRESS',
                 # Above line was added for integrating SFA Testing
                 'PLC_RESERVATION_GRANULARITY',
                 'PLC_OMF_XMPP_SERVER',
        fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
    fileconf.write('w\n')
    fileconf.write('q\n')
    # show the answers, feed them to plc-config-tty, then clean up
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
    utils.system('rm %s'%tmpname)
# NOTE(review): the 'def' headers for plc_start / plc_stop and the
# bodies of vs_start / vs_stop are missing from this extract; only
# these orphan lines remain. Recover the full methods from VCS.
    self.run_in_guest('service plc start')
    self.run_in_guest('service plc stop')
    "start the PLC vserver"
    "stop the PLC vserver"
# stores the keys from the config for further use
def keys_store(self):
    "stores test users ssh keys in keys/"
    for key_spec in self.plc_spec['keys']:
        TestKey(self,key_spec).store_key()
    # restored: step methods must return a boolean (see module header)
    return True

def keys_clean(self):
    "removes keys cached in keys/"
    utils.system("rm -rf ./keys")
    # restored: step methods must return a boolean (see module header)
    return True
# fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
# for later direct access to the nodes
def keys_fetch(self):
    "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
    # NOTE(review): the assignments of 'dir' and 'overall', the mkdir
    # in the if-branch, and the final 'return overall' are missing from
    # this extract -- confirm against VCS.
    if not os.path.isdir(dir):
    vservername=self.vservername
    prefix = 'debug_ssh_key'
    for ext in [ 'pub', 'rsa' ] :
        src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
        dst="keys/%(vservername)s-debug.%(ext)s"%locals()
        if self.test_ssh.fetch(src,dst) != 0: overall=False
def sites (self):
    # NOTE(review): the 'def' header was missing from this extract and
    # has been restored to match the delete_sites twin below.
    "create sites with PLCAPI"
    return self.do_sites()

def delete_sites (self):
    "delete sites with PLCAPI"
    return self.do_sites(action="delete")
def do_sites (self,action="add"):
    # Shared worker behind the sites / delete_sites steps.
    # NOTE(review): the 'else' introducing the creation branch and the
    # final 'return True' are missing from this extract -- confirm.
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        if (action != "add"):
            utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
            test_site.delete_site()
            # deleted with the site
            #test_site.delete_users()
            utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
            test_site.create_site()
            test_site.create_users()

def delete_all_sites (self):
    "Delete all sites in PLC, and related objects"
    # NOTE(review): the trailing 'return True' is missing from this extract.
    print 'auth_root',self.auth_root()
    site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
    for site_id in site_ids:
        print 'Deleting site_id',site_id
        self.apiserver.DeleteSite(self.auth_root(),site_id)
def nodes (self):
    # NOTE(review): the 'def' header was missing from this extract and
    # has been restored to match the delete_nodes twin below.
    "create nodes with PLCAPI"
    return self.do_nodes()

def delete_nodes (self):
    "delete nodes with PLCAPI"
    return self.do_nodes(action="delete")
def do_nodes (self,action="add"):
    # Shared worker behind the nodes / delete_nodes steps.
    # NOTE(review): the add-vs-delete dispatch ('if action != "add"' /
    # 'else') and the final 'return True' are missing from this extract,
    # so the two branches below are shown without their headers.
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
            utils.header("Deleting nodes in site %s"%test_site.name())
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                utils.header("Deleting %s"%test_node.name())
                test_node.delete_node()
            utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
            for node_spec in site_spec['nodes']:
                utils.pprint('Creating node %s'%node_spec,node_spec)
                test_node = TestNode (self,test_site,node_spec)
                test_node.create_node ()
def nodegroups (self):
    "create nodegroups with PLCAPI"
    # thin step wrapper: delegate to the shared worker in creation mode
    return self.do_nodegroups(action="add")

def delete_nodegroups (self):
    "delete nodegroups with PLCAPI"
    # thin step wrapper: delegate to the shared worker in deletion mode
    return self.do_nodegroups(action="delete")
def translate_timestamp (start,grain,timestamp):
    """Interpret small timestamps as relative to <start> (in units of
    <grain>); values at least TestPlc.YEAR are taken as absolute epochs."""
    if timestamp < TestPlc.YEAR:
        return start + timestamp*grain
    return timestamp

def timestamp_printable (timestamp):
    """Human-readable UTC rendering of an epoch timestamp."""
    return time.strftime('%m-%d %H:%M:%S UTC', time.gmtime(timestamp))
# NOTE(review): the 'def leases (self):' header and several lines (the
# computation of 'now', the early return when no reservable node exists,
# the else-branch header before the success message, and the final
# return) are missing from this extract -- confirm against VCS.
    "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
    grain=self.apiserver.GetLeaseGranularity(self.auth_root())
    print 'API answered grain=',grain
    start=(now/grain)*grain
    # find out all nodes that are reservable
    nodes=self.all_reservable_nodenames()
        utils.header ("No reservable node found - proceeding without leases")
    # attach them to the leases as specified in plc_specs
    # this is where the 'leases' field gets interpreted as relative of absolute
    for lease_spec in self.plc_spec['leases']:
        # skip the ones that come with a null slice id
        if not lease_spec['slice']: continue
        lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
        lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
        lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
                                                lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
        if lease_addition['errors']:
            utils.header("Cannot create leases, %s"%lease_addition['errors'])
            utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
                         (nodes,lease_spec['slice'],
                          lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
                          lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
def delete_leases (self):
    "remove all leases in the myplc side"
    lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
    utils.header("Cleaning leases %r"%lease_ids)
    self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
    # restored: step methods must return a boolean (see module header)
    return True
def list_leases (self):
    "list all leases known to the myplc"
    # NOTE(review): the computation of 'now', the 'for l in leases' loop
    # header and the final 'return True' are missing from this extract.
    leases = self.apiserver.GetLeases(self.auth_root())
        current=l['t_until']>=now
        if self.options.verbose or current:
            utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                                   TestPlc.timestamp_printable(l['t_from']),
                                                   TestPlc.timestamp_printable(l['t_until'])))
# create nodegroups if needed, and populate
def do_nodegroups (self, action="add"):
    # 1st pass to scan contents
    # NOTE(review): many lines of this method are missing from this
    # extract (initializations of 'groups_dict' and the step outcome,
    # the add-vs-delete dispatch, several if/else and try/except
    # headers, and the final return); the nesting shown below is
    # therefore approximate -- confirm against VCS.
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode (self,test_site,node_spec)
            if node_spec.has_key('nodegroups'):
                nodegroupnames=node_spec['nodegroups']
                if isinstance(nodegroupnames,StringTypes):
                    nodegroupnames = [ nodegroupnames ]
                for nodegroupname in nodegroupnames:
                    if not groups_dict.has_key(nodegroupname):
                        groups_dict[nodegroupname]=[]
                    groups_dict[nodegroupname].append(test_node.name())
    auth=self.auth_root()
    for (nodegroupname,group_nodes) in groups_dict.iteritems():
            print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
            # first, check if the nodetagtype is here
            tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
                tag_type_id = tag_types[0]['tag_type_id']
                tag_type_id = self.apiserver.AddTagType(auth,
                                                        {'tagname':nodegroupname,
                                                         'description': 'for nodegroup %s'%nodegroupname,
            print 'located tag (type)',nodegroupname,'as',tag_type_id
            nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
                self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
            # set node tag on all nodes, value='yes'
            for nodename in group_nodes:
                    self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    traceback.print_exc()
                    print 'node',nodename,'seems to already have tag',nodegroupname
                    # check it was indeed applied
                    expect_yes = self.apiserver.GetNodeTags(auth,
                                                            {'hostname':nodename,
                                                             'tagname':nodegroupname},
                                                            ['value'])[0]['value']
                    if expect_yes != "yes":
                        print 'Mismatch node tag on node',nodename,'got',expect_yes
            if not self.options.dry_run:
                print 'Cannot find tag',nodegroupname,'on node',nodename
            print 'cleaning nodegroup',nodegroupname
            self.apiserver.DeleteNodeGroup(auth,nodegroupname)
            traceback.print_exc()
# return a list of tuples (nodename,qemuname)
def all_node_infos (self) :
    """All (hostname, qemu host_box) pairs across all sites of the spec."""
    # restored: list initialization and the trailing return were
    # missing from the extract
    node_infos = []
    for site_spec in self.plc_spec['sites']:
        node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                        for node_spec in site_spec['nodes'] ]
    return node_infos
def all_nodenames (self):
    """Just the hostnames out of all_node_infos()."""
    return [ hostname for (hostname, qemuname) in self.all_node_infos() ]
def all_reservable_nodenames (self):
    """Hostnames of all nodes declared with node_type='reservable'."""
    # restored: accumulator initialization and the trailing return were
    # missing from the extract
    res=[]
    for site_spec in self.plc_spec['sites']:
        for node_spec in site_spec['nodes']:
            node_fields=node_spec['node_fields']
            if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                res.append(node_fields['hostname'])
    return res
# silent_minutes : during the first <silent_minutes> minutes nothing gets printed
def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
    # Poll PLCAPI until every node reaches <target_boot_state>, or the
    # timeout expires.
    # NOTE(review): this extract is missing several lines (the dry_run
    # early return, the outer polling loop header, the 'else:' that the
    # real-node check belongs to, the success/failure returns and the
    # sleep between iterations) -- confirm against VCS.
    if self.options.dry_run:
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
    # the nodes that haven't checked yet - start with a full list and shrink over time
    tocheck = self.all_hostnames()
    utils.header("checking nodes %r"%tocheck)
    # create a dict hostname -> status
    status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
        for array in tocheck_status:
            hostname=array['hostname']
            boot_state=array['boot_state']
            if boot_state == target_boot_state:
                utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                # if it's a real node, never mind
                (site_spec,node_spec)=self.locate_hostname(hostname)
                if TestNode.is_real_model(node_spec['node_fields']['model']):
                    utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                    boot_state = target_boot_state
                elif datetime.datetime.now() > graceout:
                    utils.header ("%s still in '%s' state"%(hostname,boot_state))
                    # silence further messages for a long while
                    graceout=datetime.datetime.now()+datetime.timedelta(1)
            status[hostname] = boot_state
        tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
        if datetime.datetime.now() > timeout:
            for hostname in tocheck:
                utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
        # otherwise, sleep for a while
    # only useful in empty plcs
def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
    # Repeatedly try to ssh into every node (debug key or root key)
    # until all of them answer, or the timeout expires.
    # NOTE(review): this extract is missing lines (the debug/production
    # 'message' and key selection branches, the outer polling loop, the
    # success/failure returns and the sleep) -- confirm against VCS.
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
    vservername=self.vservername
        local_key = "keys/%(vservername)s-debug.rsa"%locals()
        local_key = "keys/key1.rsa"
    node_infos = self.all_node_infos()
    utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
    for (nodename,qemuname) in node_infos:
        utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
    utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                 (timeout_minutes,silent_minutes,period))
        for node_info in node_infos:
            (hostname,qemuname) = node_info
            # try to run 'hostname' in the node
            command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
            # don't spam logs - show the command only after the grace period
            success = utils.system ( command, silent=datetime.datetime.now() < graceout)
                utils.header('Successfully entered root@%s (%s)'%(hostname,message))
                # NOTE(review): node_infos is mutated while being
                # iterated -- fragile; consider iterating a copy.
                node_infos.remove(node_info)
                # we will have tried real nodes once, in case they're up - but if not, just skip
                (site_spec,node_spec)=self.locate_hostname(hostname)
                if TestNode.is_real_model(node_spec['node_fields']['model']):
                    utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                    node_infos.remove(node_info)
        if datetime.datetime.now() > timeout:
            for (hostname,qemuname) in node_infos:
                utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
        # otherwise, sleep for a while
    # only useful in empty plcs
def ssh_node_debug(self):
    "Tries to ssh into nodes in debug mode with the debug ssh key"
    # debug mode: shorter timeouts
    return self.check_nodes_ssh(debug=True,
                                timeout_minutes=10,
                                silent_minutes=5)

def ssh_node_boot(self):
    "Tries to ssh into nodes in production mode with the root ssh key"
    # production mode: longer timeouts
    return self.check_nodes_ssh(debug=False,
                                timeout_minutes=40,
                                silent_minutes=15)
# NOTE(review): the following step methods are mapper-decorated wrappers
# whose decorator lines and bodies are missing from this extract (one
# 'def' header, for the bootcd step, is missing as well); only headers
# and step docstrings remain. Recover the full definitions from VCS.
def qemu_local_init (self):
    "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
    "all nodes: invoke GetBootMedium and store result locally"
def qemu_local_config (self):
    "all nodes: compute qemu config qemu.conf and store it locally"
def nodestate_reinstall (self):
    "all nodes: mark PLCAPI boot_state as reinstall"
def nodestate_safeboot (self):
    "all nodes: mark PLCAPI boot_state as safeboot"
def nodestate_boot (self):
    "all nodes: mark PLCAPI boot_state as boot"
def nodestate_show (self):
    "all nodes: show PLCAPI boot_state"
def qemu_export (self):
    "all nodes: push local node-dep directory on the qemu box"
### check hooks : invoke scripts from hooks/{node,slice}
def check_hooks_node (self):
    """Run the node-side hook scripts on the first node."""
    first_node = self.locate_first_node()
    return first_node.check_hooks()

def check_hooks_sliver (self) :
    """Run the slice-side hook scripts on the first sliver."""
    first_sliver = self.locate_first_sliver()
    return first_sliver.check_hooks()

def check_hooks (self):
    "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
    # both contexts must succeed; short-circuits on node failure
    node_outcome = self.check_hooks_node()
    return node_outcome and self.check_hooks_sliver()
def do_check_initscripts(self):
    # Verify on each sliver that its initscript has left its stamp.
    # NOTE(review): the 'overall' initialization, the guard's 'continue',
    # the failure assignment after the last if, and the final
    # 'return overall' are missing from this extract -- confirm in VCS.
    for slice_spec in self.plc_spec['slices']:
        if not slice_spec.has_key('initscriptstamp'):
        stamp=slice_spec['initscriptstamp']
        for nodename in slice_spec['nodenames']:
            (site,node) = self.locate_node (nodename)
            # xxx - passing the wrong site - probably harmless
            test_site = TestSite (self,site)
            test_slice = TestSlice (self,test_site,slice_spec)
            test_node = TestNode (self,test_site,node)
            test_sliver = TestSliver (self, test_node, test_slice)
            if not test_sliver.check_initscript_stamp(stamp):
def check_initscripts(self):
    "check that the initscripts have triggered"
    # thin step wrapper; the real work lives in do_check_initscripts
    return self.do_check_initscripts()
def initscripts (self):
    "create initscripts with PLCAPI"
    # register every initscript declared in the spec with the PLCAPI server
    for spec in self.plc_spec['initscripts']:
        utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'], spec)
        self.apiserver.AddInitScript(self.auth_root(), spec['initscript_fields'])
# best-effort removal of every initscript declared in the spec.
# NOTE(review): sampled extract - the try:/except: lines wrapping the
# DeleteInitScript call are elided here; the two prints below are the
# success and failure branches respectively. Python-2 print statements.
1040 def delete_initscripts (self):
1041 "delete initscripts with PLCAPI"
1042 for initscript in self.plc_spec['initscripts']:
1043 initscript_name = initscript['initscript_fields']['name']
1044 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1046 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1047 print initscript_name,'deleted'
# deliberate best-effort: a missing initscript is not a step failure
1049 print 'deletion went wrong - probably did not exist'
# slice management steps: slices()/delete_slices() both delegate to
# do_slices with an action flag.
# NOTE(review): sampled extract - the 'def slices (self):' line for the
# docstring below, the action dispatch (if action=="delete": / else:)
# and the final return are elided here.
1054 "create slices with PLCAPI"
1055 return self.do_slices()
1057 def delete_slices (self):
1058 "delete slices with PLCAPI"
1059 return self.do_slices("delete")
# worker: iterates the slice specs and either deletes or creates each one
1061 def do_slices (self, action="add"):
1062 for slice in self.plc_spec['slices']:
1063 site_spec = self.locate_site (slice['sitename'])
1064 test_site = TestSite(self,site_spec)
1065 test_slice=TestSlice(self,test_site,slice)
# delete branch (guard elided in this extract)
1067 utils.header("Deleting slices in site %s"%test_site.name())
1068 test_slice.delete_slice()
# create branch (guard elided in this extract)
1070 utils.pprint("Creating slice",slice)
1071 test_slice.create_slice()
1072 utils.header('Created Slice %s'%slice['slice_fields']['name'])
# slice/qemu steps plus the TCP connectivity check.
# NOTE(review): sampled extract - decorator lines and several bodies are
# elided (ssh_slice, keys_clear_known_hosts, qemu_start, timestamp_qemu
# are mapper-expanded steps; see the file header).
1076 def ssh_slice(self):
1077 "tries to ssh-enter the slice with the user key, to ensure slice creation"
1081 def keys_clear_known_hosts (self):
1082 "remove test nodes entries from the local known_hosts file"
1086 def qemu_start (self) :
1087 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
1091 def timestamp_qemu (self) :
# docstring was a copy-paste of qemu_start's; corrected per the step name
1092 "all nodes: push a timestamp onto the qemu box"
1095 def check_tcp (self):
1096 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1097 specs = self.plc_spec['tcp_test']
# per-spec loop header and port extraction are elided in this extract
1102 s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
1103 if not s_test_sliver.run_tcp_server(port,timeout=10):
1107 # idem for the client side
# NOTE(review): BUG suspected - this locates the *server* sliver again
# (spec['server_node'],spec['server_slice']); the client side should
# presumably use spec['client_node'],spec['client_slice'] - confirm
# against the tcp_test spec schema before fixing
1108 c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
1109 if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
1113 def plcsh_stress_test (self):
1114 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1115 # install the stress-test in the plc image
1116 location = "/usr/share/plc_api/plcsh_stress_test.py"
# remote path is inside the plc's vserver root
1117 remote="/vservers/%s/%s"%(self.vservername,location)
1118 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
# NOTE(review): the initialization of 'command' (presumably
# 'command = location') is elided in this extract
1120 command += " -- --check"
# --tiny keeps the run short on single-element test configurations
1121 if self.options.size == 1:
1122 command += " --tiny"
1123 return ( self.run_in_guest(command) == 0)
1125 # populate runs the same utility without slightly different options
1126 # in particular runs with --preserve (dont cleanup) and without --check
1127 # also it gets run twice, once with the --foreign option for creating fake foreign entries
def sfa_install(self):
    "yum install sfa"
    # delegate to the generic yum-install step helper
    return self.yum_install("sfa")
def sfa_plc_install(self):
    "yum install sfa-plc"
    # the sfa-plc package carries the PLC-side pieces of SFA
    return self.yum_install("sfa-plc")
def sfa_client_install(self):
    "yum install sfa-client"
    # the sfa-client package provides the sfi command-line tools
    return self.yum_install("sfa-client")
1145 def sfa_tables_install(self):
# docstring previously said "sfa-client" - copy-paste error; this step
# installs the sfatables package as the code below shows
1146 "yum install sfa-sfatables"
1147 return self.yum_install ("sfa-sfatables")
def sfa_dbclean(self):
    "thoroughly wipes off the SFA database"
    # BUG FIX: the original computed run_in_guest(...)==0 and discarded the
    # result, so the step always returned None; step methods must return a
    # boolean (see file header), so return the comparison
    return self.run_in_guest("sfa-nuke-plc.py")==0
# removes the PLC-side slices and users that SFA runs created, so a
# fresh sfa_import starts from a clean state.
# NOTE(review): Python-2 print statements; the bare 'except:' clauses
# deliberately treat any deletion failure as "already absent" - they
# would also mask real API errors.
1154 def sfa_plcclean(self):
1155 "cleans the PLC entries that were created as a side effect of running the script"
1157 sfa_spec=self.plc_spec['sfa']
1159 for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
# PLC slice names are <login_base>_<slicename>
1160 slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
1161 try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
1162 except: print "Slice %s already absent from PLC db"%slicename
1164 username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
1165 try: self.apiserver.DeletePerson(self.auth_root(),username)
1166 except: print "User %s already absent from PLC db"%username
1168 print "REMEMBER TO RUN sfa_import AGAIN"
1171 def sfa_uninstall(self):
1172 "uses rpm to uninstall sfa - ignore result"
# remove the packages, then wipe state, config and log files;
# every result is deliberately ignored per the docstring
1173 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1174 self.run_in_guest("rm -rf /var/lib/sfa")
1175 self.run_in_guest("rm -rf /etc/sfa")
1176 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
# second rpm pass with --noscripts in case the regular erase failed
# NOTE(review): no return is visible in this extract - the step likely
# ends with 'return True' in the elided lines; confirm
1178 self.run_in_guest("rpm -e --noscripts sfa-plc")
1181 ### run unit tests for SFA
1182 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1183 # Running Transaction
1184 # Transaction couldn't start:
1185 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1186 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1187 # no matter how many Gbs are available on the testplc
1188 # could not figure out what's wrong, so...
1189 # if the yum install phase fails, consider the test is successful
1190 # other combinations will eventually run it hopefully
1191 def sfa_utest(self):
1192 "yum install sfa-tests and run SFA unittests"
1193 self.run_in_guest("yum -y install sfa-tests")
1194 # failed to install - forget it
# per the comment block above this method, a failed install is treated
# as success; the early 'return True' after the warning is elided in
# this extract
1195 if self.run_in_guest("rpm -q sfa-tests")!=0:
1196 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1198 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
# body of confdir (the 'def confdir(self):' line is elided in this
# extract): ensures a local per-plc config directory conf.<name> exists
# and - presumably, in the elided tail - returns its name
1202 dirname="conf.%s"%self.plc_spec['name']
1203 if not os.path.isdir(dirname):
1204 utils.system("mkdir -p %s"%dirname)
# re-check after mkdir -p; if still missing the creation failed
# NOTE(review): raising a plain string is invalid - it raises TypeError
# in Python >= 2.6; should be raise Exception(...) when this gets fixed
1205 if not os.path.isdir(dirname):
1206 raise "Cannot create config dir for plc %s"%self.name()
def conffile(self,filename):
    "return the path of <filename> inside this plc's local config dir"
    directory = self.confdir()
    return "%s/%s" % (directory, filename)
# ensure (and optionally wipe first) a subdirectory of the local config
# dir; NOTE(review): the 'if clean:' guard before the rm -rf is elided
# in this extract, as is the trailing return
1211 def confsubdir(self,dirname,clean,dry_run=False):
1212 subdirname="%s/%s"%(self.confdir(),dirname)
1214 utils.system("rm -rf %s"%subdirname)
1215 if not os.path.isdir(subdirname):
1216 utils.system("mkdir -p %s"%subdirname)
# on dry runs the mkdir may have been skipped, so only complain for real runs
# NOTE(review): raising a plain string is invalid - it raises TypeError
# in Python >= 2.6; should be raise Exception(...) when this gets fixed
1217 if not dry_run and not os.path.isdir(subdirname):
1218 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
def conffile_clean (self,filename):
    "remove <filename> from the local config dir; True on success"
    full_path = self.conffile(filename)
    return utils.system("rm -rf %s" % full_path) == 0
# drives sfa-config-tty non-interactively: writes an 'e <var>\n<value>'
# script locally, then pipes it into sfa-config-tty in the guest.
# NOTE(review): sampled extract - several variable names in the list
# (original lines 1235-1244) and the else-branch setting val='false'
# are elided here; fileconf is not visibly closed before the cat -
# the close is presumably in the elided line 1254, confirm
1226 def sfa_configure(self):
1227 "run sfa-config-tty"
1228 tmpname=self.conffile("sfa-config-tty")
1229 fileconf=open(tmpname,'w')
1230 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1231 'SFA_INTERFACE_HRN',
1232 # 'SFA_REGISTRY_LEVEL1_AUTH',
1233 'SFA_REGISTRY_HOST',
1234 'SFA_AGGREGATE_HOST',
1245 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1246 # the way plc_config handles booleans just sucks..
1247 for var in ['SFA_API_DEBUG']:
1249 if self.plc_spec['sfa'][var]: val='true'
1250 fileconf.write ('e %s\n%s\n'%(var,val))
# w = write, R = restart services, q = quit (sfa-config-tty commands)
1251 fileconf.write('w\n')
1252 fileconf.write('R\n')
1253 fileconf.write('q\n')
# echo the script for the test logs, then feed it to the guest
1255 utils.system('cat %s'%tmpname)
1256 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
def aggregate_xml_line(self):
    "return the <aggregate .../> XML fragment describing this plc for its peers"
    sfa_spec = self.plc_spec['sfa']
    neighbour_port = sfa_spec['neighbours-port']
    root_auth = sfa_spec['SFA_REGISTRY_ROOT_AUTH']
    return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
        (self.vserverip, root_auth, neighbour_port)
def registry_xml_line(self):
    "return the <registry .../> XML fragment describing this plc for its peers"
    # the registry port is hard-wired to 12345
    root_auth = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="%s" hrn="%s" port="12345"/>' % \
        (self.vserverip, root_auth)
1269 # a cross step that takes all other plcs in argument
1270 def cross_sfa_configure(self, other_plcs):
1271 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1272 # of course with a single plc, other_plcs is an empty list
# NOTE(review): the early-return for the empty-list case (original
# lines 1273-1274) is elided in this extract
# NOTE(review): file() is the Python-2 builtin, and the handles are
# never explicitly closed - relies on CPython refcounting to flush
1275 agg_fname=self.conffile("agg.xml")
1276 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1277 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1278 utils.header ("(Over)wrote %s"%agg_fname)
1279 reg_fname=self.conffile("reg.xml")
1280 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1281 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1282 utils.header ("(Over)wrote %s"%reg_fname)
# push both files into the guest's /etc/sfa; the step succeeds only if
# both copies succeed
1283 return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
1284 and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
def sfa_import(self):
    "runs sfa-import-plc.py inside the guest to import the PLC db into SFA"
    # NOTE(review): the original bound self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    # to a local 'auth' that was only used by the commented-out server.key copy
    # below; dead code, both removed:
    #   self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey '
    #                     '/etc/sfa/authorities/server.key'%(auth,auth))
    return self.run_in_guest('sfa-import-plc.py')==0
def sfa_start(self):
    "service sfa start"
    # a zero exit status from the init script means the daemon started
    return self.run_in_guest('service sfa start')==0
1297 def sfi_configure(self):
1298 "Create /root/sfi on the plc side for sfi client configuration"
# NOTE(review): the early return after the dry-run header (original
# line 1301, presumably 'return True') is elided in this extract
1299 if self.options.dry_run:
1300 utils.header("DRY RUN - skipping step")
# NOTE(review): sfa_spec is assigned but never used in the visible code
1302 sfa_spec=self.plc_spec['sfa']
1303 # cannot use sfa_slice_mapper to pass dir_name
1304 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
1305 site_spec = self.locate_site (slice_spec['sitename'])
1306 test_site = TestSite(self,site_spec)
1307 test_slice=TestSliceSfa(self,test_site,slice_spec)
# build the per-slice dot-sfi config dir locally, then generate configs
1308 dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
1309 test_slice.sfi_config(dir_name)
1310 # push into the remote /root/sfi area
1311 location = test_slice.sfi_path()
1312 remote="/vservers/%s/%s"%(self.vservername,location)
1313 self.test_ssh.mkdir(remote,abs=True)
1314 # need to strip last level or remote otherwise we get an extra dir level
1315 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1319 def sfi_clean (self):
1320 "clean up /root/sfi on the plc side"
# NOTE(review): no return is visible in this extract - a trailing
# 'return True' may live in the elided next line; as shown the step
# returns None, which the step framework treats as failure
1321 self.run_in_guest("rm -rf /root/sfi")
# SFA slice lifecycle steps; per the file header these are expanded by
# slice_sfa_mapper and delegate to the same-named TestSliceSfa method.
# NOTE(review): sampled extract - decorator lines and bodies are elided,
# mostly only defs and docstrings survive.
1325 def sfa_add_user(self):
1330 def sfa_update_user(self):
1334 def sfa_add_slice(self):
1335 "run sfi.py add (on Registry) from slice.xml"
1339 def sfa_discover(self):
# NOTE(review): "resouces" looks like a typo, but it may mirror the
# actual rspec filename used by the (elided) body - confirm before fixing
1340 "discover resources into resouces_in.rspec"
1344 def sfa_create_slice(self):
1345 "run sfi.py create (on SM) - 1st time"
1349 def sfa_check_slice_plc(self):
1350 "check sfa_create_slice at the plcs - all local nodes should be in slice"
1354 def sfa_update_slice(self):
1355 "run sfi.py create (on SM) on existing object"
# orphan docstring - its def line (a registry-exercising step) is elided
1360 "various registry-related calls"
1364 def ssh_slice_sfa(self):
1365 "tries to ssh-enter the SFA slice"
1369 def sfa_delete_user(self):
1374 def sfa_delete_slice(self):
1375 "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
# NOTE(review): same bug pattern as sfa_dbclean - the ==0 comparison is
# computed and discarded; its enclosing def (sfa_stop) is elided here,
# but the result should be returned
1380 self.run_in_guest('service sfa stop')==0
1383 def populate (self):
1384 "creates random entries in the PLCAPI"
1385 # install the stress-test in the plc image
1386 location = "/usr/share/plc_api/plcsh_stress_test.py"
1387 remote="/vservers/%s/%s"%(self.vservername,location)
1388 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
# NOTE(review): the initialization of 'command' (presumably
# 'command = location') is elided in this extract
1390 command += " -- --preserve --short-names"
1391 local = (self.run_in_guest(command) == 0);
1392 # second run with --foreign
1393 command += ' --foreign'
# NOTE(review): 'remote' is reused here - first a path string, now a
# boolean; a distinct name (e.g. foreign_ok) would be clearer
1394 remote = (self.run_in_guest(command) == 0);
1395 return ( local and remote)
1397 def gather_logs (self):
1398 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1399 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1400 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1401 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1402 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1403 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
# Python-2 print statements throughout; each phase delegates to a
# dedicated gather_* helper defined below
1405 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1406 self.gather_var_logs ()
1408 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1409 self.gather_pgsql_logs ()
1411 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1412 for site_spec in self.plc_spec['sites']:
1413 test_site = TestSite (self,site_spec)
1414 for node_spec in site_spec['nodes']:
1415 test_node=TestNode(self,test_site,node_spec)
1416 test_node.gather_qemu_logs()
1418 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1419 self.gather_nodes_var_logs()
1421 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
# NOTE(review): a trailing 'return True' may live in the elided lines
# after this call - confirm
1422 self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    "fetch each sliver's /var/log into logs/sliver.var-log.<sliver>/"
    for test_sliver in self.all_sliver_objs():
        # tar_var_logs yields the remote side of the pipeline
        remote = test_sliver.tar_var_logs()
        local_dir = "logs/sliver.var-log.%s" % test_sliver.name()
        utils.system("mkdir -p %s" % local_dir)
        utils.system(remote + " | tar -C %s -xf -" % local_dir)
def gather_var_logs (self):
    "fetch the plc guest's /var/log into logs/myplc.var-log.<name>/"
    local_dir = "logs/myplc.var-log.%s" % self.name()
    utils.system("mkdir -p %s" % local_dir)
    tar_from_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_from_plc + "| tar -C %s -xf -" % local_dir)
    # make the httpd subdir readable by everyone once retrieved
    utils.system("chmod a+r,a+x %s/httpd" % local_dir)
def gather_pgsql_logs (self):
    "fetch the plc guest's postgresql logs into logs/myplc.pgsql-log.<name>/"
    local_dir = "logs/myplc.pgsql-log.%s" % self.name()
    utils.system("mkdir -p %s" % local_dir)
    tar_from_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_from_plc + "| tar -C %s -xf -" % local_dir)
def gather_nodes_var_logs (self):
    "fetch each node's /var/log into logs/node.var-log.<node>/"
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite(self, site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode(self, test_site, node_spec)
            # nodes are reached with the test key, not the plc's
            node_ssh = TestSsh(test_node.name(), key="keys/key1.rsa")
            local_dir = "logs/node.var-log.%s" % test_node.name()
            pipeline = node_ssh.actual_command("tar -C /var/log -cf - .")
            pipeline = pipeline + "| tar -C %s -xf -" % local_dir
            utils.system("mkdir -p %s" % local_dir)
            utils.system(pipeline)
1459 # returns the filename to use for sql dump/restore, using options.dbname if set
1460 def dbfile (self, database):
1461 # uses options.dbname if it is found
# NOTE(review): sampled extract - the try/except around options.dbname
# and the fallback that builds 'name' from the timestamp 't' are elided
# NOTE(review): types.StringTypes is Python-2-only (see file imports)
1463 name=self.options.dbname
1464 if not isinstance(name,StringTypes):
1467 t=datetime.datetime.now()
1470 return "/root/%s-%s.sql"%(database,name)
def plc_db_dump(self):
    'dump the planetlab5 DB in /root in the PLC - filename has time'
    # NOTE: 'planetab5' (sic) is the historical dump-file label; plc_db_restore
    # uses the same spelling, so do not change one without the other
    dump_file = self.dbfile("planetab5")
    self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f ' + dump_file)
    utils.header('Dumped planetlab5 database in %s' % dump_file)
def plc_db_restore(self):
    'restore the planetlab5 DB - looks broken, but run -n might help'
    # same 'planetab5' (sic) label as plc_db_dump, kept in sync deliberately
    dump_file = self.dbfile("planetab5")
    ##stop httpd service
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    self.run_in_guest_piped('echo drop database planetlab5',
                            'psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
    self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump_file)
    ##starting httpd service
    self.run_in_guest('service httpd start')
    utils.header('Database restored from ' + dump_file)
1493 def standby_1_through_20(self):
1494 """convenience function to wait for a specified number of minutes"""
# the standby_N defs below are placeholders: per the file header,
# the @standby_generic decorator (its lines are elided in this extract)
# replaces each with a step that sleeps N minutes, deriving N from the
# function name - hence the empty 'pass' bodies
1497 def standby_1(): pass
1499 def standby_2(): pass
1501 def standby_3(): pass
1503 def standby_4(): pass
1505 def standby_5(): pass
1507 def standby_6(): pass
1509 def standby_7(): pass
1511 def standby_8(): pass
1513 def standby_9(): pass
1515 def standby_10(): pass
1517 def standby_11(): pass
1519 def standby_12(): pass
1521 def standby_13(): pass
1523 def standby_14(): pass
1525 def standby_15(): pass
1527 def standby_16(): pass
1529 def standby_17(): pass
1531 def standby_18(): pass
1533 def standby_19(): pass
1535 def standby_20(): pass