1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
24 # step methods must take (self) and return a boolean (options is a member of the class)
# Free-standing wait helper: block the test sequence for <minutes> minutes.
# NOTE(review): original lines 28-30 are missing from this listing -- presumably
# the dry_run short-circuit (dry_run is accepted but unused in the visible text).
26 def standby(minutes,dry_run):
27     utils.header('Entering StandBy for %d mn'%minutes)
31     time.sleep(60*minutes)
# Factory for the standby_<n>_through_<m> steps: derives the wait length from the
# step method's name (e.g. standby_5 -> 5 minutes) and delegates to standby().
34 def standby_generic (func):
# NOTE(review): line 35 is missing from this listing -- presumably the inner
# 'def actual(self):' wrapper; without it 'self' below is unbound.
36     minutes=int(func.__name__.split("_")[1])
37     return standby(minutes,self.options.dry_run)
# Decorator helper: runs the same-named TestNode method on every node of every
# site in plc_spec; any single-node failure clears the overall flag.
40 def node_mapper (method):
# NOTE(review): lines 41-42 (the 'def actual(self):' wrapper and 'overall=True',
# presumably) plus 49 and 52-53 (return statements) are missing from this listing.
43     node_method = TestNode.__dict__[method.__name__]
44     for site_spec in self.plc_spec['sites']:
45         test_site = TestSite (self,site_spec)
46         for node_spec in site_spec['nodes']:
47             test_node = TestNode (self,test_site,node_spec)
48             if not node_method(test_node): overall=False
50     # restore the doc text
51     actual.__doc__=method.__doc__
# Decorator helper: runs the same-named TestSlice method on every slice of
# plc_spec, locating each slice's site first.
54 def slice_mapper (method):
# NOTE(review): the wrapper scaffolding ('def actual(self):', 'overall=True',
# and the returns -- lines 55-56, 63, 66-67 presumably) is missing from this listing.
57     slice_method = TestSlice.__dict__[method.__name__]
58     for slice_spec in self.plc_spec['slices']:
59         site_spec = self.locate_site (slice_spec['sitename'])
60         test_site = TestSite(self,site_spec)
61         test_slice=TestSlice(self,test_site,slice_spec)
62         if not slice_method(test_slice,self.options): overall=False
64     # restore the doc text
65     actual.__doc__=method.__doc__
# Decorator helper: SFA variant of slice_mapper -- iterates the sfa_slice_specs
# and applies the same-named TestSliceSfa method to each.
68 def slice_sfa_mapper (method):
# NOTE(review): as with the other mappers, the inner 'def actual(self):',
# 'overall=True' and return lines are missing from this listing.
71     slice_method = TestSliceSfa.__dict__[method.__name__]
72     for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
73         site_spec = self.locate_site (slice_spec['sitename'])
74         test_site = TestSite(self,site_spec)
75         test_slice=TestSliceSfa(self,test_site,slice_spec)
76         if not slice_method(test_slice,self.options): overall=False
78     # restore the doc text
79     actual.__doc__=method.__doc__
89 'vs_delete','timestamp_vs','vs_create', SEP,
90 'plc_install', 'plc_configure', 'plc_start', SEP,
91 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
92 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
93 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
94 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
95 'sfa_install', 'sfa_tables_install', 'sfa_plc_install', 'sfa_client_install', SEPSFA,
96 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
97 'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
98 'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
99 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
100 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
101 # but as the stress test might take a while, we sometimes missed the debug mode..
102 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
103 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
104 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
106 'force_gather_logs', SEP,
109 'export', 'show_boxes', SEP,
110 'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
111 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
112 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
113 'delete_leases', 'list_leases', SEP,
115 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
116 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
117 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEP,
118 'plc_db_dump' , 'plc_db_restore', SEP,
119 'standby_1_through_20',SEP,
# Render a list of step names as a single shell-style string, breaking the line
# (with a trailing backslash) at each SEP / SEPSFA separator marker.
# NOTE(review): the parameter shadows the builtin 'list' -- worth renaming.
123 def printable_steps (list):
124     single_line=" ".join(list)+" "
125     return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
# A step name is runnable iff it is not one of the SEP/SEPSFA separator markers.
127 def valid_step (step):
128     return step != SEP and step != SEPSFA
130 # turn off the sfa-related steps when build has skipped SFA
131 # this is originally for centos5 as recent SFAs won't build on this platform
# Probe the build's rpms repo over HTTP; when no sfa- package is present,
# demote every sfa-related step from default_steps to other_steps.
133 def check_whether_build_has_sfa (rpms_url):
134     # warning, we're now building 'sface' so let's be a bit more picky
135     retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
136     # full builds are expected to return with 0 here
# NOTE(review): line 137 is missing from this listing -- presumably the
# 'if retcod==0: return' early exit; retcod is otherwise unused below.
138     # move all steps containing 'sfa' from default_steps to other_steps
139     sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
140     TestPlc.other_steps += sfa_steps
141     for step in sfa_steps: TestPlc.default_steps.remove(step)
# Build a TestPlc from its spec dict and the command-line options: ssh handle to
# the host box, vserver coordinates, API URL and XMLRPC server proxy.
143 def __init__ (self,plc_spec,options):
144     self.plc_spec=plc_spec
# NOTE(review): line 145 is missing -- presumably 'self.options=options',
# which self.options.buildname below relies on.
146     self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
148     self.vserverip=plc_spec['vserverip']
149     self.vservername=plc_spec['vservername']
150     self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
# NOTE(review): lines 151-152 are missing -- the raise below is presumably
# guarded by an 'if' testing for a chroot-based (non-vserver) spec.
153     raise Exception,'chroot-based myplc testing is deprecated'
154     self.apiserver=TestApiserver(self.url,options.dry_run)
157 name=self.plc_spec['name']
158 return "%s.%s"%(name,self.vservername)
161 return self.plc_spec['host_box']
164 return self.test_ssh.is_local()
166 # define the API methods on this object through xmlrpc
167 # would help, but not strictly necessary
# Wrap <command> to run inside the guest vserver, then wrap that with the ssh
# invocation targeting the host box; returns the full command string.
171 def actual_command_in_guest (self,command):
172     return self.test_ssh.actual_command(self.host_to_guest(command))
# Start the guest vserver by running the vserver-start command on the host box;
# returns utils.system's exit status.
174 def start_guest (self):
175     return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
# Stop the guest vserver by running the vserver-stop command on the host box;
# returns utils.system's exit status.
177 def stop_guest (self):
178     return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))
# Execute <command> inside the guest vserver (via ssh to the host box);
# returns the shell exit status from utils.system.
180 def run_in_guest (self,command):
181     return utils.system(self.actual_command_in_guest(command))
# Execute <command> directly on the host box (in the build area), not in the guest.
183 def run_in_host (self,command):
184     return self.test_ssh.run_in_buildname(command)
186     #command gets run in the vserver
# Prefix <command> with 'vserver <name> exec' so that, when run on the host box,
# it executes inside the guest.
187     def host_to_guest(self,command):
188         return "vserver %s exec %s"%(self.vservername,command)
190     #start/stop the vserver
# Shell command (to be run on the host box) that boots the guest vserver.
191     def start_guest_in_host(self):
192         return "vserver %s start"%(self.vservername)
# Shell command (to be run on the host box) that stops the guest vserver.
194     def stop_guest_in_host(self):
195         return "vserver %s stop"%(self.vservername)
# Run <local> here and pipe its stdout into <remote> executed inside the guest;
# keep_stdin=True keeps the ssh side reading from the pipe.
198     def run_in_guest_piped (self,local,remote):
199         return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
201     # does a yum install in the vs, ignore yum retcod, check with rpm
202     def yum_install (self, rpms):
203         if isinstance (rpms, list):
# NOTE(review): line 204 is missing from this listing -- presumably
# 'rpms=" ".join(rpms)'; as shown the isinstance branch has no body.
205         self.run_in_guest("yum -y install %s"%rpms)
# yum's exit status is unreliable, so success is judged by rpm -q instead
206         return self.run_in_guest("rpm -q %s"%rpms)==0
# Build the PLCAPI password-authentication struct from the plc_spec config.
208     def auth_root (self):
209         return {'Username':self.plc_spec['PLC_ROOT_USER'],
210                 'AuthMethod':'password',
211                 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
212                 'Role' : self.plc_spec['role']
# NOTE(review): the closing '}' (original line 213) is missing from this listing.
# Find a site spec by 'name', falling back to 'login_base'; raises when absent.
# NOTE(review): both return statements (original lines 217 and 219, presumably
# 'return site') are missing from this listing.
214     def locate_site (self,sitename):
215         for site in self.plc_spec['sites']:
216             if site['site_fields']['name'] == sitename:
218             if site['site_fields']['login_base'] == sitename:
220         raise Exception,"Cannot locate site %s"%sitename
# Scan every site for a node whose 'name' matches; raises when absent.
# NOTE(review): the return (original line 226, presumably '(site,node)' given
# how callers unpack it) is missing from this listing.
222     def locate_node (self,nodename):
223         for site in self.plc_spec['sites']:
224             for node in site['nodes']:
225                 if node['name'] == nodename:
227         raise Exception,"Cannot locate node %s"%nodename
# Like locate_node but matches on the PLCAPI hostname field; raises when absent.
# NOTE(review): the return (original line 233, presumably '(site,node)') is
# missing from this listing.
229     def locate_hostname (self,hostname):
230         for site in self.plc_spec['sites']:
231             for node in site['nodes']:
232                 if node['node_fields']['hostname'] == hostname:
234         raise Exception,"Cannot locate hostname %s"%hostname
# Find a key spec by name in plc_spec['keys']; raises when absent.
# NOTE(review): the return (original line 239, presumably 'return key') is
# missing from this listing.
236     def locate_key (self,keyname):
237         for key in self.plc_spec['keys']:
238             if key['name'] == keyname:
240         raise Exception,"Cannot locate key %s"%keyname
# Find a slice spec by slice name; raises when absent.
# NOTE(review): the return (original line 245, presumably 'return slice') is
# missing from this listing. Also note 'slice' shadows the builtin.
242     def locate_slice (self, slicename):
243         for slice in self.plc_spec['slices']:
244             if slice['slice_fields']['name'] == slicename:
246         raise Exception,"Cannot locate slice %s"%slicename
# Build a TestSliver object for every (slice, node) pair declared in plc_spec.
# NOTE(review): the accumulator init (line 249, presumably 'result=[]') and the
# final 'return result' (lines 254-255) are missing from this listing.
248     def all_sliver_objs (self):
250         for slice_spec in self.plc_spec['slices']:
251             slicename = slice_spec['slice_fields']['name']
252             for nodename in slice_spec['nodenames']:
253                 result.append(self.locate_sliver_obj (nodename,slicename))
# Resolve the node and slice specs, then wrap them in Test* objects down to
# a single TestSliver for (nodename, slicename).
256     def locate_sliver_obj (self,nodename,slicename):
257         (site,node) = self.locate_node(nodename)
258         slice = self.locate_slice (slicename)
260         test_site = TestSite (self, site)
261         test_node = TestNode (self, test_site,node)
262         # xxx the slice site is assumed to be the node site - mhh - probably harmless
263         test_slice = TestSlice (self, test_site, slice)
264         return TestSliver (self, test_node, test_slice)
# Shortcut: TestNode for the first node of the first declared slice.
# NOTE(review): the trailing return (original line 271, presumably
# 'return test_node') is missing from this listing.
266     def locate_first_node(self):
267         nodename=self.plc_spec['slices'][0]['nodenames'][0]
268         (site,node) = self.locate_node(nodename)
269         test_site = TestSite (self, site)
270         test_node = TestNode (self, test_site,node)
# Shortcut: TestSliver for the first node of the first declared slice.
273     def locate_first_sliver (self):
274         slice_spec=self.plc_spec['slices'][0]
275         slicename=slice_spec['slice_fields']['name']
276         nodename=slice_spec['nodenames'][0]
277         return self.locate_sliver_obj(nodename,slicename)
279     # all different hostboxes used in this plc
280     def gather_hostBoxes(self):
281         # maps on sites and nodes, return [ (host_box,test_node) ]
# NOTE(review): this listing is missing the accumulator inits ('tuples=[]' at
# line 282 and 'result={}' near 290 presumably), the new-key branch body
# (293-294) and the final return (296-297).
283         for site_spec in self.plc_spec['sites']:
284             test_site = TestSite (self,site_spec)
285             for node_spec in site_spec['nodes']:
286                 test_node = TestNode (self, test_site, node_spec)
# only qemu (non-real) nodes live on a host box worth tracking here
287                 if not test_node.is_real():
288                     tuples.append( (test_node.host_box(),test_node) )
289         # transform into a dict { 'host_box' -> [ test_node .. ] }
291         for (box,node) in tuples:
292             if not result.has_key(box):
295             result[box].append(node)
298     # a step for checking this stuff
# Step: print, per host box, the names of the qemu nodes it hosts.
299     def show_boxes (self):
300         'print summary of nodes location'
301         for (box,nodes) in self.gather_hostBoxes().iteritems():
302             print box,":"," + ".join( [ node.name() for node in nodes ] )
305 # make this a valid step
306 def qemu_kill_all(self):
307 'kill all qemu instances on the qemu boxes involved by this setup'
308 # this is the brute force version, kill all qemus on that host box
309 for (box,nodes) in self.gather_hostBoxes().iteritems():
310 # pass the first nodename, as we don't push template-qemu on testboxes
311 nodedir=nodes[0].nodedir()
312 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
315 # make this a valid step
316 def qemu_list_all(self):
317 'list all qemu instances on the qemu boxes involved by this setup'
318 for (box,nodes) in self.gather_hostBoxes().iteritems():
319 # this is the brute force version, kill all qemus on that host box
320 TestBoxQemu(box,self.options.buildname).qemu_list_all()
323 # kill only the right qemus
324 def qemu_list_mine(self):
325 'list qemu instances for our nodes'
326 for (box,nodes) in self.gather_hostBoxes().iteritems():
327 # the fine-grain version
332 # kill only the right qemus
333 def qemu_kill_mine(self):
334 'kill the qemu instances for our nodes'
335 for (box,nodes) in self.gather_hostBoxes().iteritems():
336 # the fine-grain version
341 #################### display config
343 "show test configuration after localization"
344 self.display_pass (1)
345 self.display_pass (2)
349 "print cut'n paste-able stuff to export env variables to your shell"
350 # these work but the shell prompt does not get displayed..
351 command1="ssh %s vserver %s enter"%(self.plc_spec['host_box'],self.plc_spec['vservername'])
352 command2="ssh root@%s %s"%(socket.gethostname(),command1)
353 # guess local domain from hostname
354 domain=socket.gethostname().split('.',1)[1]
355 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
356 print "export BUILD=%s"%self.options.buildname
357 print "export PLCHOST=%s"%fqdn
358 print "export GUEST=%s"%self.plc_spec['vservername']
359 # find hostname of first node
360 (hostname,qemubox) = self.all_node_infos()[0]
361 print "export KVMHOST=%s.%s"%(qemubox,domain)
362 print "export NODE=%s"%(hostname)
366 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
367 def display_pass (self,passno):
368 for (key,val) in self.plc_spec.iteritems():
369 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
373 self.display_site_spec(site)
374 for node in site['nodes']:
375 self.display_node_spec(node)
376 elif key=='initscripts':
377 for initscript in val:
378 self.display_initscript_spec (initscript)
381 self.display_slice_spec (slice)
384 self.display_key_spec (key)
386 if key not in ['sites','initscripts','slices','keys', 'sfa']:
387 print '+ ',key,':',val
389 def display_site_spec (self,site):
390 print '+ ======== site',site['site_fields']['name']
391 for (k,v) in site.iteritems():
392 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
395 print '+ ','nodes : ',
397 print node['node_fields']['hostname'],'',
403 print user['name'],'',
405 elif k == 'site_fields':
406 print '+ login_base',':',v['login_base']
407 elif k == 'address_fields':
413 def display_initscript_spec (self,initscript):
414 print '+ ======== initscript',initscript['initscript_fields']['name']
416 def display_key_spec (self,key):
417 print '+ ======== key',key['name']
419 def display_slice_spec (self,slice):
420 print '+ ======== slice',slice['slice_fields']['name']
421 for (k,v) in slice.iteritems():
434 elif k=='slice_fields':
435 print '+ fields',':',
436 print 'max_nodes=',v['max_nodes'],
441 def display_node_spec (self,node):
442 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
443 print "hostname=",node['node_fields']['hostname'],
444 print "ip=",node['interface_fields']['ip']
445 if self.options.verbose:
446 utils.pprint("node details",node,depth=3)
448 # another entry point for just showing the boxes involved
449 def display_mapping (self):
450 TestPlc.display_mapping_plc(self.plc_spec)
454 def display_mapping_plc (plc_spec):
455 print '+ MyPLC',plc_spec['name']
456 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
457 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
458 for site_spec in plc_spec['sites']:
459 for node_spec in site_spec['nodes']:
460 TestPlc.display_mapping_node(node_spec)
463 def display_mapping_node (node_spec):
464 print '+ NODE %s'%(node_spec['name'])
465 print '+\tqemu box %s'%node_spec['host_box']
466 print '+\thostname=%s'%node_spec['node_fields']['hostname']
468 # write a timestamp in /vservers/<>.timestamp
469 # cannot be inside the vserver, that causes vserver .. build to cough
470 def timestamp_vs (self):
472 return utils.system(self.test_ssh.actual_command("echo %d > /vservers/%s.timestamp"%(now,self.vservername)))==0
474 # def local_pre (self):
475 # "run site-dependant pre-test script as defined in LocalTestResources"
476 # from LocalTestResources import local_resources
477 # return local_resources.step_pre(self)
479 # def local_post (self):
480 # "run site-dependant post-test script as defined in LocalTestResources"
481 # from LocalTestResources import local_resources
482 # return local_resources.step_post(self)
484 # def local_list (self):
485 # "run site-dependant list script as defined in LocalTestResources"
486 # from LocalTestResources import local_resources
487 # return local_resources.step_list(self)
489 # def local_rel (self):
490 # "run site-dependant release script as defined in LocalTestResources"
491 # from LocalTestResources import local_resources
492 # return local_resources.step_release(self)
494 # def local_rel_plc (self):
495 # "run site-dependant release script as defined in LocalTestResources"
496 # from LocalTestResources import local_resources
497 # return local_resources.step_release_plc(self)
499 # def local_rel_qemu (self):
500 # "run site-dependant release script as defined in LocalTestResources"
501 # from LocalTestResources import local_resources
502 # return local_resources.step_release_qemu(self)
505 "vserver delete the test myplc"
506 self.run_in_host("vserver --silent %s delete"%self.vservername)
507 self.run_in_host("rm -f /vservers/%s.timestamp"%self.vservername)
511 # historically the build was being fetched by the tests
512 # now the build pushes itself as a subdir of the tests workdir
513 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
514 def vs_create (self):
515 "vserver creation (no install done)"
516 # push the local build/ dir to the testplc box
518 # a full path for the local calls
519 build_dir=os.path.dirname(sys.argv[0])
520 # sometimes this is empty - set to "." in such a case
521 if not build_dir: build_dir="."
522 build_dir += "/build"
524 # use a standard name - will be relative to remote buildname
526 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
527 self.test_ssh.rmdir(build_dir)
528 self.test_ssh.copy(build_dir,recursive=True)
529 # the repo url is taken from arch-rpms-url
530 # with the last step (i386) removed
531 repo_url = self.options.arch_rpms_url
532 for level in [ 'arch' ]:
533 repo_url = os.path.dirname(repo_url)
534 # pass the vbuild-nightly options to vtest-init-vserver
536 test_env_options += " -p %s"%self.options.personality
537 test_env_options += " -d %s"%self.options.pldistro
538 test_env_options += " -f %s"%self.options.fcdistro
539 script="vtest-init-vserver.sh"
540 vserver_name = self.vservername
541 vserver_options="--netdev eth0 --interface %s"%self.vserverip
543 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
544 vserver_options += " --hostname %s"%vserver_hostname
546 print "Cannot reverse lookup %s"%self.vserverip
547 print "This is considered fatal, as this might pollute the test results"
549 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
550 return self.run_in_host(create_vserver) == 0
553 def plc_install(self):
554 "yum install myplc, noderepo, and the plain bootstrapfs"
556 # workaround for getting pgsql8.2 on centos5
557 if self.options.fcdistro == "centos5":
558 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
561 if self.options.personality == "linux32":
563 elif self.options.personality == "linux64":
566 raise Exception, "Unsupported personality %r"%self.options.personality
567 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
570 pkgs_list.append ("slicerepo-%s"%nodefamily)
571 pkgs_list.append ("myplc")
572 pkgs_list.append ("noderepo-%s"%nodefamily)
573 pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
574 pkgs_string=" ".join(pkgs_list)
575 return self.yum_install (pkgs_list)
578 def plc_configure(self):
580 tmpname='%s.plc-config-tty'%(self.name())
581 fileconf=open(tmpname,'w')
582 for var in [ 'PLC_NAME',
587 'PLC_MAIL_SUPPORT_ADDRESS',
590 # Above line was added for integrating SFA Testing
596 'PLC_RESERVATION_GRANULARITY',
598 'PLC_OMF_XMPP_SERVER',
600 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
601 fileconf.write('w\n')
602 fileconf.write('q\n')
604 utils.system('cat %s'%tmpname)
605 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
606 utils.system('rm %s'%tmpname)
611 self.run_in_guest('service plc start')
616 self.run_in_guest('service plc stop')
620 "start the PLC vserver"
625 "stop the PLC vserver"
629     # stores the keys from the config for further use
# Step: delegate to TestKey.store_key() for every key declared in plc_spec.
630     def keys_store(self):
631         "stores test users ssh keys in keys/"
632         for key_spec in self.plc_spec['keys']:
633             TestKey(self,key_spec).store_key()
# Step: wipe the local keys/ cache directory (best effort via rm -rf).
636     def keys_clean(self):
637         "removes keys cached in keys/"
638         utils.system("rm -rf ./keys")
641 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
642 # for later direct access to the nodes
643 def keys_fetch(self):
644 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
646 if not os.path.isdir(dir):
648 vservername=self.vservername
650 prefix = 'debug_ssh_key'
651 for ext in [ 'pub', 'rsa' ] :
652 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
653 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
654 if self.test_ssh.fetch(src,dst) != 0: overall=False
658 "create sites with PLCAPI"
659 return self.do_sites()
661 def delete_sites (self):
662 "delete sites with PLCAPI"
663 return self.do_sites(action="delete")
665 def do_sites (self,action="add"):
666 for site_spec in self.plc_spec['sites']:
667 test_site = TestSite (self,site_spec)
668 if (action != "add"):
669 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
670 test_site.delete_site()
671 # deleted with the site
672 #test_site.delete_users()
675 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
676 test_site.create_site()
677 test_site.create_users()
# Step: enumerate all site_ids through the PLCAPI and delete each site
# (PLC cascades related objects with the site).
680     def delete_all_sites (self):
681         "Delete all sites in PLC, and related objects"
682         print 'auth_root',self.auth_root()
683         site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
684         for site_id in site_ids:
685             print 'Deleting site_id',site_id
686             self.apiserver.DeleteSite(self.auth_root(),site_id)
690 "create nodes with PLCAPI"
691 return self.do_nodes()
692 def delete_nodes (self):
693 "delete nodes with PLCAPI"
694 return self.do_nodes(action="delete")
696 def do_nodes (self,action="add"):
697 for site_spec in self.plc_spec['sites']:
698 test_site = TestSite (self,site_spec)
700 utils.header("Deleting nodes in site %s"%test_site.name())
701 for node_spec in site_spec['nodes']:
702 test_node=TestNode(self,test_site,node_spec)
703 utils.header("Deleting %s"%test_node.name())
704 test_node.delete_node()
706 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
707 for node_spec in site_spec['nodes']:
708 utils.pprint('Creating node %s'%node_spec,node_spec)
709 test_node = TestNode (self,test_site,node_spec)
710 test_node.create_node ()
713 def nodegroups (self):
714 "create nodegroups with PLCAPI"
715 return self.do_nodegroups("add")
716 def delete_nodegroups (self):
717 "delete nodegroups with PLCAPI"
718 return self.do_nodegroups("delete")
# Timestamps below TestPlc.YEAR are treated as relative offsets (in units of
# <grain>) from <start>; larger values are assumed to already be absolute.
722     def translate_timestamp (start,grain,timestamp):
723         if timestamp < TestPlc.YEAR: return start+timestamp*grain
724         else: return timestamp
# Human-readable UTC rendering of an epoch timestamp (month-day hh:mm:ss).
727     def timestamp_printable (timestamp):
728         return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
731 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
733 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
734 print 'API answered grain=',grain
735 start=(now/grain)*grain
737 # find out all nodes that are reservable
738 nodes=self.all_reservable_nodenames()
740 utils.header ("No reservable node found - proceeding without leases")
743 # attach them to the leases as specified in plc_specs
744 # this is where the 'leases' field gets interpreted as relative of absolute
745 for lease_spec in self.plc_spec['leases']:
746 # skip the ones that come with a null slice id
747 if not lease_spec['slice']: continue
748 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
749 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
750 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
751 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
752 if lease_addition['errors']:
753 utils.header("Cannot create leases, %s"%lease_addition['errors'])
756 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
757 (nodes,lease_spec['slice'],
758 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
759 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
# Step: fetch every lease id through the PLCAPI and delete them all in one call.
763     def delete_leases (self):
764         "remove all leases in the myplc side"
765         lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
766         utils.header("Cleaning leases %r"%lease_ids)
767         self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
770 def list_leases (self):
771 "list all leases known to the myplc"
772 leases = self.apiserver.GetLeases(self.auth_root())
775 current=l['t_until']>=now
776 if self.options.verbose or current:
777 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
778 TestPlc.timestamp_printable(l['t_from']),
779 TestPlc.timestamp_printable(l['t_until'])))
782 # create nodegroups if needed, and populate
783 def do_nodegroups (self, action="add"):
784 # 1st pass to scan contents
786 for site_spec in self.plc_spec['sites']:
787 test_site = TestSite (self,site_spec)
788 for node_spec in site_spec['nodes']:
789 test_node=TestNode (self,test_site,node_spec)
790 if node_spec.has_key('nodegroups'):
791 nodegroupnames=node_spec['nodegroups']
792 if isinstance(nodegroupnames,StringTypes):
793 nodegroupnames = [ nodegroupnames ]
794 for nodegroupname in nodegroupnames:
795 if not groups_dict.has_key(nodegroupname):
796 groups_dict[nodegroupname]=[]
797 groups_dict[nodegroupname].append(test_node.name())
798 auth=self.auth_root()
800 for (nodegroupname,group_nodes) in groups_dict.iteritems():
802 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
803 # first, check if the nodetagtype is here
804 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
806 tag_type_id = tag_types[0]['tag_type_id']
808 tag_type_id = self.apiserver.AddTagType(auth,
809 {'tagname':nodegroupname,
810 'description': 'for nodegroup %s'%nodegroupname,
812 print 'located tag (type)',nodegroupname,'as',tag_type_id
814 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
816 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
817 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
818 # set node tag on all nodes, value='yes'
819 for nodename in group_nodes:
821 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
823 traceback.print_exc()
824 print 'node',nodename,'seems to already have tag',nodegroupname
827 expect_yes = self.apiserver.GetNodeTags(auth,
828 {'hostname':nodename,
829 'tagname':nodegroupname},
830 ['value'])[0]['value']
831 if expect_yes != "yes":
832 print 'Mismatch node tag on node',nodename,'got',expect_yes
835 if not self.options.dry_run:
836 print 'Cannot find tag',nodegroupname,'on node',nodename
840 print 'cleaning nodegroup',nodegroupname
841 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
843 traceback.print_exc()
847 # return a list of tuples (nodename,qemuname)
848 def all_node_infos (self) :
850 for site_spec in self.plc_spec['sites']:
851 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
852 for node_spec in site_spec['nodes'] ]
855 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
856 def all_reservable_nodenames (self):
858 for site_spec in self.plc_spec['sites']:
859 for node_spec in site_spec['nodes']:
860 node_fields=node_spec['node_fields']
861 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
862 res.append(node_fields['hostname'])
865 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
866 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
867 if self.options.dry_run:
871 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
872 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
873 # the nodes that haven't checked yet - start with a full list and shrink over time
874 tocheck = self.all_hostnames()
875 utils.header("checking nodes %r"%tocheck)
876 # create a dict hostname -> status
877 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
880 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
882 for array in tocheck_status:
883 hostname=array['hostname']
884 boot_state=array['boot_state']
885 if boot_state == target_boot_state:
886 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
888 # if it's a real node, never mind
889 (site_spec,node_spec)=self.locate_hostname(hostname)
890 if TestNode.is_real_model(node_spec['node_fields']['model']):
891 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
893 boot_state = target_boot_state
894 elif datetime.datetime.now() > graceout:
895 utils.header ("%s still in '%s' state"%(hostname,boot_state))
896 graceout=datetime.datetime.now()+datetime.timedelta(1)
897 status[hostname] = boot_state
899 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
902 if datetime.datetime.now() > timeout:
903 for hostname in tocheck:
904 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
906 # otherwise, sleep for a while
908 # only useful in empty plcs
911 def nodes_booted(self):
912 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
914 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
916 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
917 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
918 vservername=self.vservername
921 local_key = "keys/%(vservername)s-debug.rsa"%locals()
924 local_key = "keys/key1.rsa"
925 node_infos = self.all_node_infos()
926 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
927 for (nodename,qemuname) in node_infos:
928 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
929 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
930 (timeout_minutes,silent_minutes,period))
932 for node_info in node_infos:
933 (hostname,qemuname) = node_info
934 # try to run 'hostname' in the node
935 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
936 # don't spam logs - show the command only after the grace period
937 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
939 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
941 node_infos.remove(node_info)
943 # we will have tried real nodes once, in case they're up - but if not, just skip
944 (site_spec,node_spec)=self.locate_hostname(hostname)
945 if TestNode.is_real_model(node_spec['node_fields']['model']):
946 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
947 node_infos.remove(node_info)
950 if datetime.datetime.now() > timeout:
951 for (hostname,qemuname) in node_infos:
952 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
954 # otherwise, sleep for a while
956 # only useful in empty plcs
959 def ssh_node_debug(self):
960 "Tries to ssh into nodes in debug mode with the debug ssh key"
961 return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)
963 def ssh_node_boot(self):
964 "Tries to ssh into nodes in production mode with the root ssh key"
965 return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
968 def qemu_local_init (self):
969 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
973 "all nodes: invoke GetBootMedium and store result locally"
976 def qemu_local_config (self):
977 "all nodes: compute qemu config qemu.conf and store it locally"
980 def nodestate_reinstall (self):
981 "all nodes: mark PLCAPI boot_state as reinstall"
984 def nodestate_safeboot (self):
985 "all nodes: mark PLCAPI boot_state as safeboot"
988 def nodestate_boot (self):
989 "all nodes: mark PLCAPI boot_state as boot"
992 def nodestate_show (self):
993 "all nodes: show PLCAPI boot_state"
996 def qemu_export (self):
997 "all nodes: push local node-dep directory on the qemu box"
1000 ### check hooks : invoke scripts from hooks/{node,slice}
1001 def check_hooks_node (self):
1002 return self.locate_first_node().check_hooks()
1003 def check_hooks_sliver (self) :
1004 return self.locate_first_sliver().check_hooks()
1006 def check_hooks (self):
1007 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1008 return self.check_hooks_node() and self.check_hooks_sliver()
    def do_check_initscripts(self):
        # For every slice that declares an 'initscriptstamp', verify the stamp
        # is present on each of the slice's slivers.
        # NOTE(review): the result accumulator (presumably 'overall'), the
        # 'continue' in the no-stamp branch, and the final return are elided
        # from this view - confirm the method returns a boolean as steps must.
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptstamp'):
            stamp=slice_spec['initscriptstamp']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript_stamp(stamp):
1028 def check_initscripts(self):
1029 "check that the initscripts have triggered"
1030 return self.do_check_initscripts()
    def initscripts (self):
        "create initscripts with PLCAPI"
        # registers every initscript from the spec through the API server;
        # AddInitScript is expected to raise on failure
        # NOTE(review): the final return (presumably 'return True') is elided
        # from this view
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
    def delete_initscripts (self):
        "delete initscripts with PLCAPI"
        # best-effort deletion of every initscript named in the spec
        # NOTE(review): the try/except wrapper around the deletion (and the
        # final return) are elided from this view; the last print below is the
        # except-branch message
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
            print initscript_name,'deleted'
            print 'deletion went wrong - probably did not exist'
1053 "create slices with PLCAPI"
1054 return self.do_slices()
1056 def delete_slices (self):
1057 "delete slices with PLCAPI"
1058 return self.do_slices("delete")
    def do_slices (self, action="add"):
        # shared worker for the slices / delete_slices steps
        # NOTE(review): the branch selecting delete vs create on 'action' and
        # the final return are elided from this view; the indented pairs below
        # are the two branch bodies
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
                utils.header("Deleting slices in site %s"%test_site.name())
                test_slice.delete_slice()
                utils.pprint("Creating slice",slice)
                test_slice.create_slice()
                utils.header('Created Slice %s'%slice['slice_fields']['name'])
    # NOTE(review): the bodies/decorators of the steps below are elided from
    # this view (ssh_slice presumably @slice_mapper, the qemu ones @node_mapper)
    def ssh_slice(self):
        "tries to ssh-enter the slice with the user key, to ensure slice creation"
    def keys_clear_known_hosts (self):
        "remove test nodes entries from the local known_hosts file"
    def qemu_start (self) :
        "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
    def timestamp_qemu (self) :
        # NOTE(review): this docstring duplicates qemu_start's - it presumably
        # should describe recording the qemu start timestamp (cf. timestamp_vs)
        "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
    def check_tcp (self):
        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        # NOTE(review): the loop over 'specs' and the failure returns are
        # elided from this view
        specs = self.plc_spec['tcp_test']
            s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
            if not s_test_sliver.run_tcp_server(port,timeout=10):
            # idem for the client side
            # NOTE(review): BUG? the client sliver is located with the *server*
            # spec keys - should presumably be spec['client_node'],
            # spec['client_slice']; as written the "client" always runs in the
            # server sliver
            c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
            if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
    def plcsh_stress_test (self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        # NOTE(review): the initialization of 'command' (presumably
        # 'command = location') is elided from this view
        command += " -- --check"
        if self.options.size == 1:
            command += " --tiny"
        # step convention: True iff the script exited with status 0
        return ( self.run_in_guest(command) == 0)
1124 # populate runs the same utility without slightly different options
1125 # in particular runs with --preserve (dont cleanup) and without --check
1126 # also it gets run twice, once with the --foreign option for creating fake foreign entries
    def sfa_install(self):
        "yum install sfa"
        return self.yum_install ("sfa")
1134 def sfa_plc_install(self):
1135 "yum install sfa-plc"
1136 return self.yum_install("sfa-plc")
1139 def sfa_client_install(self):
1140 "yum install sfa-client"
1141 return self.yum_install("sfa-client")
    def sfa_tables_install(self):
        # docstring fixed: it previously said "yum install sfa-client"
        # (copy-paste from the step above) while the code installs sfa-sfatables
        "yum install sfa-sfatables"
        return self.yum_install ("sfa-sfatables")
    def sfa_dbclean(self):
        "thoroughly wipes off the SFA database"
        # NOTE(review): the '==0' comparison result is discarded; step methods
        # are expected to return a boolean, so this likely wants a 'return'
        # (or a trailing 'return True' elided from this view) - confirm
        self.run_in_guest("sfa-nuke-plc.py")==0
    def sfa_plcclean(self):
        "cleans the PLC entries that were created as a side effect of running the script"
        # removes, per SFA slice spec, the PLC slice and the regular user;
        # bare excepts deliberately treat any failure as "already absent"
        sfa_spec=self.plc_spec['sfa']
        for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
            slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
            try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
            except: print "Slice %s already absent from PLC db"%slicename
            username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
            try: self.apiserver.DeletePerson(self.auth_root(),username)
            except: print "User %s already absent from PLC db"%username
        print "REMEMBER TO RUN sfa_import AGAIN"
        # NOTE(review): final return (presumably 'return True') elided from this view
    def sfa_uninstall(self):
        "uses rpm to uninstall sfa - ignore result"
        # bulk erase of all sfa packages, then wipe state, logs and config
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
        self.run_in_guest("rm -rf /etc/sfa")
        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
        # second attempt on sfa-plc, skipping rpm scriptlets, in case the
        # bulk erase above failed partway
        self.run_in_guest("rpm -e --noscripts sfa-plc")
        # NOTE(review): no boolean is returned in the visible code - confirm a
        # trailing 'return True' is elided from this view
1180 ### run unit tests for SFA
1181 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1182 # Running Transaction
1183 # Transaction couldn't start:
1184 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1185 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1186 # no matter how many Gbs are available on the testplc
1187 # could not figure out what's wrong, so...
1188 # if the yum install phase fails, consider the test is successful
1189 # other combinations will eventually run it hopefully
    def sfa_utest(self):
        "yum install sfa-tests and run SFA unittests"
        self.run_in_guest("yum -y install sfa-tests")
        # failed to install - forget it
        if self.run_in_guest("rpm -q sfa-tests")!=0:
            utils.header("WARNING: SFA unit tests failed to install, ignoring")
            # NOTE(review): per the comment block above this method, an early
            # 'return True' belongs here; it is elided from this view
        # True iff the whole unittest suite exits with status 0
        return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1201 dirname="conf.%s"%self.plc_spec['name']
1202 if not os.path.isdir(dirname):
1203 utils.system("mkdir -p %s"%dirname)
1204 if not os.path.isdir(dirname):
1205 raise "Cannot create config dir for plc %s"%self.name()
1208 def conffile(self,filename):
1209 return "%s/%s"%(self.confdir(),filename)
    def confsubdir(self,dirname,clean,dry_run=False):
        # create (and, when 'clean', wipe first) a subdir of the config dir
        subdirname="%s/%s"%(self.confdir(),dirname)
        # NOTE(review): the 'if clean:' guard around the rm -rf is elided from
        # this view, as is the trailing 'return subdirname'
            utils.system("rm -rf %s"%subdirname)
        if not os.path.isdir(subdirname):
            utils.system("mkdir -p %s"%subdirname)
        if not dry_run and not os.path.isdir(subdirname):
            # NOTE(review): string exceptions are invalid on python >= 2.6
            raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1220 def conffile_clean (self,filename):
1221 filename=self.conffile(filename)
1222 return utils.system("rm -rf %s"%filename)==0
    def sfa_configure(self):
        "run sfa-config-tty"
        # builds an answer file locally ('e VAR' then its value, per
        # sfa-config-tty's interactive protocol), then pipes it into
        # sfa-config-tty inside the guest
        # NOTE(review): several entries of the variable list, the 'val' else
        # branch, fileconf.close() and the final return are elided from this view
        tmpname=self.conffile("sfa-config-tty")
        fileconf=open(tmpname,'w')
        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
             'SFA_INTERFACE_HRN',
#             'SFA_REGISTRY_LEVEL1_AUTH',
             'SFA_REGISTRY_HOST',
             'SFA_AGGREGATE_HOST',
             'SFA_PLC_DB_PASSWORD',
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
        # the way plc_config handles booleans just sucks..
        for var in ['SFA_API_DEBUG']:
            if self.plc_spec['sfa'][var]: val='true'
            fileconf.write ('e %s\n%s\n'%(var,val))
        # w = write config, R = restart, q = quit
        fileconf.write('w\n')
        fileconf.write('R\n')
        fileconf.write('q\n')
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1256 def aggregate_xml_line(self):
1257 port=self.plc_spec['sfa']['neighbours-port']
1258 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1259 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
1261 def registry_xml_line(self):
1262 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1263 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
    # a cross step that takes all other plcs in argument
    def cross_sfa_configure(self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        # NOTE(review): an early-exit guard for that empty case appears to be
        # elided from this view
        agg_fname=self.conffile("agg.xml")
        file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
            " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%agg_fname)
        reg_fname=self.conffile("reg.xml")
        file(reg_fname,"w").write("<registries>%s</registries>\n" % \
            " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%reg_fname)
        # push both files into the guest's /etc/sfa; the step succeeds only if
        # both copies do
        return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
            and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
    def sfa_import(self):
        "import the PLC entities into SFA with sfa-import-plc.py"
        # NOTE(review): 'auth' is computed but unused in the visible code -
        # it was presumably only needed by the commented-out key copy below
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        return self.run_in_guest('sfa-import-plc.py')==0
        # not needed anymore
        # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
    def sfa_start(self):
        "service sfa start"
        return self.run_in_guest('service sfa start')==0
    def sfi_configure(self):
        "Create /root/sfi on the plc side for sfi client configuration"
        if self.options.dry_run:
            utils.header("DRY RUN - skipping step")
            # NOTE(review): the early 'return True' after this header is
            # elided from this view
        sfa_spec=self.plc_spec['sfa']
        # cannot use sfa_slice_mapper to pass dir_name
        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSliceSfa(self,test_site,slice_spec)
            # generate the sfi config files locally under conf.<plc>/dot-sfi/<slice>
            dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
            test_slice.sfi_config(dir_name)
            # push into the remote /root/sfi area
            location = test_slice.sfi_path()
            remote="/vservers/%s/%s"%(self.vservername,location)
            self.test_ssh.mkdir(remote,abs=True)
            # need to strip last level or remote otherwise we get an extra dir level
            self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
    def sfi_clean (self):
        "clean up /root/sfi on the plc side"
        # NOTE(review): the exit status is discarded; steps are expected to
        # return a boolean - a trailing return may be elided from this view
        self.run_in_guest("rm -rf /root/sfi")
    # NOTE(review): the SFA steps below have their bodies, some docstrings and
    # their decorators (presumably @slice_sfa_mapper, defined at the top of the
    # file) elided from this view; only headers and docstrings remain
    def sfa_add_user(self):
    def sfa_update_user(self):
    def sfa_add_slice(self):
        "run sfi.py add (on Registry) from slice.xml"
    def sfa_discover(self):
        "discover resources into resouces_in.rspec"
    def sfa_create_slice(self):
        "run sfi.py create (on SM) - 1st time"
    def sfa_check_slice_plc(self):
        "check sfa_create_slice at the plcs - all local nodes should be in slice"
    def sfa_update_slice(self):
        "run sfi.py create (on SM) on existing object"
        # NOTE(review): the docstring below belongs to another (elided) step
        "various registry-related calls"
    def ssh_slice_sfa(self):
        "tries to ssh-enter the SFA slice"
    def sfa_delete_user(self):
    def sfa_delete_slice(self):
        "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
        # NOTE(review): the line below is the body of an elided sfa_stop step;
        # its '==0' comparison result is discarded (should likely be returned)
        self.run_in_guest('service sfa stop')==0
    def populate (self):
        "creates random entries in the PLCAPI"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        # NOTE(review): the initialization of 'command' (presumably
        # 'command = location') is elided from this view
        command += " -- --preserve --short-names"
        local = (self.run_in_guest(command) == 0);
        # second run with --foreign
        command += ' --foreign'
        # note: 'remote' is rebound here, shadowing the path computed above
        remote = (self.run_in_guest(command) == 0);
        return ( local and remote)
    def gather_logs (self):
        "gets all possible logs from plc's/qemu node's/slice's for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        # NOTE(review): a few lines (blank separators and presumably a final
        # 'return True') are elided from this view
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
        self.gather_pgsql_logs ()
        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
    def gather_slivers_var_logs(self):
        # fetch each sliver's /var/log (as produced by its tar_var_logs
        # command) into logs/sliver.var-log.<sliver>/
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
            command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
            utils.system(command)
        # NOTE(review): trailing lines (presumably 'return True') are elided
        # from this view
1430 def gather_var_logs (self):
1431 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1432 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1433 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1434 utils.system(command)
1435 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1436 utils.system(command)
1438 def gather_pgsql_logs (self):
1439 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1440 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1441 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1442 utils.system(command)
1444 def gather_nodes_var_logs (self):
1445 for site_spec in self.plc_spec['sites']:
1446 test_site = TestSite (self,site_spec)
1447 for node_spec in site_spec['nodes']:
1448 test_node=TestNode(self,test_site,node_spec)
1449 test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
1450 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1451 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1452 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1453 utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        # NOTE(review): the guard around options.dbname (presumably a
        # try/except or hasattr check) and the strftime-based fallback name are
        # partly elided from this view
        name=self.options.dbname
        if not isinstance(name,StringTypes):
            t=datetime.datetime.now()
        return "/root/%s-%s.sql"%(database,name)
    def plc_db_dump(self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        # NOTE(review): "planetab5" looks like a typo for "planetlab5"; it only
        # affects the dump file name (plc_db_restore uses the same name, so the
        # pair stays consistent) - confirm before renaming
        dump=self.dbfile("planetab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
        utils.header('Dumped planetlab5 database in %s'%dump)
        # NOTE(review): the final 'return True' appears elided from this view
    def plc_db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        # same (typo'd) filename as plc_db_dump, so dump/restore stay paired
        dump=self.dbfile("planetab5")
        ##stop httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        # drop, recreate, then reload the database from the dump file
        self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
        ##starting httpd service
        self.run_in_guest('service httpd start')
        # NOTE(review): no boolean is returned in the visible code - confirm
        utils.header('Database restored from ' + dump)
    def standby_1_through_20(self):
        """convenience function to wait for a specified number of minutes"""
        # NOTE(review): the body of this step and the @standby_generic
        # decorators on each stub below (which derive the wait time from the
        # function name, per the helper at the top of the file) are elided
        # from this view
    def standby_1(): pass
    def standby_2(): pass
    def standby_3(): pass
    def standby_4(): pass
    def standby_5(): pass
    def standby_6(): pass
    def standby_7(): pass
    def standby_8(): pass
    def standby_9(): pass
    def standby_10(): pass
    def standby_11(): pass
    def standby_12(): pass
    def standby_13(): pass
    def standby_14(): pass
    def standby_15(): pass
    def standby_16(): pass
    def standby_17(): pass
    def standby_18(): pass
    def standby_19(): pass
    def standby_20(): pass