1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBox import TestBox
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
23 from TestUserSfa import TestUserSfa
25 # step methods must take (self) and return a boolean (options is a member of the class)
27 def standby(minutes,dry_run):
28 utils.header('Entering StandBy for %d mn'%minutes)
32 time.sleep(60*minutes)
35 def standby_generic (func):
37 minutes=int(func.__name__.split("_")[1])
38 return standby(minutes,self.options.dry_run)
41 def node_mapper (method):
44 node_method = TestNode.__dict__[method.__name__]
45 for site_spec in self.plc_spec['sites']:
46 test_site = TestSite (self,site_spec)
47 for node_spec in site_spec['nodes']:
48 test_node = TestNode (self,test_site,node_spec)
49 if not node_method(test_node): overall=False
51 # restore the doc text
52 actual.__doc__=method.__doc__
55 def slice_mapper (method):
58 slice_method = TestSlice.__dict__[method.__name__]
59 for slice_spec in self.plc_spec['slices']:
60 site_spec = self.locate_site (slice_spec['sitename'])
61 test_site = TestSite(self,site_spec)
62 test_slice=TestSlice(self,test_site,slice_spec)
63 if not slice_method(test_slice,self.options): overall=False
65 # restore the doc text
66 actual.__doc__=method.__doc__
69 def slice_sfa_mapper (method):
72 slice_method = TestSliceSfa.__dict__[method.__name__]
73 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
74 site_spec = self.locate_site (slice_spec['sitename'])
75 test_site = TestSite(self,site_spec)
76 test_slice=TestSliceSfa(self,test_site,slice_spec)
77 if not slice_method(test_slice,self.options): overall=False
79 # restore the doc text
80 actual.__doc__=method.__doc__
89 'show', 'local_pre', SEP,
90 'vs_delete','vs_create','plc_install', 'plc_configure', 'plc_start', SEP,
91 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
92 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
93 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', 'qemu_export', 'qemu_kill_all', 'qemu_start', SEP,
94 'sfa_install', 'sfa_configure', 'cross_sfa_configure', 'sfa_import', 'sfa_start', SEPSFA,
95 'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', 'sfa_create_slice@1', SEPSFA,
96 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', SEPSFA,
97 'sfa_utest_install@1','sfa_utest_run@1',SEPSFA,
98 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
99 # but as the stress test might take a while, we sometimes missed the debug mode..
100 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
101 'ssh_node_boot', 'ssh_slice', 'check_initscripts', SEP,
102 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
103 'check_tcp', 'check_hooks@1', SEP,
104 'force_gather_logs', 'force_local_post', SEP,
107 'show_boxes', 'local_list','local_rel','local_rel_plc','local_rel_qemu',SEP,
108 'plc_stop', 'vs_start', SEP,
109 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
110 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
111 'delete_leases', 'list_leases', SEP,
113 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
114 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_mine', SEP,
115 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEP,
116 'plc_db_dump' , 'plc_db_restore', SEP,
117 'standby_1 through 20',SEP,
# Render a list of step names as a single backslash-continued shell-style line,
# breaking at every separator marker (SEP / SEPSFA).
def printable_steps (list):
    single = " ".join(list) + " "
    single = single.replace(" " + SEP + " ", " \\\n")
    return single.replace(" " + SEPSFA + " ", " \\\n")
# A step name is runnable when it is not one of the separator markers.
def valid_step (step):
    return step not in (SEP, SEPSFA)
128 # turn off the sfa-related steps when build has skipped SFA
# this is originally for centos5 as recent SFAs won't build on this platform
131 def check_whether_build_has_sfa (rpms_url):
132 # warning, we're now building 'sface' so let's be a bit more picky
133 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
134 # full builds are expected to return with 0 here
136 # move all steps containing 'sfa' from default_steps to other_steps
137 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
138 TestPlc.other_steps += sfa_steps
139 for step in sfa_steps: TestPlc.default_steps.remove(step)
141 def __init__ (self,plc_spec,options):
142 self.plc_spec=plc_spec
144 self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
146 self.vserverip=plc_spec['vserverip']
147 self.vservername=plc_spec['vservername']
148 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
151 raise Exception,'chroot-based myplc testing is deprecated'
152 self.apiserver=TestApiserver(self.url,options.dry_run)
155 name=self.plc_spec['name']
156 return "%s.%s"%(name,self.vservername)
159 return self.plc_spec['hostname']
162 return self.test_ssh.is_local()
164 # define the API methods on this object through xmlrpc
165 # would help, but not strictly necessary
# Build the full ssh command line that would execute 'command' inside the guest vserver.
def actual_command_in_guest (self,command):
    guest_command = self.host_to_guest(command)
    return self.test_ssh.actual_command(guest_command)
# Start the guest vserver by running the host-side start command over ssh.
def start_guest (self):
    start_command = self.start_guest_in_host()
    return utils.system(self.test_ssh.actual_command(start_command))
# Execute 'command' inside the guest vserver; returns the exit status from utils.system.
def run_in_guest (self,command):
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
# Execute 'command' on the host box, inside the build directory.
def run_in_host (self,command):
    outcome = self.test_ssh.run_in_buildname(command)
    return outcome
# Wrap 'command' so that, when run on the host, it executes inside the guest vserver.
def host_to_guest(self,command):
    return "vserver " + self.vservername + " exec " + command
# The host-side command that boots the guest vserver (note: runs on the host,
# unlike host_to_guest which wraps a command for execution inside the guest).
def start_guest_in_host(self):
    return "vserver " + self.vservername + " start"
# Run 'local | remote' where the remote half executes inside the guest vserver;
# keep_stdin leaves ssh's stdin open so the pipe actually flows through.
def run_in_guest_piped (self,local,remote):
    remote_command = self.test_ssh.actual_command(self.host_to_guest(remote), keep_stdin=True)
    return utils.system(local + " | " + remote_command)
193 def auth_root (self):
194 return {'Username':self.plc_spec['PLC_ROOT_USER'],
195 'AuthMethod':'password',
196 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
197 'Role' : self.plc_spec['role']
199 def locate_site (self,sitename):
200 for site in self.plc_spec['sites']:
201 if site['site_fields']['name'] == sitename:
203 if site['site_fields']['login_base'] == sitename:
205 raise Exception,"Cannot locate site %s"%sitename
207 def locate_node (self,nodename):
208 for site in self.plc_spec['sites']:
209 for node in site['nodes']:
210 if node['name'] == nodename:
212 raise Exception,"Cannot locate node %s"%nodename
214 def locate_hostname (self,hostname):
215 for site in self.plc_spec['sites']:
216 for node in site['nodes']:
217 if node['node_fields']['hostname'] == hostname:
219 raise Exception,"Cannot locate hostname %s"%hostname
221 def locate_key (self,keyname):
222 for key in self.plc_spec['keys']:
223 if key['name'] == keyname:
225 raise Exception,"Cannot locate key %s"%keyname
227 def locate_slice (self, slicename):
228 for slice in self.plc_spec['slices']:
229 if slice['slice_fields']['name'] == slicename:
231 raise Exception,"Cannot locate slice %s"%slicename
233 def all_sliver_objs (self):
235 for slice_spec in self.plc_spec['slices']:
236 slicename = slice_spec['slice_fields']['name']
237 for nodename in slice_spec['nodenames']:
238 result.append(self.locate_sliver_obj (nodename,slicename))
241 def locate_sliver_obj (self,nodename,slicename):
242 (site,node) = self.locate_node(nodename)
243 slice = self.locate_slice (slicename)
245 test_site = TestSite (self, site)
246 test_node = TestNode (self, test_site,node)
247 # xxx the slice site is assumed to be the node site - mhh - probably harmless
248 test_slice = TestSlice (self, test_site, slice)
249 return TestSliver (self, test_node, test_slice)
251 def locate_first_node(self):
252 nodename=self.plc_spec['slices'][0]['nodenames'][0]
253 (site,node) = self.locate_node(nodename)
254 test_site = TestSite (self, site)
255 test_node = TestNode (self, test_site,node)
# Convenience: the sliver object for the first node of the first slice in the spec.
def locate_first_sliver (self):
    first_slice = self.plc_spec['slices'][0]
    first_nodename = first_slice['nodenames'][0]
    return self.locate_sliver_obj(first_nodename, first_slice['slice_fields']['name'])
264 # all different hostboxes used in this plc
265 def gather_hostBoxes(self):
266 # maps on sites and nodes, return [ (host_box,test_node) ]
268 for site_spec in self.plc_spec['sites']:
269 test_site = TestSite (self,site_spec)
270 for node_spec in site_spec['nodes']:
271 test_node = TestNode (self, test_site, node_spec)
272 if not test_node.is_real():
273 tuples.append( (test_node.host_box(),test_node) )
274 # transform into a dict { 'host_box' -> [ test_node .. ] }
276 for (box,node) in tuples:
277 if not result.has_key(box):
280 result[box].append(node)
283 # a step for checking this stuff
284 def show_boxes (self):
285 'print summary of nodes location'
286 for (box,nodes) in self.gather_hostBoxes().iteritems():
287 print box,":"," + ".join( [ node.name() for node in nodes ] )
290 # make this a valid step
291 def qemu_kill_all(self):
292 'kill all qemu instances on the qemu boxes involved by this setup'
293 # this is the brute force version, kill all qemus on that host box
294 for (box,nodes) in self.gather_hostBoxes().iteritems():
295 # pass the first nodename, as we don't push template-qemu on testboxes
296 nodedir=nodes[0].nodedir()
297 TestBox(box,self.options.buildname).qemu_kill_all(nodedir)
300 # make this a valid step
301 def qemu_list_all(self):
302 'list all qemu instances on the qemu boxes involved by this setup'
303 for (box,nodes) in self.gather_hostBoxes().iteritems():
304 # this is the brute force version, kill all qemus on that host box
305 TestBox(box,self.options.buildname).qemu_list_all()
308 # kill only the right qemus
309 def qemu_list_mine(self):
310 'list qemu instances for our nodes'
311 for (box,nodes) in self.gather_hostBoxes().iteritems():
312 # the fine-grain version
317 # kill only the right qemus
318 def qemu_kill_mine(self):
319 'kill the qemu instances for our nodes'
320 for (box,nodes) in self.gather_hostBoxes().iteritems():
321 # the fine-grain version
326 #################### display config
328 "show test configuration after localization"
329 self.display_pass (1)
330 self.display_pass (2)
334 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
335 def display_pass (self,passno):
336 for (key,val) in self.plc_spec.iteritems():
337 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
341 self.display_site_spec(site)
342 for node in site['nodes']:
343 self.display_node_spec(node)
344 elif key=='initscripts':
345 for initscript in val:
346 self.display_initscript_spec (initscript)
349 self.display_slice_spec (slice)
352 self.display_key_spec (key)
354 if key not in ['sites','initscripts','slices','keys', 'sfa']:
355 print '+ ',key,':',val
357 def display_site_spec (self,site):
358 print '+ ======== site',site['site_fields']['name']
359 for (k,v) in site.iteritems():
360 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
363 print '+ ','nodes : ',
365 print node['node_fields']['hostname'],'',
371 print user['name'],'',
373 elif k == 'site_fields':
374 print '+ login_base',':',v['login_base']
375 elif k == 'address_fields':
381 def display_initscript_spec (self,initscript):
382 print '+ ======== initscript',initscript['initscript_fields']['name']
384 def display_key_spec (self,key):
385 print '+ ======== key',key['name']
387 def display_slice_spec (self,slice):
388 print '+ ======== slice',slice['slice_fields']['name']
389 for (k,v) in slice.iteritems():
402 elif k=='slice_fields':
403 print '+ fields',':',
404 print 'max_nodes=',v['max_nodes'],
# Print a one-line summary of a node spec: name, qemu host box, hostname, ip.
# The trailing commas keep the three prints on a single output line.
def display_node_spec (self,node):
    print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
    print "hostname=",node['node_fields']['hostname'],
    print "ip=",node['interface_fields']['ip']
    if self.options.verbose:
        # full nested dump of the spec, 3 levels deep
        utils.pprint("node details",node,depth=3)
416 # another entry point for just showing the boxes involved
417 def display_mapping (self):
418 TestPlc.display_mapping_plc(self.plc_spec)
# Summarize where a plc spec is deployed: vserver location on the host box,
# its IP, then one line per node via display_mapping_node.
# NOTE(review): presumably a @staticmethod - the decorator is not visible in this view.
def display_mapping_plc (plc_spec):
    print '+ MyPLC',plc_spec['name']
    print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
    print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
    for site_spec in plc_spec['sites']:
        for node_spec in site_spec['nodes']:
            TestPlc.display_mapping_node(node_spec)
# Summarize one node spec: its name, the qemu box hosting it, and its hostname.
# NOTE(review): presumably a @staticmethod - the decorator is not visible in this view.
def display_mapping_node (node_spec):
    print '+ NODE %s'%(node_spec['name'])
    print '+\tqemu box %s'%node_spec['host_box']
    print '+\thostname=%s'%node_spec['node_fields']['hostname']
def local_pre (self):
    "run site-dependant pre-test script as defined in LocalTestResources"
    # imported lazily: the site-local module only needs to exist at run time
    import LocalTestResources
    return LocalTestResources.local_resources.step_pre(self)
def local_post (self):
    "run site-dependant post-test script as defined in LocalTestResources"
    # imported lazily: the site-local module only needs to exist at run time
    import LocalTestResources
    return LocalTestResources.local_resources.step_post(self)
def local_list (self):
    "run site-dependant list script as defined in LocalTestResources"
    # imported lazily: the site-local module only needs to exist at run time
    import LocalTestResources
    return LocalTestResources.local_resources.step_list(self)
def local_rel (self):
    "run site-dependant release script as defined in LocalTestResources"
    # imported lazily: the site-local module only needs to exist at run time
    import LocalTestResources
    return LocalTestResources.local_resources.step_release(self)
def local_rel_plc (self):
    "run site-dependant release script as defined in LocalTestResources"
    # imported lazily: the site-local module only needs to exist at run time
    import LocalTestResources
    return LocalTestResources.local_resources.step_release_plc(self)
def local_rel_qemu (self):
    "run site-dependant release script as defined in LocalTestResources"
    # imported lazily: the site-local module only needs to exist at run time
    import LocalTestResources
    return LocalTestResources.local_resources.step_release_qemu(self)
467 "vserver delete the test myplc"
468 self.run_in_host("vserver --silent %s delete"%self.vservername)
472 # historically the build was being fetched by the tests
473 # now the build pushes itself as a subdir of the tests workdir
474 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
475 def vs_create (self):
476 "vserver creation (no install done)"
477 # push the local build/ dir to the testplc box
479 # a full path for the local calls
480 build_dir=os.path.dirname(sys.argv[0])
481 # sometimes this is empty - set to "." in such a case
482 if not build_dir: build_dir="."
483 build_dir += "/build"
485 # use a standard name - will be relative to remote buildname
487 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
488 self.test_ssh.rmdir(build_dir)
489 self.test_ssh.copy(build_dir,recursive=True)
490 # the repo url is taken from arch-rpms-url
491 # with the last step (i386) removed
492 repo_url = self.options.arch_rpms_url
493 for level in [ 'arch' ]:
494 repo_url = os.path.dirname(repo_url)
495 # pass the vbuild-nightly options to vtest-init-vserver
497 test_env_options += " -p %s"%self.options.personality
498 test_env_options += " -d %s"%self.options.pldistro
499 test_env_options += " -f %s"%self.options.fcdistro
500 script="vtest-init-vserver.sh"
501 vserver_name = self.vservername
502 vserver_options="--netdev eth0 --interface %s"%self.vserverip
504 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
505 vserver_options += " --hostname %s"%vserver_hostname
507 print "Cannot reverse lookup %s"%self.vserverip
508 print "This is considered fatal, as this might pollute the test results"
510 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
511 return self.run_in_host(create_vserver) == 0
514 def plc_install(self):
515 "yum install myplc, noderepo, and the plain bootstrapfs"
517 # workaround for getting pgsql8.2 on centos5
518 if self.options.fcdistro == "centos5":
519 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
522 if self.options.personality == "linux32":
524 elif self.options.personality == "linux64":
527 raise Exception, "Unsupported personality %r"%self.options.personality
528 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
531 pkgs_list.append ("slicerepo-%s"%nodefamily)
532 pkgs_list.append ("myplc")
533 pkgs_list.append ("noderepo-%s"%nodefamily)
534 pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
535 pkgs_string=" ".join(pkgs_list)
536 self.run_in_guest("yum -y install %s"%pkgs_string)
537 return self.run_in_guest("rpm -q %s"%pkgs_string)==0
540 def plc_configure(self):
542 tmpname='%s.plc-config-tty'%(self.name())
543 fileconf=open(tmpname,'w')
544 for var in [ 'PLC_NAME',
549 'PLC_MAIL_SUPPORT_ADDRESS',
552 # Above line was added for integrating SFA Testing
558 'PLC_RESERVATION_GRANULARITY',
561 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
562 fileconf.write('w\n')
563 fileconf.write('q\n')
565 utils.system('cat %s'%tmpname)
566 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
567 utils.system('rm %s'%tmpname)
572 self.run_in_guest('service plc start')
577 self.run_in_guest('service plc stop')
581 "start the PLC vserver"
585 # stores the keys from the config for further use
586 def keys_store(self):
587 "stores test users ssh keys in keys/"
588 for key_spec in self.plc_spec['keys']:
589 TestKey(self,key_spec).store_key()
592 def keys_clean(self):
593 "removes keys cached in keys/"
594 utils.system("rm -rf ./keys")
597 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
598 # for later direct access to the nodes
599 def keys_fetch(self):
600 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
602 if not os.path.isdir(dir):
604 vservername=self.vservername
606 prefix = 'debug_ssh_key'
607 for ext in [ 'pub', 'rsa' ] :
608 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
609 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
610 if self.test_ssh.fetch(src,dst) != 0: overall=False
614 "create sites with PLCAPI"
615 return self.do_sites()
def delete_sites (self):
    "delete sites with PLCAPI"
    # same traversal as the 'sites' step, with the delete action
    return self.do_sites("delete")
621 def do_sites (self,action="add"):
622 for site_spec in self.plc_spec['sites']:
623 test_site = TestSite (self,site_spec)
624 if (action != "add"):
625 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
626 test_site.delete_site()
627 # deleted with the site
628 #test_site.delete_users()
631 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
632 test_site.create_site()
633 test_site.create_users()
636 def delete_all_sites (self):
637 "Delete all sites in PLC, and related objects"
638 print 'auth_root',self.auth_root()
639 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
640 for site_id in site_ids:
641 print 'Deleting site_id',site_id
642 self.apiserver.DeleteSite(self.auth_root(),site_id)
646 "create nodes with PLCAPI"
647 return self.do_nodes()
def delete_nodes (self):
    "delete nodes with PLCAPI"
    # same traversal as the 'nodes' step, with the delete action
    return self.do_nodes("delete")
652 def do_nodes (self,action="add"):
653 for site_spec in self.plc_spec['sites']:
654 test_site = TestSite (self,site_spec)
656 utils.header("Deleting nodes in site %s"%test_site.name())
657 for node_spec in site_spec['nodes']:
658 test_node=TestNode(self,test_site,node_spec)
659 utils.header("Deleting %s"%test_node.name())
660 test_node.delete_node()
662 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
663 for node_spec in site_spec['nodes']:
664 utils.pprint('Creating node %s'%node_spec,node_spec)
665 test_node = TestNode (self,test_site,node_spec)
666 test_node.create_node ()
def nodegroups (self):
    "create nodegroups with PLCAPI"
    # do_nodegroups handles both directions; here we add
    return self.do_nodegroups(action="add")
def delete_nodegroups (self):
    "delete nodegroups with PLCAPI"
    # do_nodegroups handles both directions; here we delete
    return self.do_nodegroups(action="delete")
# Interpret 'timestamp' as relative to 'start' (in units of 'grain') when it is
# small (below one year's worth of seconds), otherwise as an absolute timestamp.
def translate_timestamp (start,grain,timestamp):
    if timestamp >= TestPlc.YEAR:
        return timestamp
    return start + timestamp*grain
# Human-readable UTC rendering of a unix timestamp (month-day hh:mm:ss).
def timestamp_printable (timestamp):
    utc_tuple = time.gmtime(timestamp)
    return time.strftime('%m-%d %H:%M:%S UTC', utc_tuple)
687 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
689 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
690 print 'API answered grain=',grain
691 start=(now/grain)*grain
693 # find out all nodes that are reservable
694 nodes=self.all_reservable_nodenames()
696 utils.header ("No reservable node found - proceeding without leases")
699 # attach them to the leases as specified in plc_specs
# this is where the 'leases' field gets interpreted as relative or absolute
701 for lease_spec in self.plc_spec['leases']:
702 # skip the ones that come with a null slice id
703 if not lease_spec['slice']: continue
704 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
705 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
706 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
707 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
708 if lease_addition['errors']:
709 utils.header("Cannot create leases, %s"%lease_addition['errors'])
712 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
713 (nodes,lease_spec['slice'],
714 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
715 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
719 def delete_leases (self):
720 "remove all leases in the myplc side"
721 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
722 utils.header("Cleaning leases %r"%lease_ids)
723 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
726 def list_leases (self):
727 "list all leases known to the myplc"
728 leases = self.apiserver.GetLeases(self.auth_root())
731 current=l['t_until']>=now
732 if self.options.verbose or current:
733 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
734 TestPlc.timestamp_printable(l['t_from']),
735 TestPlc.timestamp_printable(l['t_until'])))
738 # create nodegroups if needed, and populate
739 def do_nodegroups (self, action="add"):
740 # 1st pass to scan contents
742 for site_spec in self.plc_spec['sites']:
743 test_site = TestSite (self,site_spec)
744 for node_spec in site_spec['nodes']:
745 test_node=TestNode (self,test_site,node_spec)
746 if node_spec.has_key('nodegroups'):
747 nodegroupnames=node_spec['nodegroups']
748 if isinstance(nodegroupnames,StringTypes):
749 nodegroupnames = [ nodegroupnames ]
750 for nodegroupname in nodegroupnames:
751 if not groups_dict.has_key(nodegroupname):
752 groups_dict[nodegroupname]=[]
753 groups_dict[nodegroupname].append(test_node.name())
754 auth=self.auth_root()
756 for (nodegroupname,group_nodes) in groups_dict.iteritems():
758 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
759 # first, check if the nodetagtype is here
760 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
762 tag_type_id = tag_types[0]['tag_type_id']
764 tag_type_id = self.apiserver.AddTagType(auth,
765 {'tagname':nodegroupname,
766 'description': 'for nodegroup %s'%nodegroupname,
768 print 'located tag (type)',nodegroupname,'as',tag_type_id
770 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
772 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
773 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
774 # set node tag on all nodes, value='yes'
775 for nodename in group_nodes:
777 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
779 traceback.print_exc()
780 print 'node',nodename,'seems to already have tag',nodegroupname
783 expect_yes = self.apiserver.GetNodeTags(auth,
784 {'hostname':nodename,
785 'tagname':nodegroupname},
786 ['value'])[0]['value']
787 if expect_yes != "yes":
788 print 'Mismatch node tag on node',nodename,'got',expect_yes
791 if not self.options.dry_run:
792 print 'Cannot find tag',nodegroupname,'on node',nodename
796 print 'cleaning nodegroup',nodegroupname
797 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
799 traceback.print_exc()
803 # return a list of tuples (nodename,qemuname)
804 def all_node_infos (self) :
806 for site_spec in self.plc_spec['sites']:
807 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
808 for node_spec in site_spec['nodes'] ]
# The hostnames of all nodes in this plc (first element of each node_info pair).
def all_nodenames (self):
    return [ hostname for (hostname, _box) in self.all_node_infos() ]
812 def all_reservable_nodenames (self):
814 for site_spec in self.plc_spec['sites']:
815 for node_spec in site_spec['nodes']:
816 node_fields=node_spec['node_fields']
817 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
818 res.append(node_fields['hostname'])
821 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
822 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
823 if self.options.dry_run:
827 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
828 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
829 # the nodes that haven't checked yet - start with a full list and shrink over time
830 tocheck = self.all_hostnames()
831 utils.header("checking nodes %r"%tocheck)
832 # create a dict hostname -> status
833 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
836 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
838 for array in tocheck_status:
839 hostname=array['hostname']
840 boot_state=array['boot_state']
841 if boot_state == target_boot_state:
842 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
844 # if it's a real node, never mind
845 (site_spec,node_spec)=self.locate_hostname(hostname)
846 if TestNode.is_real_model(node_spec['node_fields']['model']):
847 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
849 boot_state = target_boot_state
850 elif datetime.datetime.now() > graceout:
851 utils.header ("%s still in '%s' state"%(hostname,boot_state))
852 graceout=datetime.datetime.now()+datetime.timedelta(1)
853 status[hostname] = boot_state
855 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
858 if datetime.datetime.now() > timeout:
859 for hostname in tocheck:
860 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
862 # otherwise, sleep for a while
864 # only useful in empty plcs
# Wait (up to 30 mn, silent for the first 20) until every node reaches 'boot'.
def nodes_booted(self):
    return self.nodes_check_boot_state('boot', 30, 20)
870 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
872 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
873 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
874 vservername=self.vservername
877 local_key = "keys/%(vservername)s-debug.rsa"%locals()
880 local_key = "keys/key1.rsa"
881 node_infos = self.all_node_infos()
882 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
883 for (nodename,qemuname) in node_infos:
884 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
885 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
886 (timeout_minutes,silent_minutes,period))
888 for node_info in node_infos:
889 (hostname,qemuname) = node_info
890 # try to run 'hostname' in the node
891 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
892 # don't spam logs - show the command only after the grace period
893 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
895 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
897 node_infos.remove(node_info)
899 # we will have tried real nodes once, in case they're up - but if not, just skip
900 (site_spec,node_spec)=self.locate_hostname(hostname)
901 if TestNode.is_real_model(node_spec['node_fields']['model']):
902 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
903 node_infos.remove(node_info)
906 if datetime.datetime.now() > timeout:
907 for (hostname,qemuname) in node_infos:
908 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
910 # otherwise, sleep for a while
912 # only useful in empty plcs
def ssh_node_debug(self):
    "Tries to ssh into nodes in debug mode with the debug ssh key"
    # short timeouts: debug mode is expected to come up early
    return self.check_nodes_ssh(True, 10, 5)
def ssh_node_boot(self):
    "Tries to ssh into nodes in production mode with the root ssh key"
    # longer timeouts: nodes may still be (re)installing
    return self.check_nodes_ssh(False, 40, 15)
924 def qemu_local_init (self):
925 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
929 "all nodes: invoke GetBootMedium and store result locally"
932 def qemu_local_config (self):
933 "all nodes: compute qemu config qemu.conf and store it locally"
936 def nodestate_reinstall (self):
937 "all nodes: mark PLCAPI boot_state as reinstall"
940 def nodestate_safeboot (self):
941 "all nodes: mark PLCAPI boot_state as safeboot"
944 def nodestate_boot (self):
945 "all nodes: mark PLCAPI boot_state as boot"
948 def nodestate_show (self):
949 "all nodes: show PLCAPI boot_state"
952 def qemu_export (self):
953 "all nodes: push local node-dep directory on the qemu box"
956 ### check hooks : invoke scripts from hooks/{node,slice}
# Run the hooks/node unit tests on the first node of the spec.
def check_hooks_node (self):
    first_node = self.locate_first_node()
    return first_node.check_hooks()
# Run the hooks/slice unit tests on the first sliver of the spec.
def check_hooks_sliver (self) :
    first_sliver = self.locate_first_sliver()
    return first_sliver.check_hooks()
def check_hooks (self):
    "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
    # short-circuit preserved: sliver hooks only run when node hooks pass
    node_result = self.check_hooks_node()
    return node_result and self.check_hooks_sliver()
967 def do_check_initscripts(self):
969 for slice_spec in self.plc_spec['slices']:
970 if not slice_spec.has_key('initscriptname'):
972 initscript=slice_spec['initscriptname']
973 for nodename in slice_spec['nodenames']:
974 (site,node) = self.locate_node (nodename)
975 # xxx - passing the wrong site - probably harmless
976 test_site = TestSite (self,site)
977 test_slice = TestSlice (self,test_site,slice_spec)
978 test_node = TestNode (self,test_site,node)
979 test_sliver = TestSliver (self, test_node, test_slice)
980 if not test_sliver.check_initscript(initscript):
def check_initscripts(self):
    "check that the initscripts have triggered"
    # delegate to the worker that walks slices/nodes and probes each sliver
    outcome = self.do_check_initscripts()
    return outcome
988 def initscripts (self):
989 "create initscripts with PLCAPI"
990 for initscript in self.plc_spec['initscripts']:
991 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
992 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
995 def delete_initscripts (self):
996 "delete initscripts with PLCAPI"
997 for initscript in self.plc_spec['initscripts']:
998 initscript_name = initscript['initscript_fields']['name']
999 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1001 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1002 print initscript_name,'deleted'
1004 print 'deletion went wrong - probably did not exist'
1009 "create slices with PLCAPI"
1010 return self.do_slices()
def delete_slices (self):
    "delete slices with PLCAPI"
    # same traversal as the 'slices' step, with the delete action
    return self.do_slices(action="delete")
def do_slices (self, action="add"):
    # Shared implementation behind the 'slices' and 'delete_slices' steps.
    # action: "add" creates each slice from the spec, anything else deletes it.
    # NOTE(review): the if/else lines selecting between the delete and
    # create branches, and the final 'return True', are elided from this
    # listing.
    for slice in self.plc_spec['slices']:
        site_spec = self.locate_site (slice['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice)
        # delete branch
        utils.header("Deleting slices in site %s"%test_site.name())
        test_slice.delete_slice()
        # create branch
        utils.pprint("Creating slice",slice)
        test_slice.create_slice()
        utils.header('Created Slice %s'%slice['slice_fields']['name'])
# NOTE(review): the next three steps are stubs; their decorators
# (presumably the slice/node mappers defined at the top of this file --
# confirm) and their 'pass' bodies are elided from this listing.  The
# actual work is done by same-named methods on TestSlice / TestNode.
def ssh_slice(self):
    "tries to ssh-enter the slice with the user key, to ensure slice creation"
def keys_clear_known_hosts (self):
    "remove test nodes entries from the local known_hosts file"
def qemu_start (self) :
    "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
def check_tcp (self):
    "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
    specs = self.plc_spec['tcp_test']
    overall=True
    for spec in specs:
        port = spec['port']
        # locate the sliver playing the server role and start a listener there
        s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
        if not s_test_sliver.run_tcp_server(port,timeout=10):
            overall=False
            break
        # idem for the client side
        # BUGFIX: the client sliver used to be located with the *server*
        # keys (spec['server_node'],spec['server_slice']), so the test
        # always ran in loopback even when a distinct client slice/node
        # was configured in tcp_test
        c_test_sliver = self.locate_sliver_obj(spec['client_node'],spec['client_slice'])
        if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
            overall=False
    return overall
def plcsh_stress_test (self):
    "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # NOTE(review): the line initializing 'command' (presumably
    # 'command = location' -- confirm against the full source) is elided
    # from this listing
    command += " -- --check"
    if self.options.size == 1:
        # small deployments get the lightweight variant of the test
        command += " --tiny"
    return ( self.run_in_guest(command) == 0)
# populate runs the same utility with slightly different options
# in particular runs with --preserve (don't cleanup) and without --check
# also it gets run twice, once with the --foreign option for creating fake foreign entries
def sfa_install(self):
    "yum install sfa, sfa-plc and sfa-client"
    # install the four sfa packages, then confirm every rpm landed
    self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")
    query_rc = self.run_in_guest("rpm -q sfa sfa-client sfa-plc sfa-sfatables")
    return query_rc == 0
def sfa_dbclean(self):
    "thoroughly wipes off the SFA database"
    # BUGFIX: the '==0' comparison was computed and then discarded, so
    # this step always returned None (falsy, i.e. reported as failed);
    # step methods must return a boolean (see the note at the top of
    # this file)
    return self.run_in_guest("sfa-nuke-plc.py")==0
def sfa_plcclean(self):
    "cleans the PLC entries that were created as a side effect of running the script"
    # Best-effort removal of the slice and the user that the sfa steps
    # created on the PLC side.  NOTE(review): a few lines (blank lines
    # and, presumably, a trailing 'return True') are elided from this
    # listing.
    sfa_spec=self.plc_spec['sfa']
    # delete the test slice
    slicename='%s_%s'%(sfa_spec['login_base'],sfa_spec['slicename'])
    try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
    # NOTE(review): bare 'except' also swallows unexpected API errors
    except: print "Slice %s already absent from PLC db"%slicename
    # delete the test user
    username="%s@%s"%(sfa_spec['regularuser'],sfa_spec['domain'])
    try: self.apiserver.DeletePerson(self.auth_root(),username)
    except: print "User %s already absent from PLC db"%username
    print "REMEMBER TO RUN sfa_import AGAIN"
def sfa_uninstall(self):
    "uses rpm to uninstall sfa - ignore result"
    # erase the packages, then wipe state, config and log files left behind
    self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
    self.run_in_guest("rm -rf /var/lib/sfa")
    self.run_in_guest("rm -rf /etc/sfa")
    self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
    # last resort if the regular erase failed: skip the rpm scriptlets
    # NOTE(review): the docstring says "ignore result" -- no return value
    # is computed here; trailing lines are elided from this listing
    self.run_in_guest("rpm -e --noscripts sfa-plc")
def sfa_utest_install(self):
    "yum install sfa-tests"
    # pull in the unit-test package, then verify the rpm is present
    self.run_in_guest("yum -y install sfa-tests")
    query_rc = self.run_in_guest("rpm -q sfa-tests")
    return query_rc == 0
def sfa_utest_run(self):
    # runs the test suite shipped by the sfa-tests rpm inside the guest;
    # NOTE(review): the original docstring line is elided from this listing
    return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
# NOTE(review): the enclosing 'def confdir(self):' line and the final
# 'return dirname' are elided from this listing.  Ensures a per-plc
# local directory conf.<plcname> exists for generated config files.
dirname="conf.%s"%self.plc_spec['name']
if not os.path.isdir(dirname):
    utils.system("mkdir -p %s"%dirname)
if not os.path.isdir(dirname):
    # NOTE(review): raising a plain string is invalid on Python 3 and
    # deprecated on Python 2 -- should raise an Exception instance
    raise "Cannot create config dir for plc %s"%self.name()
def conffile(self,filename):
    # path of a config file living under this plc's local conf dir
    parent = self.confdir()
    return "%s/%s" % (parent, filename)
def confsubdir(self,dirname,clean):
    # Ensure a subdirectory of the conf dir exists, optionally wiping it
    # first.  NOTE(review): the 'if clean:' guard above the rm -rf, and
    # the trailing 'return subdirname', are elided from this listing.
    subdirname="%s/%s"%(self.confdir(),dirname)
    utils.system("rm -rf %s"%subdirname)
    if not os.path.isdir(subdirname):
        utils.system("mkdir -p %s"%subdirname)
    if not os.path.isdir(subdirname):
        # NOTE(review): string exception -- invalid on Python 3,
        # deprecated on Python 2; should raise an Exception instance
        raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
def conffile_clean (self,filename):
    # remove a previously generated file from the local conf dir
    target = self.conffile(filename)
    return utils.system("rm -rf %s" % target) == 0
def sfa_configure(self):
    "run sfa-config-tty"
    # Generate an answer file locally, then pipe it through sfa-config-tty
    # inside the guest.  NOTE(review): this listing elides several lines:
    # additional variables in the list below, the fileconf.close() call,
    # and the step's final return.
    tmpname=self.conffile("sfa-config-tty")
    fileconf=open(tmpname,'w')
    for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                 'SFA_INTERFACE_HRN',
                 # 'SFA_REGISTRY_LEVEL1_AUTH',
                 'SFA_REGISTRY_HOST',
                 'SFA_AGGREGATE_HOST',
                 'SFA_PLC_DB_PASSWORD',
        # 'e VAR' puts sfa-config-tty in edit mode for VAR; the next
        # line supplies its new value
        fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
    # the way plc_config handles booleans just sucks..
    for var in ['SFA_API_DEBUG']:
        # NOTE(review): the line handling the false case (presumably
        # setting val='false') is elided from this listing
        if self.plc_spec['sfa'][var]: val='true'
        fileconf.write ('e %s\n%s\n'%(var,val))
    # w = write config, R = restart services, q = quit the tty tool
    fileconf.write('w\n')
    fileconf.write('R\n')
    fileconf.write('q\n')
    # echo the answer file locally for the logs, then replay it remotely
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
def aggregate_xml_line(self):
    # one <aggregate/> element advertising this plc's aggregate endpoint
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<aggregate addr="%s" hrn="%s" port="12346"/>' % (self.vserverip, hrn)
def registry_xml_line(self):
    # one <registry/> element advertising this plc's registry endpoint
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (self.vserverip, hrn)
# a cross step that takes all other plcs in argument
def cross_sfa_configure(self, other_plcs):
    "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
    # of course with a single plc, other_plcs is an empty list
    # build both xml files locally from the peers' xml-line helpers,
    # then push them into the guest's /etc/sfa
    # NOTE(review): py2-only 'file()' builtin; the handles are never
    # closed explicitly (relies on refcounting)
    agg_fname=self.conffile("agg.xml")
    file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
        " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%agg_fname)
    reg_fname=self.conffile("reg.xml")
    file(reg_fname,"w").write("<registries>%s</registries>\n" % \
        " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%reg_fname)
    # step result: both copies into the guest must succeed
    return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
       and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
def sfa_import(self):
    "import the PLC db into the SFA registry with sfa-import-plc.py"
    # CLEANUP: 'auth' was assigned from the spec but never used -- it only
    # served a commented-out key-copying command that the original comment
    # already declared "not needed anymore"; both are removed
    return self.run_in_guest('sfa-import-plc.py')==0
def sfa_start(self):
    # start the sfa service inside the guest; the step passes when the
    # init script exits 0.  NOTE(review): the original docstring line is
    # elided from this listing
    return self.run_in_guest('service sfa start')==0
def sfi_configure(self):
    "Create /root/.sfi on the plc side"
    # Builds a local dot-sfi directory (PI private key, sfi_config,
    # person.xml, slice.xml, slice.rspec) and pushes the whole tree to
    # /root/.sfi inside the guest.  NOTE(review): this listing elides
    # several lines -- the fileconf.close() calls, the loop bodies that
    # bind slice_record / slice_rspec, and the final 'return True'.
    sfa_spec=self.plc_spec['sfa']
    # NOTE(review): bare string literal below is a no-op statement, not
    # a docstring (it does not immediately follow the def line)
    "sfi client configuration"
    dir_name=self.confsubdir("dot-sfi",clean=True)
    # the PI's private key, taken from the first key in the test spec
    file_name=dir_name + os.sep + sfa_spec['piuser'] + '.pkey'
    fileconf=open(file_name,'w')
    fileconf.write (self.plc_spec['keys'][0]['private'])
    utils.header ("(Over)wrote %s"%file_name)
    # sfi_config proper: authority, user, registry and SM endpoints
    file_name=dir_name + os.sep + 'sfi_config'
    fileconf=open(file_name,'w')
    SFI_AUTH="%s.%s"%(sfa_spec['SFA_REGISTRY_ROOT_AUTH'],sfa_spec['login_base'])
    fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
    fileconf.write('\n')
    SFI_USER=SFI_AUTH + '.' + sfa_spec['piuser']
    fileconf.write ("SFI_USER='%s'"%SFI_USER)
    fileconf.write('\n')
    # registry listens on 12345, slice manager on 12347 (cf. the
    # registry/aggregate xml-line helpers above)
    SFI_REGISTRY='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12345/'
    fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
    fileconf.write('\n')
    SFI_SM='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12347/'
    fileconf.write ("SFI_SM='%s'"%SFI_SM)
    fileconf.write('\n')
    utils.header ("(Over)wrote %s"%file_name)
    # person.xml: one record per entry in the spec
    file_name=dir_name + os.sep + 'person.xml'
    fileconf=open(file_name,'w')
    for record in sfa_spec['sfa_person_xml']:
        person_record=record
        fileconf.write(person_record)
        fileconf.write('\n')
    utils.header ("(Over)wrote %s"%file_name)
    # slice.xml -- NOTE(review): the line binding 'slice_record' inside
    # the loop is elided from this listing
    file_name=dir_name + os.sep + 'slice.xml'
    fileconf=open(file_name,'w')
    for record in sfa_spec['sfa_slice_xml']:
    #slice_record=sfa_spec['sfa_slice_xml']
    fileconf.write(slice_record)
    fileconf.write('\n')
    utils.header ("(Over)wrote %s"%file_name)
    # slice.rspec -- NOTE(review): the line building 'slice_rspec' from
    # (key, value) is elided from this listing
    file_name=dir_name + os.sep + 'slice.rspec'
    fileconf=open(file_name,'w')
    for (key, value) in sfa_spec['sfa_slice_rspec'].items():
    fileconf.write(slice_rspec)
    fileconf.write('\n')
    utils.header ("(Over)wrote %s"%file_name)
    # push to the remote root's .sfi
    location = "root/.sfi"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs(dir_name, remote, recursive=True)
def sfi_clean (self):
    "clean up /root/.sfi on the plc side"
    # BUGFIX: the removal's exit status was discarded, so this step
    # returned None (reported as failure); step methods must return a
    # boolean (see the note at the top of this file)
    return self.run_in_guest("rm -rf /root/.sfi")==0
def sfa_add_user(self):
    "run sfi.py add using person.xml"
    # delegate to the sfa user helper built around this plc
    helper = TestUserSfa(self)
    return helper.add_user()
def sfa_update_user(self):
    "run sfi.py update using person.xml"
    # delegate to the sfa user helper built around this plc
    helper = TestUserSfa(self)
    return helper.update_user()
# NOTE(review): the next four steps are stubs; their decorators and
# bodies are elided from this listing.  Note: 'resouces_in.rspec' (sic)
# likely mirrors an actual misspelled filename -- do not "fix" blindly.
def sfa_add_slice(self):
    "run sfi.py add (on Registry) from slice.xml"
def sfa_discover(self):
    "discover resources into resouces_in.rspec"
def sfa_create_slice(self):
    "run sfi.py create (on SM) - 1st time"
def sfa_update_slice(self):
    "run sfi.py create (on SM) on existing object"
# NOTE(review): the enclosing 'def' line and the 'return \' continuation
# are elided from this listing; every sfi.py invocation in the chain
# below must exit 0 for the step to pass
"run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
sfa_spec=self.plc_spec['sfa']
auth=sfa_spec['SFA_REGISTRY_ROOT_AUTH']
self.run_in_guest("sfi.py -d /root/.sfi/ list %s.%s"%(auth,sfa_spec['login_base']))==0 and \
self.run_in_guest("sfi.py -d /root/.sfi/ show %s.%s"%(auth,sfa_spec['login_base']))==0 and \
self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0
# NOTE(review): stub step -- its decorator and 'pass' body are elided
# from this listing
def ssh_slice_sfa(self):
    "tries to ssh-enter the SFA slice"
def sfa_delete_user(self):
    "run sfi.py delete (on SM) for user"
    # delegate to the sfa user helper, as sfa_add_user/sfa_update_user do
    return TestUserSfa(self).delete_user()
def sfa_delete_slice(self):
    "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
    # NOTE(review): this step's body is elided from this listing, as are
    # the 'def' line and docstring of the following step (presumably
    # sfa_stop) to which the statement below belongs.
    # NOTE(review): the '==0' result below is discarded -- the enclosing
    # (elided) step presumably needs a 'return' here
    self.run_in_guest('service sfa stop')==0
def populate (self):
    "creates random entries in the PLCAPI"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # NOTE(review): the line initializing 'command' (presumably
    # 'command = location' -- confirm) is elided from this listing
    # first run: populate while preserving contents, no --check
    command += " -- --preserve --short-names"
    local = (self.run_in_guest(command) == 0);
    # second run with --foreign
    # NOTE(review): 'remote' is reused here as a boolean, clobbering the
    # path computed above -- confusing but harmless at this point
    command += ' --foreign'
    remote = (self.run_in_guest(command) == 0);
    return ( local and remote)
def gather_logs (self):
    "gets all possible logs from plc's/qemu node's/slice's for future reference"
    # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
    # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
    # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
    # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
    # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
    # NOTE(review): the final 'return True' expected of a step method is
    # elided from this listing
    # (1.a)
    print "-------------------- TestPlc.gather_logs : PLC's /var/log"
    self.gather_var_logs ()
    # (1.b)
    print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
    self.gather_pgsql_logs ()
    # (2)
    print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            test_node.gather_qemu_logs()
    # (3)
    print "-------------------- TestPlc.gather_logs : nodes's /var/log"
    self.gather_nodes_var_logs()
    # (4)
    print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
    self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    # fetch each sliver's /var/log tree into logs/sliver.var-log.<name>/
    for sliver in self.all_sliver_objs():
        tar_cmd = sliver.tar_var_logs()
        dest = "logs/sliver.var-log.%s" % sliver.name()
        utils.system("mkdir -p %s" % dest)
        utils.system(tar_cmd + " | tar -C %s -xf -" % dest)
def gather_var_logs (self):
    # copy the guest's /var/log into logs/myplc.var-log.<plcname>/
    dest = "logs/myplc.var-log.%s" % self.name()
    utils.system("mkdir -p %s" % dest)
    tar_out = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -" % dest)
    # open up the httpd subdir so the harvested logs can be browsed
    utils.system("chmod a+r,a+x %s/httpd" % dest)
def gather_pgsql_logs (self):
    # copy the guest's postgres logs into logs/myplc.pgsql-log.<plcname>/
    dest = "logs/myplc.pgsql-log.%s" % self.name()
    utils.system("mkdir -p %s" % dest)
    tar_out = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -" % dest)
def gather_nodes_var_logs (self):
    # fetch each node's /var/log over ssh (key1) into logs/node.var-log.<node>/
    for site_spec in self.plc_spec['sites']:
        site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode(self,site,node_spec)
            ssh = TestSsh (node.name(),key="keys/key1.rsa")
            dest = "logs/node.var-log.%s" % node.name()
            pipeline = ssh.actual_command("tar -C /var/log -cf - .") + "| tar -C %s -xf -" % dest
            utils.system("mkdir -p %s" % dest)
            utils.system(pipeline)
# returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    # uses options.dbname if it is found
    # NOTE(review): several lines are elided from this listing -- the
    # guard testing whether options carries 'dbname' at all, and the
    # fallback that builds a timestamp-based name from 't' when dbname
    # is not a string
    name=self.options.dbname
    if not isinstance(name,StringTypes):
    t=datetime.datetime.now()
    return "/root/%s-%s.sql"%(database,name)
def plc_db_dump(self):
    'dump the planetlab5 DB in /root in the PLC - filename has time'
    # NOTE: "planetab5" (sic) is deliberately left as-is -- plc_db_restore
    # looks the dump up under the same (misspelled) basename
    dump=self.dbfile("planetab5")
    self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
    utils.header('Dumped planetlab5 database in %s'%dump)
    # step methods must return a boolean (see note at the top of the file)
    return True
def plc_db_restore(self):
    'restore the planetlab5 DB - looks broken, but run -n might help'
    # "planetab5" (sic) must match the basename used by plc_db_dump
    dump=self.dbfile("planetab5")
    # stop httpd while the database is swapped out from under it
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
    self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
    # bring httpd back up
    self.run_in_guest('service httpd start')
    utils.header('Database restored from ' + dump)
    # step methods must return a boolean (see note at the top of the file)
    return True
# Timer steps: each stub is (per the elided alternating lines of this
# listing, presumably '@standby_generic' decorators -- confirm) wrapped
# by standby_generic at the top of the file, which parses the trailing
# integer out of the function name and sleeps that many minutes.  The
# 'pass' bodies are the whole implementation.
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass