# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA

import os, sys, time
import datetime
import traceback
import socket
from types import StringTypes

import utils
from TestSite import TestSite
from TestNode import TestNode
from TestUser import TestUser
from TestKey import TestKey
from TestSlice import TestSlice
from TestSliver import TestSliver
from TestBox import TestBox
from TestSsh import TestSsh
from TestApiserver import TestApiserver
from TestSliceSfa import TestSliceSfa
from TestUserSfa import TestUserSfa

# step methods must take (self) and return a boolean (options is a member of the class)

def standby (minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    if dry_run:
        print 'dry_run'
    else:
        time.sleep(60*minutes)
    return True

def standby_generic (func):
    def actual(self):
        minutes=int(func.__name__.split("_")[1])
        return standby(minutes,self.options.dry_run)
    return actual
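
# standby_generic turns a no-op method named standby_<N> into a step that
# sleeps N minutes - see the standby_1 .. standby_20 stubs at the end of this
# file; the sleep duration is parsed out of the method name itself.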

def node_mapper (method):
    def actual(self):
        overall=True
        node_method = TestNode.__dict__[method.__name__]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self,test_site,node_spec)
                if not node_method(test_node): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual

def slice_mapper (method):
    def actual(self):
        overall=True
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual

def slice_sfa_mapper (method):
    def actual(self):
        overall=True
        slice_method = TestSliceSfa.__dict__[method.__name__]
        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSliceSfa(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual
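
# Usage sketch for the mappers above (illustrative, not an actual step from
# this file): a step declared on TestPlc gets delegated to the same-named
# method on the per-node (or per-slice) class, e.g.
#
#     @node_mapper
#     def some_node_step (self):
#         "docstring shown in the step listing"
#         pass
#
# node_mapper then looks up TestNode.some_node_step and runs it once per node;
# the step as a whole succeeds only if every per-node call returned True.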

# separators used when displaying the list of steps
SEP='<sep>'
SEPSFA='<sep-sfa>'

class TestPlc:

    default_steps = [
        'show', 'local_pre', SEP,
        'vs_delete','vs_create','plc_install', 'plc_configure', 'plc_start', SEP,
        'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
        'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', 'qemu_export', 'qemu_kill_all', 'qemu_start', SEP,
        'sfa_install', 'sfa_configure', 'cross_sfa_configure', 'sfa_import', 'sfa_start', SEPSFA,
        'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', 'sfa_create_slice@1', SEPSFA,
        'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', SEPSFA,
        'sfa_utest_install@1','sfa_utest_run@1',SEPSFA,
        # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
        # but as the stress test might take a while, we sometimes missed the debug mode..
        'ssh_node_debug', 'plcsh_stress_test@1', SEP,
        'ssh_node_boot', 'ssh_slice', 'check_initscripts', SEP,
        'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
        'check_tcp', 'check_hooks@1', SEP,
        'force_gather_logs', 'force_local_post', SEP,
        ]
    other_steps = [
        'show_boxes', 'local_list','local_rel','local_rel_plc','local_rel_qemu',SEP,
        'plc_stop', 'vs_start', 'vs_stop', SEP,
        'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
        'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
        'delete_leases', 'list_leases', SEP,
        'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
        'qemu_list_all', 'qemu_list_mine', 'qemu_kill_mine', SEP,
        'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEP,
        'plc_db_dump' , 'plc_db_restore', SEP,
        'standby_1 through 20',SEP,
        ]

    @staticmethod
    def printable_steps (list):
        single_line=" ".join(list)+" "
        return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")

    @staticmethod
    def valid_step (step):
        return step != SEP and step != SEPSFA
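
    # printable_steps turns the separators into line breaks; for instance
    # printable_steps(['show','local_pre',SEP,'vs_delete']) renders as:
    #     show local_pre \
    #     vs_delete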

    # turn off the sfa-related steps when build has skipped SFA
    # this is originally for centos5 as recent SFAs won't build on this platform
    @staticmethod
    def check_whether_build_has_sfa (rpms_url):
        # warning, we're now building 'sface' so let's be a bit more picky
        retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
        # full builds are expected to return with 0 here
        if retcod!=0:
            # move all steps containing 'sfa' from default_steps to other_steps
            sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
            TestPlc.other_steps += sfa_steps
            for step in sfa_steps: TestPlc.default_steps.remove(step)

    def __init__ (self,plc_spec,options):
        self.plc_spec=plc_spec
        self.options=options
        self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
        try:
            self.vserverip=plc_spec['vserverip']
            self.vservername=plc_spec['vservername']
            self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
            self.vserver=True
        except:
            raise Exception,'chroot-based myplc testing is deprecated'
        self.apiserver=TestApiserver(self.url,options.dry_run)

    def name(self):
        name=self.plc_spec['name']
        return "%s.%s"%(name,self.vservername)

    def hostname(self):
        return self.plc_spec['hostname']

    def is_local (self):
        return self.test_ssh.is_local()

    # define the API methods on this object through xmlrpc
    # would help, but not strictly necessary

    def actual_command_in_guest (self,command):
        return self.test_ssh.actual_command(self.host_to_guest(command))

    def start_guest (self):
        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))

    def stop_guest (self):
        return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))

    def run_in_guest (self,command):
        return utils.system(self.actual_command_in_guest(command))

    def run_in_host (self,command):
        return self.test_ssh.run_in_buildname(command)

    # command gets run in the vserver
    def host_to_guest(self,command):
        return "vserver %s exec %s"%(self.vservername,command)

    # start/stop the vserver
    def start_guest_in_host(self):
        return "vserver %s start"%(self.vservername)

    def stop_guest_in_host(self):
        return "vserver %s stop"%(self.vservername)

    def run_in_guest_piped (self,local,remote):
        return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
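
    # As an illustration, with vservername='mytest' (an illustrative value),
    # run_in_guest("service plc start") amounts to running, through ssh on the
    # test box:
    #     vserver mytest exec service plc start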

    def auth_root (self):
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role'],
                }

    def locate_site (self,sitename):
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception,"Cannot locate site %s"%sitename

    def locate_node (self,nodename):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                    return (site,node)
        raise Exception,"Cannot locate node %s"%nodename

    def locate_hostname (self,hostname):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    return (site,node)
        raise Exception,"Cannot locate hostname %s"%hostname

    def locate_key (self,keyname):
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
                return key
        raise Exception,"Cannot locate key %s"%keyname

    def locate_slice (self, slicename):
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                return slice
        raise Exception,"Cannot locate slice %s"%slicename

    def all_sliver_objs (self):
        result=[]
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
        return result

    def locate_sliver_obj (self,nodename,slicename):
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        # build objects
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)

    def locate_first_node(self):
        nodename=self.plc_spec['slices'][0]['nodenames'][0]
        (site,node) = self.locate_node(nodename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        return test_node

    def locate_first_sliver (self):
        slice_spec=self.plc_spec['slices'][0]
        slicename=slice_spec['slice_fields']['name']
        nodename=slice_spec['nodenames'][0]
        return self.locate_sliver_obj(nodename,slicename)

    # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        tuples=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        result = {}
        for (box,node) in tuples:
            if not result.has_key(box):
                result[box]=[]
            result[box].append(node)
        return result
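
    # the resulting dict maps a hostbox name to the TestNode objects it hosts,
    # e.g. (illustrative) { 'testbox1.example.org' : [ test_node1, test_node2 ] }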

    # a step for checking this stuff
    def show_boxes (self):
        'print summary of nodes location'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
        return True

    # make this a valid step
    def qemu_kill_all(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBox(box,self.options.buildname).qemu_kill_all(nodedir)
        return True

    # make this a valid step
    def qemu_list_all(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, kill all qemus on that host box
            TestBox(box,self.options.buildname).qemu_list_all()
        return True

    # kill only the right qemus
    def qemu_list_mine(self):
        'list qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.list_qemu()
        return True

    # kill only the right qemus
    def qemu_kill_mine(self):
        'kill the qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.kill_qemu()
        return True

    #################### display config
    def show (self):
        "show test configuration after localization"
        self.display_pass (1)
        self.display_pass (2)
        return True

    always_display_keys=['PLC_WWW_HOST','nodes','sites',]

    def display_pass (self,passno):
        for (key,val) in self.plc_spec.iteritems():
            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
            if passno == 2:
                if key == 'sites':
                    for site in val:
                        self.display_site_spec(site)
                        for node in site['nodes']:
                            self.display_node_spec(node)
                elif key=='initscripts':
                    for initscript in val:
                        self.display_initscript_spec (initscript)
                elif key=='slices':
                    for slice in val:
                        self.display_slice_spec (slice)
                elif key=='keys':
                    for key_spec in val:
                        self.display_key_spec (key_spec)
            elif passno == 1:
                if key not in ['sites','initscripts','slices','keys','sfa']:
                    print '+ ',key,':',val

    def display_site_spec (self,site):
        print '+ ======== site',site['site_fields']['name']
        for (k,v) in site.iteritems():
            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
            if k=='nodes':
                if v:
                    print '+ ','nodes : ',
                    for node in v:
                        print node['node_fields']['hostname'],'',
                    print ''
            elif k=='users':
                if v:
                    print '+ ','users : ',
                    for user in v:
                        print user['name'],'',
                    print ''
            elif k == 'site_fields':
                print '+ login_base',':',v['login_base']
            elif k == 'address_fields':
                pass
            else:
                print '+ ',k,':',v

    def display_initscript_spec (self,initscript):
        print '+ ======== initscript',initscript['initscript_fields']['name']

    def display_key_spec (self,key):
        print '+ ======== key',key['name']

    def display_slice_spec (self,slice):
        print '+ ======== slice',slice['slice_fields']['name']
        for (k,v) in slice.iteritems():
            if k=='nodenames':
                if v:
                    print '+ nodes : ',
                    for nodename in v:
                        print nodename,'',
                    print ''
            elif k=='usernames':
                if v:
                    print '+ users : ',
                    for username in v:
                        print username,'',
                    print ''
            elif k=='slice_fields':
                print '+ fields',':',
                print 'max_nodes=',v['max_nodes'],
                print ''
            else:
                print '+ ',k,':',v

    def display_node_spec (self,node):
        print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
        print "hostname=",node['node_fields']['hostname'],
        print "ip=",node['interface_fields']['ip']
        if self.options.verbose:
            utils.pprint("node details",node,depth=3)

    # another entry point for just showing the boxes involved
    def display_mapping (self):
        TestPlc.display_mapping_plc(self.plc_spec)
        return True

    @staticmethod
    def display_mapping_plc (plc_spec):
        print '+ MyPLC',plc_spec['name']
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)

    @staticmethod
    def display_mapping_node (node_spec):
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']

    def local_pre (self):
        "run site-dependent pre-test script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_pre(self)

    def local_post (self):
        "run site-dependent post-test script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_post(self)

    def local_list (self):
        "run site-dependent list script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_list(self)

    def local_rel (self):
        "run site-dependent release script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_release(self)

    def local_rel_plc (self):
        "run site-dependent release script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_release_plc(self)

    def local_rel_qemu (self):
        "run site-dependent release script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_release_qemu(self)

    def vs_delete(self):
        "vserver delete the test myplc"
        self.run_in_host("vserver --silent %s delete"%self.vservername)
        return True

    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def vs_create (self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        if self.is_local():
            # a full path for the local calls
            build_dir=os.path.dirname(sys.argv[0])
            # sometimes this is empty - set to "." in such a case
            if not build_dir: build_dir="."
            build_dir += "/build"
        else:
            # use a standard name - will be relative to remote buildname
            build_dir="build"
            # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
            self.test_ssh.rmdir(build_dir)
            self.test_ssh.copy(build_dir,recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to vtest-init-vserver
        test_env_options=""
        test_env_options += " -p %s"%self.options.personality
        test_env_options += " -d %s"%self.options.pldistro
        test_env_options += " -f %s"%self.options.fcdistro
        script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        try:
            vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
            vserver_options += " --hostname %s"%vserver_hostname
        except:
            print "Cannot reverse lookup %s"%self.vserverip
            print "This is considered fatal, as this might pollute the test results"
            return False
        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0

    def plc_install(self):
        "yum install myplc, noderepo, and the plain bootstrapfs"

        # workaround for getting pgsql8.2 on centos5
        if self.options.fcdistro == "centos5":
            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")

        # compute nodefamily
        if self.options.personality == "linux32":
            arch = "i386"
        elif self.options.personality == "linux64":
            arch = "x86_64"
        else:
            raise Exception, "Unsupported personality %r"%self.options.personality
        nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)

        pkgs_list=[]
        pkgs_list.append ("slicerepo-%s"%nodefamily)
        pkgs_list.append ("myplc")
        pkgs_list.append ("noderepo-%s"%nodefamily)
        pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
        pkgs_string=" ".join(pkgs_list)
        self.run_in_guest("yum -y install %s"%pkgs_string)
        return self.run_in_guest("rpm -q %s"%pkgs_string)==0
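
    # as an example of the naming scheme above: with personality=linux32,
    # pldistro=planetlab and fcdistro=f8 (illustrative values), nodefamily is
    # 'planetlab-f8-i386', and the packages installed include
    # noderepo-planetlab-f8-i386 and bootstrapfs-planetlab-f8-i386-plain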

    def plc_configure(self):
        "configure the plc instance through plc-config-tty"
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        # excerpt - the full list of configured variables is longer
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     # Above line was added for integrating SFA Testing
                     'PLC_RESERVATION_GRANULARITY',
                     ]:
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
        return True
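
    # the file generated above feeds plc-config-tty in batch mode; with
    # illustrative values it reads:
    #     e PLC_NAME
    #     TestLab
    #     e PLC_MAIL_SUPPORT_ADDRESS
    #     support@example.org
    #     w
    #     q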

    def plc_start(self):
        "service plc start"
        self.run_in_guest('service plc start')
        return True

    def plc_stop(self):
        "service plc stop"
        self.run_in_guest('service plc stop')
        return True

    def vs_start (self):
        "start the PLC vserver"
        self.start_guest()
        return True

    def vs_stop (self):
        "stop the PLC vserver"
        self.stop_guest()
        return True

    # stores the keys from the config for further use
    def keys_store(self):
        "stores test users ssh keys in keys/"
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
        return True

    def keys_clean(self):
        "removes keys cached in keys/"
        utils.system("rm -rf ./keys")
        return True

    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def keys_fetch(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        dir="./keys"
        if not os.path.isdir(dir):
            os.mkdir(dir)
        vservername=self.vservername
        overall=True
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
            if self.test_ssh.fetch(src,dst) != 0: overall=False
        return overall

    def sites (self):
        "create sites with PLCAPI"
        return self.do_sites()

    def delete_sites (self):
        "delete sites with PLCAPI"
        return self.do_sites(action="delete")

    def do_sites (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # deleted with the site
                #test_site.delete_users()
            else:
                utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
                test_site.create_site()
                test_site.create_users()
        return True

    def delete_all_sites (self):
        "Delete all sites in PLC, and related objects"
        print 'auth_root',self.auth_root()
        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
        for site_id in site_ids:
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)
        return True

    def nodes (self):
        "create nodes with PLCAPI"
        return self.do_nodes()

    def delete_nodes (self):
        "delete nodes with PLCAPI"
        return self.do_nodes(action="delete")

    def do_nodes (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if action != "add":
                utils.header("Deleting nodes in site %s"%test_site.name())
                for node_spec in site_spec['nodes']:
                    test_node=TestNode(self,test_site,node_spec)
                    utils.header("Deleting %s"%test_node.name())
                    test_node.delete_node()
            else:
                utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node %s'%node_spec,node_spec)
                    test_node = TestNode (self,test_site,node_spec)
                    test_node.create_node ()
        return True

    def nodegroups (self):
        "create nodegroups with PLCAPI"
        return self.do_nodegroups("add")

    def delete_nodegroups (self):
        "delete nodegroups with PLCAPI"
        return self.do_nodegroups("delete")

    YEAR = 365*24*3600
    @staticmethod
    def translate_timestamp (start,grain,timestamp):
        if timestamp < TestPlc.YEAR: return start+timestamp*grain
        else: return timestamp
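
    # i.e. a timestamp smaller than one year is interpreted as a number of
    # grains relative to the rounded current time: with grain=1800 for
    # instance, translate_timestamp(start,1800,2) yields start+3600, while a
    # full epoch timestamp is kept as-is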

    @staticmethod
    def timestamp_printable (timestamp):
        return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))

    def leases(self):
        "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
        now=int(time.time())
        grain=self.apiserver.GetLeaseGranularity(self.auth_root())
        print 'API answered grain=',grain
        start=(now/grain)*grain
        # find out all nodes that are reservable
        nodes=self.all_reservable_nodenames()
        if not nodes:
            utils.header ("No reservable node found - proceeding without leases")
            return True
        ok=True
        # attach them to the leases as specified in plc_specs
        # this is where the 'leases' field gets interpreted as relative or absolute
        for lease_spec in self.plc_spec['leases']:
            # skip the ones that come with a null slice id
            if not lease_spec['slice']: continue
            lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
            lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
            lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
                                                    lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
            if lease_addition['errors']:
                utils.header("Cannot create leases, %s"%lease_addition['errors'])
                ok=False
            else:
                utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
                              (nodes,lease_spec['slice'],
                               lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
                               lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
        return ok

    def delete_leases (self):
        "remove all leases in the myplc side"
        lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
        utils.header("Cleaning leases %r"%lease_ids)
        self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
        return True

    def list_leases (self):
        "list all leases known to the myplc"
        leases = self.apiserver.GetLeases(self.auth_root())
        now=int(time.time())
        for l in leases:
            current=l['t_until']>=now
            if self.options.verbose or current:
                utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                                       TestPlc.timestamp_printable(l['t_from']),
                                                       TestPlc.timestamp_printable(l['t_until'])))
        return True

    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        # 1st pass to scan contents
        groups_dict = {}
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        overall = True
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            if action == "add":
                print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
                # first, check if the nodetagtype is here
                tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
                if tag_types:
                    tag_type_id = tag_types[0]['tag_type_id']
                else:
                    tag_type_id = self.apiserver.AddTagType(auth,
                                                            {'tagname':nodegroupname,
                                                             'description': 'for nodegroup %s'%nodegroupname,
                                                             })
                print 'located tag (type)',nodegroupname,'as',tag_type_id
                # then create the nodegroup itself if needed
                nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
                if not nodegroups:
                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                    print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
                # set node tag on all nodes, value='yes'
                for nodename in group_nodes:
                    try:
                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    except:
                        traceback.print_exc()
                        print 'node',nodename,'seems to already have tag',nodegroupname
                    # check anyway
                    try:
                        expect_yes = self.apiserver.GetNodeTags(auth,
                                                                {'hostname':nodename,
                                                                 'tagname':nodegroupname},
                                                                ['value'])[0]['value']
                        if expect_yes != "yes":
                            print 'Mismatch node tag on node',nodename,'got',expect_yes
                            overall=False
                    except:
                        if not self.options.dry_run:
                            print 'Cannot find tag',nodegroupname,'on node',nodename
                            overall = False
            else:
                try:
                    print 'cleaning nodegroup',nodegroupname
                    self.apiserver.DeleteNodeGroup(auth,nodegroupname)
                except:
                    traceback.print_exc()
                    overall=False
        return overall

    # return a list of tuples (nodename,qemuname)
    def all_node_infos (self) :
        node_infos = []
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                            for node_spec in site_spec['nodes'] ]
        return node_infos

    def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]

    def all_reservable_nodenames (self):
        res=[]
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields=node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                    res.append(node_fields['hostname'])
        return res

    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
        if self.options.dry_run:
            print 'dry_run'
            return True
        # compute timeout
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        while tocheck:
            # get their status
            tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
            # update status
            for array in tocheck_status:
                hostname=array['hostname']
                boot_state=array['boot_state']
                if boot_state == target_boot_state:
                    utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                else:
                    # if it's a real node, never mind
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                        # let's pretend it has reached the target state
                        boot_state = target_boot_state
                    elif datetime.datetime.now() > graceout:
                        utils.header ("%s still in '%s' state"%(hostname,boot_state))
                        graceout=datetime.datetime.now()+datetime.timedelta(1)
                status[hostname] = boot_state
            # refresh the list of nodes still to check
            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
            if not tocheck:
                return True
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def nodes_booted(self):
        return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)

    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
        # compute timeout
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        vservername=self.vservername
        if debug:
            message="debug"
            local_key = "keys/%(vservername)s-debug.rsa"%locals()
        else:
            message="boot"
            local_key = "keys/key1.rsa"
        node_infos = self.all_node_infos()
        utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
        for (nodename,qemuname) in node_infos:
            utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                         (timeout_minutes,silent_minutes,period))
        while node_infos:
            for node_info in node_infos:
                (hostname,qemuname) = node_info
                # try to run 'hostname' in the node
                command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
                # don't spam logs - show the command only after the grace period
                success = utils.system ( command, silent=datetime.datetime.now() < graceout)
                if success==0:
                    utils.header('Successfully entered root@%s (%s)'%(hostname,message))
                    # refresh node_infos
                    node_infos.remove(node_info)
                else:
                    # we will have tried real nodes once, in case they're up - but if not, just skip
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                        node_infos.remove(node_info)
            if not node_infos:
                return True
            if datetime.datetime.now() > timeout:
                for (hostname,qemuname) in node_infos:
                    utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def ssh_node_debug(self):
        "Tries to ssh into nodes in debug mode with the debug ssh key"
        return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)

    def ssh_node_boot(self):
        "Tries to ssh into nodes in production mode with the root ssh key"
        return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)

    @node_mapper
    def qemu_local_init (self):
        "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
        pass

    @node_mapper
    def bootcd (self):
        "all nodes: invoke GetBootMedium and store result locally"
        pass

    @node_mapper
    def qemu_local_config (self):
        "all nodes: compute qemu config qemu.conf and store it locally"
        pass

    @node_mapper
    def nodestate_reinstall (self):
        "all nodes: mark PLCAPI boot_state as reinstall"
        pass

    @node_mapper
    def nodestate_safeboot (self):
        "all nodes: mark PLCAPI boot_state as safeboot"
        pass

    @node_mapper
    def nodestate_boot (self):
        "all nodes: mark PLCAPI boot_state as boot"
        pass

    @node_mapper
    def nodestate_show (self):
        "all nodes: show PLCAPI boot_state"
        pass

    @node_mapper
    def qemu_export (self):
        "all nodes: push local node-dep directory on the qemu box"
        pass

    ### check hooks : invoke scripts from hooks/{node,slice}
    def check_hooks_node (self):
        return self.locate_first_node().check_hooks()
    def check_hooks_sliver (self) :
        return self.locate_first_sliver().check_hooks()

    def check_hooks (self):
        "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
        return self.check_hooks_node() and self.check_hooks_sliver()

    def do_check_initscripts(self):
        overall = True
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptname'):
                continue
            initscript=slice_spec['initscriptname']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript(initscript):
                    overall = False
        return overall

    def check_initscripts(self):
        "check that the initscripts have triggered"
        return self.do_check_initscripts()

    def initscripts (self):
        "create initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
        return True

    def delete_initscripts (self):
        "delete initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            try:
                self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
                print initscript_name,'deleted'
            except:
                print 'deletion went wrong - probably did not exist'
        return True

    def slices (self):
        "create slices with PLCAPI"
        return self.do_slices()

    def delete_slices (self):
        "delete slices with PLCAPI"
        return self.do_slices("delete")

    def do_slices (self, action="add"):
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
            if action != "add":
                utils.header("Deleting slices in site %s"%test_site.name())
                test_slice.delete_slice()
            else:
                utils.pprint("Creating slice",slice)
                test_slice.create_slice()
                utils.header('Created Slice %s'%slice['slice_fields']['name'])
        return True

    @slice_mapper
    def ssh_slice(self):
        "tries to ssh-enter the slice with the user key, to ensure slice creation"
        pass

    @node_mapper
    def keys_clear_known_hosts (self):
        "remove test nodes entries from the local known_hosts file"
        pass

    @node_mapper
    def qemu_start (self) :
        "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
        pass

    def check_tcp (self):
        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        specs = self.plc_spec['tcp_test']
        overall=True
        for spec in specs:
            port = spec['port']
            # server side
            s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
            if not s_test_sliver.run_tcp_server(port,timeout=10):
                overall=False
                break
            # idem for the client side
            c_test_sliver = self.locate_sliver_obj(spec['client_node'],spec['client_slice'])
            if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
                overall=False
        return overall

    def plcsh_stress_test (self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        command = location
        command += " -- --check"
        if self.options.size == 1:
            command += " --tiny"
        return ( self.run_in_guest(command) == 0)

    # populate runs the same utility with slightly different options
    # in particular runs with --preserve (dont cleanup) and without --check
    # also it gets run twice, once with the --foreign option for creating fake foreign entries

    def sfa_install(self):
        "yum install sfa, sfa-plc and sfa-client"
        # ignore yum retcod
        self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")
        return self.run_in_guest("rpm -q sfa sfa-client sfa-plc sfa-sfatables")==0

    def sfa_dbclean(self):
        "thoroughly wipes off the SFA database"
        return self.run_in_guest("sfa-nuke-plc.py")==0

    def sfa_plcclean(self):
        "cleans the PLC entries that were created as a side effect of running the script"
        # ignore result
        sfa_spec=self.plc_spec['sfa']

        slicename='%s_%s'%(sfa_spec['login_base'],sfa_spec['slicename'])
        try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
        except: print "Slice %s already absent from PLC db"%slicename

        username="%s@%s"%(sfa_spec['regularuser'],sfa_spec['domain'])
        try: self.apiserver.DeletePerson(self.auth_root(),username)
        except: print "User %s already absent from PLC db"%username

        print "REMEMBER TO RUN sfa_import AGAIN"
        return True

    def sfa_uninstall(self):
        "uses rpm to uninstall sfa - ignore result"
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
        self.run_in_guest("rm -rf /etc/sfa")
        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
        # xxx tmp
        self.run_in_guest("rpm -e --noscripts sfa-plc")
        return True

    def sfa_utest_install(self):
        "yum install sfa-tests"
        # ignore yum retcod
        self.run_in_guest("yum -y install sfa-tests")
        return self.run_in_guest("rpm -q sfa-tests")==0

    def sfa_utest_run(self):
        "run the SFA unit tests shipped in sfa-tests"
        return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0

    ### per-plc configuration files are gathered in a dedicated subdir
    def confdir(self):
        dirname="conf.%s"%self.plc_spec['name']
        if not os.path.isdir(dirname):
            utils.system("mkdir -p %s"%dirname)
        if not os.path.isdir(dirname):
            raise Exception,"Cannot create config dir for plc %s"%self.name()
        return dirname

    def conffile(self,filename):
        return "%s/%s"%(self.confdir(),filename)

    def confsubdir(self,dirname,clean):
        subdirname="%s/%s"%(self.confdir(),dirname)
        if clean:
            utils.system("rm -rf %s"%subdirname)
        if not os.path.isdir(subdirname):
            utils.system("mkdir -p %s"%subdirname)
        if not os.path.isdir(subdirname):
            raise Exception,"Cannot create config subdir %s for plc %s"%(dirname,self.name())
        return subdirname

    def conffile_clean (self,filename):
        filename=self.conffile(filename)
        return utils.system("rm -rf %s"%filename)==0

    def sfa_configure(self):
        "run sfa-config-tty"
        tmpname=self.conffile("sfa-config-tty")
        fileconf=open(tmpname,'w')
        # excerpt - the full list of configured variables is longer
        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                     'SFA_INTERFACE_HRN',
                     # 'SFA_REGISTRY_LEVEL1_AUTH',
                     'SFA_REGISTRY_HOST',
                     'SFA_AGGREGATE_HOST',
                     'SFA_PLC_DB_PASSWORD',
                     ]:
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
        # the way plc_config handles booleans just sucks..
        for var in ['SFA_API_DEBUG']:
            val='false'
            if self.plc_spec['sfa'][var]: val='true'
            fileconf.write ('e %s\n%s\n'%(var,val))
        fileconf.write('w\n')
        fileconf.write('R\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
        return True

    def aggregate_xml_line(self):
        return '<aggregate addr="%s" hrn="%s" port="12346"/>' % \
            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])

    def registry_xml_line(self):
        return '<registry addr="%s" hrn="%s" port="12345"/>' % \
            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])

    # a cross step that takes all other plcs in argument
    def cross_sfa_configure(self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        if not other_plcs:
            return True
        agg_fname=self.conffile("agg.xml")
        file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
                                  " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%agg_fname)
        reg_fname=self.conffile("reg.xml")
        file(reg_fname,"w").write("<registries>%s</registries>\n" % \
                                  " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%reg_fname)
        return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
           and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0

    def sfa_import(self):
        "run sfa-import-plc.py in the plc"
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        return self.run_in_guest('sfa-import-plc.py')==0
        # not needed anymore
        # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))

    def sfa_start(self):
        "service sfa start"
        return self.run_in_guest('service sfa start')==0

    def sfi_configure(self):
        "Create /root/.sfi on the plc side"
        sfa_spec=self.plc_spec['sfa']
        # sfi client configuration
        dir_name=self.confsubdir("dot-sfi",clean=True)
        file_name=dir_name + os.sep + sfa_spec['piuser'] + '.pkey'
        fileconf=open(file_name,'w')
        fileconf.write (self.plc_spec['keys'][0]['private'])
        fileconf.close()
        utils.header ("(Over)wrote %s"%file_name)

        file_name=dir_name + os.sep + 'sfi_config'
        fileconf=open(file_name,'w')
        SFI_AUTH="%s.%s"%(sfa_spec['SFA_REGISTRY_ROOT_AUTH'],sfa_spec['login_base'])
        fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
        fileconf.write('\n')
        SFI_USER=SFI_AUTH + '.' + sfa_spec['piuser']
        fileconf.write ("SFI_USER='%s'"%SFI_USER)
        fileconf.write('\n')
        SFI_REGISTRY='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12345/'
        fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
        fileconf.write('\n')
        SFI_SM='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12347/'
        fileconf.write ("SFI_SM='%s'"%SFI_SM)
        fileconf.write('\n')
        fileconf.close()
        utils.header ("(Over)wrote %s"%file_name)
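
        # with illustrative values, the sfi_config written above reads:
        #     SFI_AUTH='plc.main'
        #     SFI_USER='plc.main.fake-pi'
        #     SFI_REGISTRY='http://localhost:12345/'
        #     SFI_SM='http://localhost:12347/'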

        file_name=dir_name + os.sep + 'person.xml'
        fileconf=open(file_name,'w')
        for record in sfa_spec['sfa_person_xml']:
            person_record=record
            fileconf.write(person_record)
            fileconf.write('\n')
        fileconf.close()
        utils.header ("(Over)wrote %s"%file_name)

        file_name=dir_name + os.sep + 'slice.xml'
        fileconf=open(file_name,'w')
        for record in sfa_spec['sfa_slice_xml']:
            slice_record=record
            #slice_record=sfa_spec['sfa_slice_xml']
            fileconf.write(slice_record)
            fileconf.write('\n')
        fileconf.close()
        utils.header ("(Over)wrote %s"%file_name)

        file_name=dir_name + os.sep + 'slice.rspec'
        fileconf=open(file_name,'w')
        for (key, value) in sfa_spec['sfa_slice_rspec'].items():
            slice_rspec=key+'='+value
            fileconf.write(slice_rspec)
            fileconf.write('\n')
        fileconf.close()
        utils.header ("(Over)wrote %s"%file_name)

        # push to the remote root's .sfi
        location = "root/.sfi"
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.copy_abs(dir_name, remote, recursive=True)

        return True

    def sfi_clean (self):
        "clean up /root/.sfi on the plc side"
        self.run_in_guest("rm -rf /root/.sfi")
        return True

    def sfa_add_user(self):
        "run sfi.py add using person.xml"
        return TestUserSfa(self).add_user()

    def sfa_update_user(self):
        "run sfi.py update using person.xml"
        return TestUserSfa(self).update_user()

    @slice_sfa_mapper
    def sfa_add_slice(self):
        "run sfi.py add (on Registry) from slice.xml"
        pass

    @slice_sfa_mapper
    def sfa_discover(self):
        "discover resources into resources_in.rspec"
        pass

    @slice_sfa_mapper
    def sfa_create_slice(self):
        "run sfi.py create (on SM) - 1st time"
        pass

    @slice_sfa_mapper
    def sfa_update_slice(self):
        "run sfi.py create (on SM) on existing object"
        pass

    def sfa_view(self):
        "run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
        sfa_spec=self.plc_spec['sfa']
        auth=sfa_spec['SFA_REGISTRY_ROOT_AUTH']
        return \
            self.run_in_guest("sfi.py -d /root/.sfi/ list %s.%s"%(auth,sfa_spec['login_base']))==0 and \
            self.run_in_guest("sfi.py -d /root/.sfi/ show %s.%s"%(auth,sfa_spec['login_base']))==0 and \
            self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
            self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0

    @slice_sfa_mapper
    def ssh_slice_sfa(self):
        "tries to ssh-enter the SFA slice"
        pass

    def sfa_delete_user(self):
        "run sfi.py delete (on SM) for user"
        test_user_sfa=TestUserSfa(self)
        return test_user_sfa.delete_user()

    @slice_sfa_mapper
    def sfa_delete_slice(self):
        "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
        pass

    def sfa_stop(self):
        "service sfa stop"
        self.run_in_guest('service sfa stop')
        return True

    def populate (self):
        "creates random entries in the PLCAPI"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        command = location
        command += " -- --preserve --short-names"
        local = (self.run_in_guest(command) == 0)
        # second run with --foreign
        command += ' --foreign'
        remote = (self.run_in_guest(command) == 0)
        return (local and remote)

    def gather_logs (self):
        "gets all possible logs from plc's/qemu node's/slice's for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        # (1.a)
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        # (1.b)
        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/pgsql/data/pg_log/"
        self.gather_pgsql_logs ()
        # (2)
        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        # (3)
        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()
        # (4)
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
        return True

    def gather_slivers_var_logs(self):
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
            command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
            utils.system(command)
        return True

    def gather_var_logs (self):
        utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
        utils.system(command)
        command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
        utils.system(command)

    def gather_pgsql_logs (self):
        utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
        utils.system(command)

    def gather_nodes_var_logs (self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
                command = test_ssh.actual_command("tar -C /var/log -cf - .")
                command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
                utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
                utils.system(command)

    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        try:
            name=self.options.dbname
            if not isinstance(name,StringTypes):
                raise Exception
        except:
            t=datetime.datetime.now()
            d=t.date()
            name=str(d)
        return "/root/%s-%s.sql"%(database,name)
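
    # e.g. dbfile('planetlab5') yields /root/planetlab5-2010-06-22.sql when
    # --dbname is not set (the date part being today's date, illustrative
    # here), and /root/planetlab5-<dbname>.sql otherwise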

    def plc_db_dump(self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        dump=self.dbfile("planetlab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
        utils.header('Dumped planetlab5 database in %s'%dump)
        return True

    def plc_db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        dump=self.dbfile("planetlab5")
        # stop httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
        # restart httpd service
        self.run_in_guest('service httpd start')
        utils.header('Database restored from ' + dump)
        return True

    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass