1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBox import TestBox
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
23 from TestUserSfa import TestUserSfa
25 # step methods must take (self) and return a boolean (options is a member of the class)
def standby(minutes,dry_run):
    # Sleep in place for *minutes* minutes - backs the generated standby_<n> steps.
    # NOTE(review): dry_run is unused in the visible lines; the elided lines
    # presumably honor it (skip the sleep and return early) - confirm.
    utils.header('Entering StandBy for %d mn'%minutes)
    time.sleep(60*minutes)
def standby_generic (func):
    # Step factory: derive the wait duration from the function name (standby_<minutes>).
    # NOTE(review): the wrapper 'def actual(self)' that binds 'self' is elided
    # in this excerpt; 'self' below belongs to that wrapper.
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)
def node_mapper (method):
    # Lift a TestNode method into a plc-level step that loops over every node
    # of every site and ANDs the per-node results.
    # NOTE(review): the 'def actual(self)' wrapper and the initialization of
    # 'overall' are elided in this excerpt.
    node_method = TestNode.__dict__[method.__name__]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self,test_site,node_spec)
            # a single failing node flips the overall verdict
            if not node_method(test_node): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
def slice_mapper (method):
    # Lift a TestSlice method into a plc-level step that loops over all slices
    # declared in the plc spec and ANDs the per-slice results.
    # NOTE(review): the 'def actual(self)' wrapper and the initialization of
    # 'overall' are elided in this excerpt.
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
def slice_sfa_mapper (method):
    # Same pattern as slice_mapper, but for SFA slices: loop over the
    # 'sfa_slice_specs' entries and AND the per-slice results.
    # NOTE(review): the 'def actual(self)' wrapper and the initialization of
    # 'overall' are elided in this excerpt.
    slice_method = TestSliceSfa.__dict__[method.__name__]
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSliceSfa(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
89 'show', 'local_pre', SEP,
90 'vs_delete','vs_create','plc_install', 'plc_configure', 'plc_start', SEP,
91 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
92 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
93 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', 'qemu_export', 'qemu_kill_all', 'qemu_start', SEP,
94 'sfa_install', 'sfa_configure', 'cross_sfa_configure', 'sfa_import', 'sfa_start', SEPSFA,
95 'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', 'sfa_create_slice@1', SEPSFA,
96 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
97 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
98 # but as the stress test might take a while, we sometimes missed the debug mode..
99 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
100 'ssh_node_boot', 'ssh_slice', 'check_initscripts', SEP,
101 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
102 'check_tcp', 'check_hooks@1', SEP,
103 'force_gather_logs', 'force_local_post', SEP,
107 'show_boxes', 'local_list','local_rel','local_rel_plc','local_rel_qemu',SEP,
108 'plc_stop', 'vs_start', 'vs_stop', SEP,
109 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
110 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
111 'delete_leases', 'list_leases', SEP,
113 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
114 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_mine', SEP,
115 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEP,
116 'plc_db_dump' , 'plc_db_restore', SEP,
117 'standby_1 through 20',SEP,
def printable_steps (list):
    # Pretty-print a step list: separators become backslash-newline breaks
    # so the listing reads as one continued shell-style line.
    joined = " ".join(list) + " "
    for separator in (SEP, SEPSFA):
        joined = joined.replace(" " + separator + " ", " \\\n")
    return joined
def valid_step (step):
    # Separator markers are layout artifacts, not runnable steps.
    return step not in (SEP, SEPSFA)
128 # turn off the sfa-related steps when build has skipped SFA
129 # this is originally for centos5 as recent SFAs won't build on this platformb
def check_whether_build_has_sfa (rpms_url):
    # Probe the build's rpm repository for any sfa- package; if the build
    # skipped SFA, demote all sfa-related steps to 'other_steps'.
    # warning, we're now building 'sface' so let's be a bit more picky
    retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
    # full builds are expected to return with 0 here
    # NOTE(review): the early return taken when retcod==0 is elided in this excerpt
    # move all steps containing 'sfa' from default_steps to other_steps
    sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
    TestPlc.other_steps += sfa_steps
    for step in sfa_steps: TestPlc.default_steps.remove(step)
def __init__ (self,plc_spec,options):
    # plc_spec : config dict describing this myplc instance
    # options  : parsed command-line options (dry_run, buildname, ...)
    self.plc_spec=plc_spec
    # NOTE(review): the assignment of self.options is elided in this excerpt;
    # it is read on the next line.
    self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
    self.vserverip=plc_spec['vserverip']
    self.vservername=plc_spec['vservername']
    # PLCAPI endpoint served from inside the vserver
    self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
    # NOTE(review): this raise sits under an elided conditional (non-vserver
    # configs); it is not unconditional.
    raise Exception,'chroot-based myplc testing is deprecated'
    self.apiserver=TestApiserver(self.url,options.dry_run)
155 name=self.plc_spec['name']
156 return "%s.%s"%(name,self.vservername)
159 return self.plc_spec['hostname']
162 return self.test_ssh.is_local()
164 # define the API methods on this object through xmlrpc
165 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    """Return the full ssh command line that runs *command* inside the guest vserver."""
    in_guest = self.host_to_guest(command)
    return self.test_ssh.actual_command(in_guest)
def start_guest (self):
    """Start the guest vserver from the host box; returns the shell exit status."""
    host_command = self.test_ssh.actual_command(self.start_guest_in_host())
    return utils.system(host_command)
def stop_guest (self):
    """Stop the guest vserver from the host box; returns the shell exit status."""
    host_command = self.test_ssh.actual_command(self.stop_guest_in_host())
    return utils.system(host_command)
def run_in_guest (self,command):
    """Run *command* inside the guest vserver; returns the shell exit status."""
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    """Run *command* on the host box (outside the vserver); returns the shell exit status."""
    # delegate to the ssh helper, which knows the remote buildname directory
    status = self.test_ssh.run_in_buildname(command)
    return status
#command gets run in the vserver
def host_to_guest(self,command):
    """Wrap *command* so that it executes inside this plc's vserver."""
    return "vserver {} exec {}".format(self.vservername, command)
#start/stop the vserver
def start_guest_in_host(self):
    """Shell command (to run on the host box) that boots the vserver."""
    return "vserver {} start".format(self.vservername)
def stop_guest_in_host(self):
    """Shell command (to run on the host box) that shuts the vserver down."""
    return "vserver {} stop".format(self.vservername)
def run_in_guest_piped (self,local,remote):
    """Run *local* on the host and pipe its stdout into *remote* inside the guest."""
    remote_command = self.test_ssh.actual_command(self.host_to_guest(remote), keep_stdin=True)
    return utils.system(local + " | " + remote_command)
def auth_root (self):
    # Build the PLCAPI authentication struct for this plc's root account,
    # straight from the plc spec.
    # NOTE(review): the closing brace of the dict literal is elided in this excerpt.
    return {'Username':self.plc_spec['PLC_ROOT_USER'],
            'AuthMethod':'password',
            'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
            'Role' : self.plc_spec['role']
def locate_site (self,sitename):
    # Find a site spec by display name or by login_base; raises when not found.
    # NOTE(review): the 'return site' under each matching branch is elided
    # in this excerpt.
    for site in self.plc_spec['sites']:
        if site['site_fields']['name'] == sitename:
        if site['site_fields']['login_base'] == sitename:
    raise Exception,"Cannot locate site %s"%sitename
def locate_node (self,nodename):
    # Locate a node spec by its symbolic name; callers unpack the result as
    # (site_spec,node_spec) - see locate_sliver_obj.  Raises when not found.
    # NOTE(review): the return under the match is elided in this excerpt.
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['name'] == nodename:
    raise Exception,"Cannot locate node %s"%nodename
def locate_hostname (self,hostname):
    # Locate a node spec by its hostname (node_fields); callers unpack the
    # result as (site_spec,node_spec).  Raises when not found.
    # NOTE(review): the return under the match is elided in this excerpt.
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['node_fields']['hostname'] == hostname:
    raise Exception,"Cannot locate hostname %s"%hostname
def locate_key (self,keyname):
    # Find a key spec by name in the plc spec; raises when not found.
    # NOTE(review): the return of the matched key is elided in this excerpt.
    for key in self.plc_spec['keys']:
        if key['name'] == keyname:
    raise Exception,"Cannot locate key %s"%keyname
def locate_slice (self, slicename):
    # Find a slice spec by its slice_fields name; raises when not found.
    # NOTE(review): the return of the matched slice is elided in this excerpt.
    for slice in self.plc_spec['slices']:
        if slice['slice_fields']['name'] == slicename:
    raise Exception,"Cannot locate slice %s"%slicename
239 def all_sliver_objs (self):
241 for slice_spec in self.plc_spec['slices']:
242 slicename = slice_spec['slice_fields']['name']
243 for nodename in slice_spec['nodenames']:
244 result.append(self.locate_sliver_obj (nodename,slicename))
def locate_sliver_obj (self,nodename,slicename):
    # Build and return a TestSliver object for the (nodename,slicename) pair,
    # resolving the specs through the locate_* helpers.
    (site,node) = self.locate_node(nodename)
    slice = self.locate_slice (slicename)
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
    # xxx the slice site is assumed to be the node site - mhh - probably harmless
    test_slice = TestSlice (self, test_site, slice)
    return TestSliver (self, test_node, test_slice)
def locate_first_node(self):
    # Build a TestNode for the first node of the first slice in the spec -
    # convenience entry point used by the hook-checking steps.
    nodename=self.plc_spec['slices'][0]['nodenames'][0]
    (site,node) = self.locate_node(nodename)
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
    # NOTE(review): the return of test_node is elided in this excerpt.
def locate_first_sliver (self):
    """Return the TestSliver for the first node of the first slice in the spec."""
    first_slice = self.plc_spec['slices'][0]
    return self.locate_sliver_obj(first_slice['nodenames'][0],
                                  first_slice['slice_fields']['name'])
# all different hostboxes used in this plc
def gather_hostBoxes(self):
    # maps on sites and nodes, return [ (host_box,test_node) ]
    # NOTE(review): the initialization of 'tuples' is elided in this excerpt.
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self, test_site, node_spec)
            # only qemu (non-real) nodes live on a host box
            if not test_node.is_real():
                tuples.append( (test_node.host_box(),test_node) )
    # transform into a dict { 'host_box' -> [ test_node .. ] }
    # NOTE(review): initialization of 'result' and of the per-box empty list
    # are elided in this excerpt.
    for (box,node) in tuples:
        if not result.has_key(box):
        result[box].append(node)
289 # a step for checking this stuff
290 def show_boxes (self):
291 'print summary of nodes location'
292 for (box,nodes) in self.gather_hostBoxes().iteritems():
293 print box,":"," + ".join( [ node.name() for node in nodes ] )
296 # make this a valid step
297 def qemu_kill_all(self):
298 'kill all qemu instances on the qemu boxes involved by this setup'
299 # this is the brute force version, kill all qemus on that host box
300 for (box,nodes) in self.gather_hostBoxes().iteritems():
301 # pass the first nodename, as we don't push template-qemu on testboxes
302 nodedir=nodes[0].nodedir()
303 TestBox(box,self.options.buildname).qemu_kill_all(nodedir)
306 # make this a valid step
307 def qemu_list_all(self):
308 'list all qemu instances on the qemu boxes involved by this setup'
309 for (box,nodes) in self.gather_hostBoxes().iteritems():
310 # this is the brute force version, kill all qemus on that host box
311 TestBox(box,self.options.buildname).qemu_list_all()
314 # kill only the right qemus
315 def qemu_list_mine(self):
316 'list qemu instances for our nodes'
317 for (box,nodes) in self.gather_hostBoxes().iteritems():
318 # the fine-grain version
323 # kill only the right qemus
324 def qemu_kill_mine(self):
325 'kill the qemu instances for our nodes'
326 for (box,nodes) in self.gather_hostBoxes().iteritems():
327 # the fine-grain version
332 #################### display config
334 "show test configuration after localization"
335 self.display_pass (1)
336 self.display_pass (2)
340 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
341 def display_pass (self,passno):
342 for (key,val) in self.plc_spec.iteritems():
343 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
347 self.display_site_spec(site)
348 for node in site['nodes']:
349 self.display_node_spec(node)
350 elif key=='initscripts':
351 for initscript in val:
352 self.display_initscript_spec (initscript)
355 self.display_slice_spec (slice)
358 self.display_key_spec (key)
360 if key not in ['sites','initscripts','slices','keys', 'sfa']:
361 print '+ ',key,':',val
363 def display_site_spec (self,site):
364 print '+ ======== site',site['site_fields']['name']
365 for (k,v) in site.iteritems():
366 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
369 print '+ ','nodes : ',
371 print node['node_fields']['hostname'],'',
377 print user['name'],'',
379 elif k == 'site_fields':
380 print '+ login_base',':',v['login_base']
381 elif k == 'address_fields':
387 def display_initscript_spec (self,initscript):
388 print '+ ======== initscript',initscript['initscript_fields']['name']
390 def display_key_spec (self,key):
391 print '+ ======== key',key['name']
393 def display_slice_spec (self,slice):
394 print '+ ======== slice',slice['slice_fields']['name']
395 for (k,v) in slice.iteritems():
408 elif k=='slice_fields':
409 print '+ fields',':',
410 print 'max_nodes=',v['max_nodes'],
415 def display_node_spec (self,node):
416 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
417 print "hostname=",node['node_fields']['hostname'],
418 print "ip=",node['interface_fields']['ip']
419 if self.options.verbose:
420 utils.pprint("node details",node,depth=3)
422 # another entry point for just showing the boxes involved
423 def display_mapping (self):
424 TestPlc.display_mapping_plc(self.plc_spec)
428 def display_mapping_plc (plc_spec):
429 print '+ MyPLC',plc_spec['name']
430 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
431 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
432 for site_spec in plc_spec['sites']:
433 for node_spec in site_spec['nodes']:
434 TestPlc.display_mapping_node(node_spec)
437 def display_mapping_node (node_spec):
438 print '+ NODE %s'%(node_spec['name'])
439 print '+\tqemu box %s'%node_spec['host_box']
440 print '+\thostname=%s'%node_spec['node_fields']['hostname']
442 def local_pre (self):
443 "run site-dependant pre-test script as defined in LocalTestResources"
444 from LocalTestResources import local_resources
445 return local_resources.step_pre(self)
447 def local_post (self):
448 "run site-dependant post-test script as defined in LocalTestResources"
449 from LocalTestResources import local_resources
450 return local_resources.step_post(self)
452 def local_list (self):
453 "run site-dependant list script as defined in LocalTestResources"
454 from LocalTestResources import local_resources
455 return local_resources.step_list(self)
457 def local_rel (self):
458 "run site-dependant release script as defined in LocalTestResources"
459 from LocalTestResources import local_resources
460 return local_resources.step_release(self)
462 def local_rel_plc (self):
463 "run site-dependant release script as defined in LocalTestResources"
464 from LocalTestResources import local_resources
465 return local_resources.step_release_plc(self)
467 def local_rel_qemu (self):
468 "run site-dependant release script as defined in LocalTestResources"
469 from LocalTestResources import local_resources
470 return local_resources.step_release_qemu(self)
473 "vserver delete the test myplc"
474 self.run_in_host("vserver --silent %s delete"%self.vservername)
478 # historically the build was being fetched by the tests
479 # now the build pushes itself as a subdir of the tests workdir
480 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
481 def vs_create (self):
482 "vserver creation (no install done)"
483 # push the local build/ dir to the testplc box
485 # a full path for the local calls
486 build_dir=os.path.dirname(sys.argv[0])
487 # sometimes this is empty - set to "." in such a case
488 if not build_dir: build_dir="."
489 build_dir += "/build"
491 # use a standard name - will be relative to remote buildname
493 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
494 self.test_ssh.rmdir(build_dir)
495 self.test_ssh.copy(build_dir,recursive=True)
496 # the repo url is taken from arch-rpms-url
497 # with the last step (i386) removed
498 repo_url = self.options.arch_rpms_url
499 for level in [ 'arch' ]:
500 repo_url = os.path.dirname(repo_url)
501 # pass the vbuild-nightly options to vtest-init-vserver
503 test_env_options += " -p %s"%self.options.personality
504 test_env_options += " -d %s"%self.options.pldistro
505 test_env_options += " -f %s"%self.options.fcdistro
506 script="vtest-init-vserver.sh"
507 vserver_name = self.vservername
508 vserver_options="--netdev eth0 --interface %s"%self.vserverip
510 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
511 vserver_options += " --hostname %s"%vserver_hostname
513 print "Cannot reverse lookup %s"%self.vserverip
514 print "This is considered fatal, as this might pollute the test results"
516 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
517 return self.run_in_host(create_vserver) == 0
520 def plc_install(self):
521 "yum install myplc, noderepo, and the plain bootstrapfs"
523 # workaround for getting pgsql8.2 on centos5
524 if self.options.fcdistro == "centos5":
525 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
528 if self.options.personality == "linux32":
530 elif self.options.personality == "linux64":
533 raise Exception, "Unsupported personality %r"%self.options.personality
534 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
537 pkgs_list.append ("slicerepo-%s"%nodefamily)
538 pkgs_list.append ("myplc")
539 pkgs_list.append ("noderepo-%s"%nodefamily)
540 pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
541 pkgs_string=" ".join(pkgs_list)
542 self.run_in_guest("yum -y install %s"%pkgs_string)
543 return self.run_in_guest("rpm -q %s"%pkgs_string)==0
546 def plc_configure(self):
548 tmpname='%s.plc-config-tty'%(self.name())
549 fileconf=open(tmpname,'w')
550 for var in [ 'PLC_NAME',
555 'PLC_MAIL_SUPPORT_ADDRESS',
558 # Above line was added for integrating SFA Testing
564 'PLC_RESERVATION_GRANULARITY',
566 'PLC_OMF_XMPP_SERVER',
568 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
569 fileconf.write('w\n')
570 fileconf.write('q\n')
572 utils.system('cat %s'%tmpname)
573 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
574 utils.system('rm %s'%tmpname)
579 self.run_in_guest('service plc start')
584 self.run_in_guest('service plc stop')
588 "start the PLC vserver"
593 "stop the PLC vserver"
597 # stores the keys from the config for further use
598 def keys_store(self):
599 "stores test users ssh keys in keys/"
600 for key_spec in self.plc_spec['keys']:
601 TestKey(self,key_spec).store_key()
def keys_clean(self):
    "removes keys cached in keys/"
    # brute-force removal of the local key cache directory
    utils.system("rm -rf ./keys")
609 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
610 # for later direct access to the nodes
611 def keys_fetch(self):
612 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
614 if not os.path.isdir(dir):
616 vservername=self.vservername
618 prefix = 'debug_ssh_key'
619 for ext in [ 'pub', 'rsa' ] :
620 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
621 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
622 if self.test_ssh.fetch(src,dst) != 0: overall=False
626 "create sites with PLCAPI"
627 return self.do_sites()
629 def delete_sites (self):
630 "delete sites with PLCAPI"
631 return self.do_sites(action="delete")
633 def do_sites (self,action="add"):
634 for site_spec in self.plc_spec['sites']:
635 test_site = TestSite (self,site_spec)
636 if (action != "add"):
637 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
638 test_site.delete_site()
639 # deleted with the site
640 #test_site.delete_users()
643 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
644 test_site.create_site()
645 test_site.create_users()
def delete_all_sites (self):
    "Delete all sites in PLC, and related objects"
    print 'auth_root',self.auth_root()
    # fetch only the ids, then delete each site through the API
    site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
    for site_id in site_ids:
        print 'Deleting site_id',site_id
        self.apiserver.DeleteSite(self.auth_root(),site_id)
658 "create nodes with PLCAPI"
659 return self.do_nodes()
660 def delete_nodes (self):
661 "delete nodes with PLCAPI"
662 return self.do_nodes(action="delete")
664 def do_nodes (self,action="add"):
665 for site_spec in self.plc_spec['sites']:
666 test_site = TestSite (self,site_spec)
668 utils.header("Deleting nodes in site %s"%test_site.name())
669 for node_spec in site_spec['nodes']:
670 test_node=TestNode(self,test_site,node_spec)
671 utils.header("Deleting %s"%test_node.name())
672 test_node.delete_node()
674 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
675 for node_spec in site_spec['nodes']:
676 utils.pprint('Creating node %s'%node_spec,node_spec)
677 test_node = TestNode (self,test_site,node_spec)
678 test_node.create_node ()
681 def nodegroups (self):
682 "create nodegroups with PLCAPI"
683 return self.do_nodegroups("add")
684 def delete_nodegroups (self):
685 "delete nodegroups with PLCAPI"
686 return self.do_nodegroups("delete")
def translate_timestamp (start,grain,timestamp):
    """Timestamps below TestPlc.YEAR are treated as relative (in grains) and
    shifted onto *start*; larger values are taken as absolute and returned as-is."""
    if timestamp >= TestPlc.YEAR:
        return timestamp
    return start + timestamp * grain
def timestamp_printable (timestamp):
    """Render a unix timestamp as 'MM-DD HH:MM:SS UTC'."""
    time_format = '%m-%d %H:%M:%S UTC'
    return time.strftime(time_format, time.gmtime(timestamp))
699 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
701 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
702 print 'API answered grain=',grain
703 start=(now/grain)*grain
705 # find out all nodes that are reservable
706 nodes=self.all_reservable_nodenames()
708 utils.header ("No reservable node found - proceeding without leases")
711 # attach them to the leases as specified in plc_specs
712 # this is where the 'leases' field gets interpreted as relative of absolute
713 for lease_spec in self.plc_spec['leases']:
714 # skip the ones that come with a null slice id
715 if not lease_spec['slice']: continue
716 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
717 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
718 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
719 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
720 if lease_addition['errors']:
721 utils.header("Cannot create leases, %s"%lease_addition['errors'])
724 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
725 (nodes,lease_spec['slice'],
726 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
727 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
def delete_leases (self):
    "remove all leases in the myplc side"
    # collect every lease id known to the API, then delete them in one call
    lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
    utils.header("Cleaning leases %r"%lease_ids)
    self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
738 def list_leases (self):
739 "list all leases known to the myplc"
740 leases = self.apiserver.GetLeases(self.auth_root())
743 current=l['t_until']>=now
744 if self.options.verbose or current:
745 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
746 TestPlc.timestamp_printable(l['t_from']),
747 TestPlc.timestamp_printable(l['t_until'])))
750 # create nodegroups if needed, and populate
751 def do_nodegroups (self, action="add"):
752 # 1st pass to scan contents
754 for site_spec in self.plc_spec['sites']:
755 test_site = TestSite (self,site_spec)
756 for node_spec in site_spec['nodes']:
757 test_node=TestNode (self,test_site,node_spec)
758 if node_spec.has_key('nodegroups'):
759 nodegroupnames=node_spec['nodegroups']
760 if isinstance(nodegroupnames,StringTypes):
761 nodegroupnames = [ nodegroupnames ]
762 for nodegroupname in nodegroupnames:
763 if not groups_dict.has_key(nodegroupname):
764 groups_dict[nodegroupname]=[]
765 groups_dict[nodegroupname].append(test_node.name())
766 auth=self.auth_root()
768 for (nodegroupname,group_nodes) in groups_dict.iteritems():
770 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
771 # first, check if the nodetagtype is here
772 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
774 tag_type_id = tag_types[0]['tag_type_id']
776 tag_type_id = self.apiserver.AddTagType(auth,
777 {'tagname':nodegroupname,
778 'description': 'for nodegroup %s'%nodegroupname,
780 print 'located tag (type)',nodegroupname,'as',tag_type_id
782 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
784 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
785 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
786 # set node tag on all nodes, value='yes'
787 for nodename in group_nodes:
789 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
791 traceback.print_exc()
792 print 'node',nodename,'seems to already have tag',nodegroupname
795 expect_yes = self.apiserver.GetNodeTags(auth,
796 {'hostname':nodename,
797 'tagname':nodegroupname},
798 ['value'])[0]['value']
799 if expect_yes != "yes":
800 print 'Mismatch node tag on node',nodename,'got',expect_yes
803 if not self.options.dry_run:
804 print 'Cannot find tag',nodegroupname,'on node',nodename
808 print 'cleaning nodegroup',nodegroupname
809 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
811 traceback.print_exc()
815 # return a list of tuples (nodename,qemuname)
816 def all_node_infos (self) :
818 for site_spec in self.plc_spec['sites']:
819 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
820 for node_spec in site_spec['nodes'] ]
823 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
824 def all_reservable_nodenames (self):
826 for site_spec in self.plc_spec['sites']:
827 for node_spec in site_spec['nodes']:
828 node_fields=node_spec['node_fields']
829 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
830 res.append(node_fields['hostname'])
833 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
834 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
835 if self.options.dry_run:
839 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
840 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
841 # the nodes that haven't checked yet - start with a full list and shrink over time
842 tocheck = self.all_hostnames()
843 utils.header("checking nodes %r"%tocheck)
844 # create a dict hostname -> status
845 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
848 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
850 for array in tocheck_status:
851 hostname=array['hostname']
852 boot_state=array['boot_state']
853 if boot_state == target_boot_state:
854 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
856 # if it's a real node, never mind
857 (site_spec,node_spec)=self.locate_hostname(hostname)
858 if TestNode.is_real_model(node_spec['node_fields']['model']):
859 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
861 boot_state = target_boot_state
862 elif datetime.datetime.now() > graceout:
863 utils.header ("%s still in '%s' state"%(hostname,boot_state))
864 graceout=datetime.datetime.now()+datetime.timedelta(1)
865 status[hostname] = boot_state
867 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
870 if datetime.datetime.now() > timeout:
871 for hostname in tocheck:
872 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
874 # otherwise, sleep for a while
876 # only useful in empty plcs
879 def nodes_booted(self):
880 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
882 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
884 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
885 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
886 vservername=self.vservername
889 local_key = "keys/%(vservername)s-debug.rsa"%locals()
892 local_key = "keys/key1.rsa"
893 node_infos = self.all_node_infos()
894 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
895 for (nodename,qemuname) in node_infos:
896 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
897 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
898 (timeout_minutes,silent_minutes,period))
900 for node_info in node_infos:
901 (hostname,qemuname) = node_info
902 # try to run 'hostname' in the node
903 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
904 # don't spam logs - show the command only after the grace period
905 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
907 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
909 node_infos.remove(node_info)
911 # we will have tried real nodes once, in case they're up - but if not, just skip
912 (site_spec,node_spec)=self.locate_hostname(hostname)
913 if TestNode.is_real_model(node_spec['node_fields']['model']):
914 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
915 node_infos.remove(node_info)
918 if datetime.datetime.now() > timeout:
919 for (hostname,qemuname) in node_infos:
920 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
922 # otherwise, sleep for a while
924 # only useful in empty plcs
927 def ssh_node_debug(self):
928 "Tries to ssh into nodes in debug mode with the debug ssh key"
929 return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)
931 def ssh_node_boot(self):
932 "Tries to ssh into nodes in production mode with the root ssh key"
933 return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
936 def qemu_local_init (self):
937 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
941 "all nodes: invoke GetBootMedium and store result locally"
944 def qemu_local_config (self):
945 "all nodes: compute qemu config qemu.conf and store it locally"
948 def nodestate_reinstall (self):
949 "all nodes: mark PLCAPI boot_state as reinstall"
952 def nodestate_safeboot (self):
953 "all nodes: mark PLCAPI boot_state as safeboot"
956 def nodestate_boot (self):
957 "all nodes: mark PLCAPI boot_state as boot"
960 def nodestate_show (self):
961 "all nodes: show PLCAPI boot_state"
964 def qemu_export (self):
965 "all nodes: push local node-dep directory on the qemu box"
968 ### check hooks : invoke scripts from hooks/{node,slice}
969 def check_hooks_node (self):
970 return self.locate_first_node().check_hooks()
971 def check_hooks_sliver (self) :
972 return self.locate_first_sliver().check_hooks()
974 def check_hooks (self):
975 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
976 return self.check_hooks_node() and self.check_hooks_sliver()
979 def do_check_initscripts(self):
981 for slice_spec in self.plc_spec['slices']:
982 if not slice_spec.has_key('initscriptstamp'):
984 stamp=slice_spec['initscriptstamp']
985 for nodename in slice_spec['nodenames']:
986 (site,node) = self.locate_node (nodename)
987 # xxx - passing the wrong site - probably harmless
988 test_site = TestSite (self,site)
989 test_slice = TestSlice (self,test_site,slice_spec)
990 test_node = TestNode (self,test_site,node)
991 test_sliver = TestSliver (self, test_node, test_slice)
992 if not test_sliver.check_initscript_stamp(stamp):
996 def check_initscripts(self):
997 "check that the initscripts have triggered"
998 return self.do_check_initscripts()
1000 def initscripts (self):
1001 "create initscripts with PLCAPI"
1002 for initscript in self.plc_spec['initscripts']:
1003 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1004 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1007 def delete_initscripts (self):
1008 "delete initscripts with PLCAPI"
1009 for initscript in self.plc_spec['initscripts']:
1010 initscript_name = initscript['initscript_fields']['name']
1011 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1013 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1014 print initscript_name,'deleted'
1016 print 'deletion went wrong - probably did not exist'
1021 "create slices with PLCAPI"
1022 return self.do_slices()
1024 def delete_slices (self):
1025 "delete slices with PLCAPI"
1026 return self.do_slices("delete")
1028 def do_slices (self, action="add"):
1029 for slice in self.plc_spec['slices']:
1030 site_spec = self.locate_site (slice['sitename'])
1031 test_site = TestSite(self,site_spec)
1032 test_slice=TestSlice(self,test_site,slice)
1034 utils.header("Deleting slices in site %s"%test_site.name())
1035 test_slice.delete_slice()
1037 utils.pprint("Creating slice",slice)
1038 test_slice.create_slice()
1039 utils.header('Created Slice %s'%slice['slice_fields']['name'])
def ssh_slice(self):
    "tries to ssh-enter the slice with the user key, to ensure slice creation"
    # NOTE(review): body not visible in this view — steps like this are
    # typically dispatched through slice_mapper; confirm before editing
def keys_clear_known_hosts (self):
    "remove test nodes entries from the local known_hosts file"
    # NOTE(review): body not visible in this view
def qemu_start (self) :
    "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
    # NOTE(review): body not visible in this view — presumably dispatched
    # through node_mapper to TestNode; confirm before editing
def check_tcp (self):
    "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
    # NOTE(review): the loop over 'specs' (binding 'spec' and 'port') and the
    # failure branches are not visible in this view
    specs = self.plc_spec['tcp_test']
    # server side: start a tcp server in the server sliver
    s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
    if not s_test_sliver.run_tcp_server(port,timeout=10):
    # idem for the client side
    # NOTE(review): this looks up the *server* spec again — it should almost
    # certainly be spec['client_node'],spec['client_slice']; confirm and fix
    c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
    if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
def plcsh_stress_test (self):
    "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # NOTE(review): the initialization of 'command' (presumably to the script
    # invocation) is not visible in this view
    command += " -- --check"
    if self.options.size == 1:
        command += " --tiny"
    # step result: True when the in-guest run exits 0
    return ( self.run_in_guest(command) == 0)
# populate runs the same utility with slightly different options
1088 # in particular runs with --preserve (dont cleanup) and without --check
1089 # also it gets run twice, once with the --foreign option for creating fake foreign entries
def sfa_install(self):
    "yum install sfa, sfa-plc and sfa-client"
    # install the whole SFA stack, then verify the rpms actually landed;
    # the step succeeds only if all four packages are queryable
    self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")
    return self.run_in_guest("rpm -q sfa sfa-client sfa-plc sfa-sfatables")==0
def sfa_dbclean(self):
    "thoroughly wipes off the SFA database"
    # bug fix: the comparison result used to be computed and dropped, so the
    # step always returned None (reads as failure to the step framework);
    # propagate the command's exit status instead
    return self.run_in_guest("sfa-nuke-plc.py")==0
def sfa_plcclean(self):
    "cleans the PLC entries that were created as a side effect of running the script"
    # remove the slice and the user that the SFA scenario created in the PLC db
    sfa_spec=self.plc_spec['sfa']
    slicename='%s_%s'%(sfa_spec['login_base'],sfa_spec['slicename'])
    # NOTE(review): bare 'except:' also swallows typos and KeyboardInterrupt;
    # narrowing to the apiserver's fault exception would be safer
    try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
    except: print "Slice %s already absent from PLC db"%slicename
    username="%s@%s"%(sfa_spec['regularuser'],sfa_spec['domain'])
    try: self.apiserver.DeletePerson(self.auth_root(),username)
    except: print "User %s already absent from PLC db"%username
    print "REMEMBER TO RUN sfa_import AGAIN"
def sfa_uninstall(self):
    "uses rpm to uninstall sfa - ignore result"
    # remove packages, state, config and logs; command failures are
    # deliberately ignored (no return value is checked)
    self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
    self.run_in_guest("rm -rf /var/lib/sfa")
    self.run_in_guest("rm -rf /etc/sfa")
    self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
    # sfa-plc can survive the grouped erase; force it without scriptlets
    self.run_in_guest("rpm -e --noscripts sfa-plc")
1130 ### run unit tests for SFA
1131 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1132 # Running Transaction
1133 # Transaction couldn't start:
1134 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1135 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1136 # no matter how many Gbs are available on the testplc
1137 # could not figure out what's wrong, so...
1138 # if the yum install phase fails, consider the test is successful
1139 # other combinations will eventually run it hopefully
def sfa_utest(self):
    "yum install sfa-tests and run SFA unittests"
    self.run_in_guest("yum -y install sfa-tests")
    # failed to install - forget it
    # (per the comment block above: the yum install of sfa-tests is known to
    # fail spuriously, so a failed install is treated as non-fatal)
    # NOTE(review): the body of this guard (presumably an early 'return True')
    # is not fully visible in this view
    if self.run_in_guest("rpm -q sfa-tests")!=0:
        utils.header("WARNING: SFA unit tests failed to install, ignoring")
    return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1151 dirname="conf.%s"%self.plc_spec['name']
1152 if not os.path.isdir(dirname):
1153 utils.system("mkdir -p %s"%dirname)
1154 if not os.path.isdir(dirname):
1155 raise "Cannot create config dir for plc %s"%self.name()
def conffile(self,filename):
    # path of <filename> inside this plc's config directory
    base_dir = self.confdir()
    return "%s/%s"%(base_dir,filename)
def confsubdir(self,dirname,clean):
    # build (and optionally wipe first) a subdirectory of the config dir
    # NOTE(review): the 'if clean:' guard around the rm, and the trailing
    # 'return subdirname', are not visible in this view
    subdirname="%s/%s"%(self.confdir(),dirname)
    utils.system("rm -rf %s"%subdirname)
    if not os.path.isdir(subdirname):
        utils.system("mkdir -p %s"%subdirname)
    if not os.path.isdir(subdirname):
        # NOTE(review): string raise — should be an Exception instance
        raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
def conffile_clean (self,filename):
    # remove one file from this plc's config dir; True when rm succeeded
    full_path = self.conffile(filename)
    status = utils.system("rm -rf %s"%full_path)
    return status == 0
def sfa_configure(self):
    "run sfa-config-tty"
    # build an sfa-config-tty input script locally, then pipe it into the
    # guest; each 'e VAR\nVALUE' pair edits one variable
    # NOTE(review): several list entries and the false-branch of 'val' are
    # not visible in this view
    tmpname=self.conffile("sfa-config-tty")
    fileconf=open(tmpname,'w')
    for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                 'SFA_INTERFACE_HRN',
                 # 'SFA_REGISTRY_LEVEL1_AUTH',
                 'SFA_REGISTRY_HOST',
                 'SFA_AGGREGATE_HOST',
                 'SFA_PLC_DB_PASSWORD',
        fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
    # the way plc_config handles booleans just sucks..
    for var in ['SFA_API_DEBUG']:
        if self.plc_spec['sfa'][var]: val='true'
        fileconf.write ('e %s\n%s\n'%(var,val))
    fileconf.write('w\n')   # write the config
    fileconf.write('R\n')   # restart
    fileconf.write('q\n')   # quit sfa-config-tty
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
def aggregate_xml_line(self):
    # one <aggregate/> element advertising this plc's SFA aggregate (port 12346)
    addr = self.vserverip
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<aggregate addr="%s" hrn="%s" port="12346"/>' % (addr, hrn)
def registry_xml_line(self):
    # one <registry/> element advertising this plc's SFA registry (port 12345)
    addr = self.vserverip
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (addr, hrn)
# a cross step that takes all other plcs in argument
def cross_sfa_configure(self, other_plcs):
    "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
    # of course with a single plc, other_plcs is an empty list
    # build the two xml files locally in the conf dir ...
    agg_fname=self.conffile("agg.xml")
    file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
        " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%agg_fname)
    reg_fname=self.conffile("reg.xml")
    file(reg_fname,"w").write("<registries>%s</registries>\n" % \
        " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%reg_fname)
    # ... then push both under /etc/sfa inside the guest; succeed only if
    # both copies succeed
    return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
       and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
def sfa_import(self):
    "use sfa-import-plc.py to seed the SFA registry from the PLC db"
    # NOTE(review): 'auth' is computed but unused in the visible code —
    # it was used by the commented-out key-copy below
    auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return self.run_in_guest('sfa-import-plc.py')==0
    # not needed anymore
    # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
def sfa_start(self):
    "service sfa start"
    # start the sfa service in the guest; True when the init script exits 0
    return self.run_in_guest('service sfa start')==0
def sfi_configure(self):
    "Create /root/.sfi on the plc side"
    # materialize the sfi client configuration (PI private key, sfi_config,
    # person.xml, slice.xml, slice.rspec) in a local conf subdir, then push
    # the whole directory to /root/.sfi inside the guest
    # NOTE(review): several lines (fileconf.close() calls, the bindings of
    # 'slice_record' and 'slice_rspec', the final return) are not visible in
    # this view
    sfa_spec=self.plc_spec['sfa']
    "sfi client configuration"
    dir_name=self.confsubdir("dot-sfi",clean=True)
    # the PI's private key
    file_name=dir_name + os.sep + sfa_spec['piuser'] + '.pkey'
    fileconf=open(file_name,'w')
    fileconf.write (self.plc_spec['keys'][0]['private'])
    utils.header ("(Over)wrote %s"%file_name)
    # sfi_config proper
    file_name=dir_name + os.sep + 'sfi_config'
    fileconf=open(file_name,'w')
    SFI_AUTH="%s.%s"%(sfa_spec['SFA_REGISTRY_ROOT_AUTH'],sfa_spec['login_base'])
    fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
    fileconf.write('\n')
    SFI_USER=SFI_AUTH + '.' + sfa_spec['piuser']
    fileconf.write ("SFI_USER='%s'"%SFI_USER)
    fileconf.write('\n')
    SFI_REGISTRY='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12345/'
    fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
    fileconf.write('\n')
    SFI_SM='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12347/'
    fileconf.write ("SFI_SM='%s'"%SFI_SM)
    fileconf.write('\n')
    utils.header ("(Over)wrote %s"%file_name)
    # person.xml, from the spec
    file_name=dir_name + os.sep + 'person.xml'
    fileconf=open(file_name,'w')
    for record in sfa_spec['sfa_person_xml']:
        person_record=record
    fileconf.write(person_record)
    fileconf.write('\n')
    utils.header ("(Over)wrote %s"%file_name)
    # slice.xml, from the spec
    file_name=dir_name + os.sep + 'slice.xml'
    fileconf=open(file_name,'w')
    for record in sfa_spec['sfa_slice_xml']:
        #slice_record=sfa_spec['sfa_slice_xml']
    fileconf.write(slice_record)
    fileconf.write('\n')
    utils.header ("(Over)wrote %s"%file_name)
    # slice.rspec, from the spec
    file_name=dir_name + os.sep + 'slice.rspec'
    fileconf=open(file_name,'w')
    for (key, value) in sfa_spec['sfa_slice_rspec'].items():
    fileconf.write(slice_rspec)
    fileconf.write('\n')
    utils.header ("(Over)wrote %s"%file_name)
    # push to the remote root's .sfi
    location = "root/.sfi"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs(dir_name, remote, recursive=True)
def sfi_clean (self):
    "clean up /root/.sfi on the plc side"
    # best-effort wipe; note the command's status is not propagated
    command = "rm -rf /root/.sfi"
    self.run_in_guest(command)
def sfa_add_user(self):
    "run sfi.py add using person.xml"
    # delegate to the SFA user helper bound to this plc
    helper = TestUserSfa(self)
    return helper.add_user()
def sfa_update_user(self):
    "run sfi.py update using person.xml"
    # delegate to the SFA user helper bound to this plc
    helper = TestUserSfa(self)
    return helper.update_user()
def sfa_add_slice(self):
    "run sfi.py add (on Registry) from slice.xml"
    # NOTE(review): body not visible in this view — likely delegates to
    # TestSliceSfa; confirm before editing
def sfa_discover(self):
    "discover resources into resources_in.rspec"
    # NOTE(review): body not visible in this view
def sfa_create_slice(self):
    "run sfi.py create (on SM) - 1st time"
    # NOTE(review): body not visible in this view
def sfa_update_slice(self):
    "run sfi.py create (on SM) on existing object"
    # NOTE(review): body not visible in this view
1341 "run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
1342 sfa_spec=self.plc_spec['sfa']
1343 auth=sfa_spec['SFA_REGISTRY_ROOT_AUTH']
1345 self.run_in_guest("sfi.py -d /root/.sfi/ list %s.%s"%(auth,sfa_spec['login_base']))==0 and \
1346 self.run_in_guest("sfi.py -d /root/.sfi/ show %s.%s"%(auth,sfa_spec['login_base']))==0 and \
1347 self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
1348 self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0
def ssh_slice_sfa(self):
    "tries to ssh-enter the SFA slice"
    # NOTE(review): body not visible in this view — presumably dispatched
    # through slice_sfa_mapper; confirm before editing
def sfa_delete_user(self):
    "run sfi.py delete (on SM) for user"
    # mirror of sfa_add_user / sfa_update_user, for deletion
    return TestUserSfa(self).delete_user()
def sfa_delete_slice(self):
    "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
    # NOTE(review): the slice-deletion commands are not visible in this view;
    # the 'service sfa stop' line below likely belongs to a separate
    # sfa_stop step whose def header is also hidden — confirm before editing
    self.run_in_guest('service sfa stop')==0
def populate (self):
    "creates random entries in the PLCAPI"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # NOTE(review): the initialization of 'command' is not visible here
    command += " -- --preserve --short-names"
    local = (self.run_in_guest(command) == 0);
    # second run with --foreign
    # note: 'remote' is rebound from the path string to a boolean here
    command += ' --foreign'
    remote = (self.run_in_guest(command) == 0);
    return ( local and remote)
def gather_logs (self):
    "gets all possible logs from plc's/qemu node's/slice's for future reference"
    # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
    # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
    # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
    # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
    # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
    # (1.a)
    print "-------------------- TestPlc.gather_logs : PLC's /var/log"
    self.gather_var_logs ()
    # (1.b)
    print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
    self.gather_pgsql_logs ()
    # (2)
    print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            test_node.gather_qemu_logs()
    # (3)
    print "-------------------- TestPlc.gather_logs : nodes's /var/log"
    self.gather_nodes_var_logs()
    # (4)
    print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
    self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    # fetch each sliver's /var/log as a tar stream and unpack it locally
    for sliver in self.all_sliver_objs():
        tar_command = sliver.tar_var_logs()
        utils.system("mkdir -p logs/sliver.var-log.%s"%sliver.name())
        pipeline = tar_command + " | tar -C logs/sliver.var-log.%s -xf -"%sliver.name()
        utils.system(pipeline)
def gather_var_logs (self):
    # mirror the plc's /var/log under logs/myplc.var-log.<name>
    utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
    tar_out = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_out + "| tar -C logs/myplc.var-log.%s -xf -"%self.name())
    # open up the httpd subdir so non-root users can browse the logs
    utils.system("chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name())
def gather_pgsql_logs (self):
    # mirror the plc's postgres logs under logs/myplc.pgsql-log.<name>
    utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
    tar_out = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_out + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name())
def gather_nodes_var_logs (self):
    # grab /var/log from every node, over ssh with the test key
    for site_spec in self.plc_spec['sites']:
        site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode(self,site,node_spec)
            ssh = TestSsh (node.name(),key="keys/key1.rsa")
            fetch = ssh.actual_command("tar -C /var/log -cf - .")
            fetch = fetch + "| tar -C logs/node.var-log.%s -xf -"%node.name()
            utils.system("mkdir -p logs/node.var-log.%s"%node.name())
            utils.system(fetch)
# returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    # uses options.dbname if it is found
    # NOTE(review): the fallback naming logic (timestamp-based, using 't')
    # is only partly visible in this view
    name=self.options.dbname
    if not isinstance(name,StringTypes):
        t=datetime.datetime.now()
    return "/root/%s-%s.sql"%(database,name)
def plc_db_dump(self):
    'dump the planetlab5 DB in /root in the PLC - filename has time'
    # NOTE(review): "planetab5" (sic) is a typo for planetlab5, kept as-is
    # because plc_db_restore derives its filename from the same literal —
    # fix both together if at all
    dump=self.dbfile("planetab5")
    self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
    utils.header('Dumped planetlab5 database in %s'%dump)
def plc_db_restore(self):
    'restore the planetlab5 DB - looks broken, but run -n might help'
    # uses the same "planetab5" literal as plc_db_dump so both derive the
    # same filename (both share the typo deliberately)
    dump=self.dbfile("planetab5")
    ##stop httpd service
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    # drop and recreate the db, then replay the dump into it
    self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
    self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
    ##starting httpd service
    self.run_in_guest('service httpd start')
    utils.header('Database restored from ' + dump)
# convenience steps: standby_N pauses the test run for N minutes
# NOTE(review): the @standby_generic decorator lines (which extract N from
# the function name and perform the actual sleep) are not visible in this
# view — each def below is expected to carry one
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass