1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBox import TestBox
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
23 from TestUserSfa import TestUserSfa
25 # step methods must take (self) and return a boolean (options is a member of the class)
def standby(minutes,dry_run):
    """Pause the whole test run for <minutes> minutes.
    NOTE(review): the dry_run handling lines are elided from this view -
    presumably the sleep is skipped when dry_run is set; confirm."""
    utils.header('Entering StandBy for %d mn'%minutes)
    time.sleep(60*minutes)
def standby_generic (func):
    # factory for the standby_<N> steps: the duration is parsed out of the
    # decorated function's name (e.g. standby_5 -> 5 minutes)
    # NOTE(review): the inner 'def actual(self)' wrapper and its return are
    # elided from this view; 'self' below belongs to that wrapper - confirm.
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)
def node_mapper (method):
    # promote a TestNode method into a TestPlc step that applies it to every
    # node of every site; any single node failure fails the whole step
    # NOTE(review): the inner wrapper def, the 'overall' init and the returns
    # are elided from this view - confirm against the full file.
    node_method = TestNode.__dict__[method.__name__]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self,test_site,node_spec)
            if not node_method(test_node): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
def slice_mapper (method):
    # promote a TestSlice method into a TestPlc step applied to every slice
    # NOTE(review): the inner wrapper def, the 'overall' init and the returns
    # are elided from this view.
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
def slice_sfa_mapper (method):
    # same as slice_mapper, but for the SFA flavour of slices
    # NOTE(review): the inner wrapper def, the 'overall' init and the returns
    # are elided from this view.
    slice_method = TestSliceSfa.__dict__[method.__name__]
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSliceSfa(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
89 'show', 'local_pre', SEP,
90 'vs_delete','vs_create','plc_install', 'plc_configure', 'plc_start', SEP,
91 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
92 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
93 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', 'qemu_export', 'qemu_kill_all', 'qemu_start', SEP,
94 'sfa_install', 'sfa_configure', 'cross_sfa_configure', 'sfa_import', 'sfa_start', SEPSFA,
95 'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
96 'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
97 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
98 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
99 # but as the stress test might take a while, we sometimes missed the debug mode..
100 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
101 'ssh_node_boot', 'ssh_slice', 'check_initscripts', SEP,
102 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
103 'check_tcp', 'check_hooks@1', SEP,
104 'force_gather_logs', 'force_local_post', SEP,
108 'show_boxes', 'local_list','local_rel','local_rel_plc','local_rel_qemu',SEP,
109 'plc_stop', 'vs_start', 'vs_stop', SEP,
110 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
111 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
112 'delete_leases', 'list_leases', SEP,
114 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
115 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_mine', SEP,
116 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEP,
117 'plc_db_dump' , 'plc_db_restore', SEP,
118 'standby_1 through 20',SEP,
122 def printable_steps (list):
123 single_line=" ".join(list)+" "
124 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
126 def valid_step (step):
127 return step != SEP and step != SEPSFA
129 # turn off the sfa-related steps when build has skipped SFA
130 # this is originally for centos5 as recent SFAs won't build on this platformb
    def check_whether_build_has_sfa (rpms_url):
        """Probe the build's rpm repository for sfa packages; when absent,
        demote every sfa-related step from default_steps to other_steps."""
        # warning, we're now building 'sface' so let's be a bit more picky
        retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
        # full builds are expected to return with 0 here
        # NOTE(review): the test on retcod is elided from this view - the
        # demotion below presumably runs only when the grep found nothing.
        # move all steps containing 'sfa' from default_steps to other_steps
        sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
        TestPlc.other_steps += sfa_steps
        for step in sfa_steps: TestPlc.default_steps.remove(step)
    def __init__ (self,plc_spec,options):
        """plc_spec : config dict describing this myplc under test
        options  : parsed command-line options (dry_run, buildname, ...)"""
        self.plc_spec=plc_spec
        # NOTE(review): the line storing options as self.options is elided
        # from this view; it is used right below - confirm.
        self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
        self.vserverip=plc_spec['vserverip']
        self.vservername=plc_spec['vservername']
        # API endpoint of the plc running inside the vserver
        self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        # NOTE(review): this raise sits under an elided mode check
        raise Exception,'chroot-based myplc testing is deprecated'
        self.apiserver=TestApiserver(self.url,options.dry_run)
156 name=self.plc_spec['name']
157 return "%s.%s"%(name,self.vservername)
160 return self.plc_spec['hostname']
163 return self.test_ssh.is_local()
165 # define the API methods on this object through xmlrpc
166 # would help, but not strictly necessary
170 def actual_command_in_guest (self,command):
171 return self.test_ssh.actual_command(self.host_to_guest(command))
173 def start_guest (self):
174 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
176 def stop_guest (self):
177 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))
179 def run_in_guest (self,command):
180 return utils.system(self.actual_command_in_guest(command))
182 def run_in_host (self,command):
183 return self.test_ssh.run_in_buildname(command)
185 #command gets run in the vserver
186 def host_to_guest(self,command):
187 return "vserver %s exec %s"%(self.vservername,command)
189 #start/stop the vserver
190 def start_guest_in_host(self):
191 return "vserver %s start"%(self.vservername)
193 def stop_guest_in_host(self):
194 return "vserver %s stop"%(self.vservername)
197 def run_in_guest_piped (self,local,remote):
198 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
    def auth_root (self):
        """PLCAPI auth struct for the root account, built from plc_spec."""
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role']
                # NOTE(review): the closing brace is elided from this view
    def locate_site (self,sitename):
        """Look up a site spec by name or login_base; raises when not found.
        NOTE(review): the return statements under each test are elided from
        this view."""
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
            if site['site_fields']['login_base'] == sitename:
        raise Exception,"Cannot locate site %s"%sitename
    def locate_node (self,nodename):
        """Look up (site,node) specs by node name; raises when not found.
        NOTE(review): the return under the match is elided from this view."""
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
        raise Exception,"Cannot locate node %s"%nodename
    def locate_hostname (self,hostname):
        """Look up (site,node) specs by node hostname; raises when not found.
        NOTE(review): the return under the match is elided from this view."""
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
        raise Exception,"Cannot locate hostname %s"%hostname
    def locate_key (self,keyname):
        """Look up a key spec by name; raises when not found.
        NOTE(review): the return under the match is elided from this view."""
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
        raise Exception,"Cannot locate key %s"%keyname
    def locate_slice (self, slicename):
        """Look up a slice spec by slice name; raises when not found.
        NOTE(review): the return under the match is elided from this view."""
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
        raise Exception,"Cannot locate slice %s"%slicename
    def all_sliver_objs (self):
        """Build a TestSliver for every (slice,node) pair in plc_spec.
        NOTE(review): the result-list init and the return are elided from
        this view."""
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
    def locate_sliver_obj (self,nodename,slicename):
        """Build the TestSliver object for <slicename> on <nodename>."""
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        # build the intermediate test objects
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)
    def locate_first_node(self):
        """TestNode for the first node of the first slice.
        NOTE(review): the return is elided from this view."""
        nodename=self.plc_spec['slices'][0]['nodenames'][0]
        (site,node) = self.locate_node(nodename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
265 def locate_first_sliver (self):
266 slice_spec=self.plc_spec['slices'][0]
267 slicename=slice_spec['slice_fields']['name']
268 nodename=slice_spec['nodenames'][0]
269 return self.locate_sliver_obj(nodename,slicename)
    # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        # NOTE(review): the 'tuples'/'result' inits, the has_key branch body
        # and the return are elided from this view.
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                # only qemu (non-real) nodes live on a host box
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        for (box,node) in tuples:
            if not result.has_key(box):
            result[box].append(node)
    # a step for checking this stuff
    def show_boxes (self):
        'print summary of nodes location'
        # NOTE(review): the trailing 'return True' is elided from this view
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
    # make this a valid step
    def qemu_kill_all(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # NOTE(review): the trailing 'return True' is elided from this view
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBox(box,self.options.buildname).qemu_kill_all(nodedir)
    # make this a valid step
    def qemu_list_all(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        # NOTE(review): the trailing 'return True' is elided from this view
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, kill all qemus on that host box
            TestBox(box,self.options.buildname).qemu_list_all()
    # kill only the right qemus
    def qemu_list_mine(self):
        'list qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            # NOTE(review): the per-box invocation lines are elided from this view
    # kill only the right qemus
    def qemu_kill_mine(self):
        'kill the qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            # NOTE(review): the per-box invocation lines are elided from this view
333 #################### display config
335 "show test configuration after localization"
336 self.display_pass (1)
337 self.display_pass (2)
341 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
    def display_pass (self,passno):
        """Dump the plc_spec over two passes (overview, then details).
        NOTE(review): several branch lines (the 'sites'/'slices'/'keys'
        dispatch and the passno tests) are elided from this view."""
        for (key,val) in self.plc_spec.iteritems():
            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
                self.display_site_spec(site)
                for node in site['nodes']:
                    self.display_node_spec(node)
            elif key=='initscripts':
                for initscript in val:
                    self.display_initscript_spec (initscript)
                    self.display_slice_spec (slice)
                    self.display_key_spec (key)
                if key not in ['sites','initscripts','slices','keys', 'sfa']:
                    print '+ ',key,':',val
    def display_site_spec (self,site):
        """Print one site spec; terse by default, full under --verbose.
        NOTE(review): the per-key dispatch headers and several branch bodies
        are elided from this view."""
        print '+ ======== site',site['site_fields']['name']
        for (k,v) in site.iteritems():
            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
                print '+ ','nodes : ',
                    print node['node_fields']['hostname'],'',
                    print user['name'],'',
            elif k == 'site_fields':
                print '+ login_base',':',v['login_base']
            elif k == 'address_fields':
    def display_initscript_spec (self,initscript):
        # one-line summary of an initscript spec
        print '+ ======== initscript',initscript['initscript_fields']['name']
    def display_key_spec (self,key):
        # one-line summary of a key spec
        print '+ ======== key',key['name']
    def display_slice_spec (self,slice):
        """Print one slice spec.
        NOTE(review): several per-key branches are elided from this view."""
        print '+ ======== slice',slice['slice_fields']['name']
        for (k,v) in slice.iteritems():
            elif k=='slice_fields':
                print '+ fields',':',
                print 'max_nodes=',v['max_nodes'],
    def display_node_spec (self,node):
        # one-line summary of a node spec; full dump when --verbose
        print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
        print "hostname=",node['node_fields']['hostname'],
        print "ip=",node['interface_fields']['ip']
        if self.options.verbose:
            utils.pprint("node details",node,depth=3)
    # another entry point for just showing the boxes involved
    def display_mapping (self):
        # NOTE(review): the trailing 'return True' is elided from this view
        TestPlc.display_mapping_plc(self.plc_spec)
    def display_mapping_plc (plc_spec):
        # summary of the plc: vserver location, IP, then each node
        # NOTE(review): the @staticmethod decorator line is elided from this view
        print '+ MyPLC',plc_spec['name']
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)
    def display_mapping_node (node_spec):
        # summary of one node: name, qemu box, hostname
        # NOTE(review): the @staticmethod decorator line is elided from this view
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']
443 def local_pre (self):
444 "run site-dependant pre-test script as defined in LocalTestResources"
445 from LocalTestResources import local_resources
446 return local_resources.step_pre(self)
448 def local_post (self):
449 "run site-dependant post-test script as defined in LocalTestResources"
450 from LocalTestResources import local_resources
451 return local_resources.step_post(self)
453 def local_list (self):
454 "run site-dependant list script as defined in LocalTestResources"
455 from LocalTestResources import local_resources
456 return local_resources.step_list(self)
458 def local_rel (self):
459 "run site-dependant release script as defined in LocalTestResources"
460 from LocalTestResources import local_resources
461 return local_resources.step_release(self)
463 def local_rel_plc (self):
464 "run site-dependant release script as defined in LocalTestResources"
465 from LocalTestResources import local_resources
466 return local_resources.step_release_plc(self)
468 def local_rel_qemu (self):
469 "run site-dependant release script as defined in LocalTestResources"
470 from LocalTestResources import local_resources
471 return local_resources.step_release_qemu(self)
474 "vserver delete the test myplc"
475 self.run_in_host("vserver --silent %s delete"%self.vservername)
    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def vs_create (self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        # NOTE(review): several lines (the is_local shortcut, the remote dir
        # naming, the test_env_options init and the try/except around the
        # reverse lookup) are elided from this view.
        # a full path for the local calls
        build_dir=os.path.dirname(sys.argv[0])
        # sometimes this is empty - set to "." in such a case
        if not build_dir: build_dir="."
        build_dir += "/build"
        # use a standard name - will be relative to remote buildname
        # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
        self.test_ssh.rmdir(build_dir)
        self.test_ssh.copy(build_dir,recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to vtest-init-vserver
        test_env_options += " -p %s"%self.options.personality
        test_env_options += " -d %s"%self.options.pldistro
        test_env_options += " -f %s"%self.options.fcdistro
        script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
        vserver_options += " --hostname %s"%vserver_hostname
        print "Cannot reverse lookup %s"%self.vserverip
        print "This is considered fatal, as this might pollute the test results"
        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
    def plc_install(self):
        "yum install myplc, noderepo, and the plain bootstrapfs"
        # NOTE(review): the pkgs_list init, the arch assignments under each
        # personality branch and the slicerepo conditional are elided from
        # this view.
        # workaround for getting pgsql8.2 on centos5
        if self.options.fcdistro == "centos5":
            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
        if self.options.personality == "linux32":
        elif self.options.personality == "linux64":
            raise Exception, "Unsupported personality %r"%self.options.personality
        nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
        pkgs_list.append ("slicerepo-%s"%nodefamily)
        pkgs_list.append ("myplc")
        pkgs_list.append ("noderepo-%s"%nodefamily)
        pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
        pkgs_string=" ".join(pkgs_list)
        self.run_in_guest("yum -y install %s"%pkgs_string)
        # yum can fail silently; double-check the packages with rpm -q
        return self.run_in_guest("rpm -q %s"%pkgs_string)==0
    def plc_configure(self):
        """Feed the plc_spec settings to plc-config-tty inside the guest.
        NOTE(review): part of the variable list, the loop closing bracket and
        the final return are elided from this view."""
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     # Above line was added for integrating SFA Testing
                     'PLC_RESERVATION_GRANULARITY',
                     'PLC_OMF_XMPP_SERVER',
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        # show, replay into the guest, then discard the answers file
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
580 self.run_in_guest('service plc start')
585 self.run_in_guest('service plc stop')
589 "start the PLC vserver"
594 "stop the PLC vserver"
598 # stores the keys from the config for further use
599 def keys_store(self):
600 "stores test users ssh keys in keys/"
601 for key_spec in self.plc_spec['keys']:
602 TestKey(self,key_spec).store_key()
605 def keys_clean(self):
606 "removes keys cached in keys/"
607 utils.system("rm -rf ./keys")
    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def keys_fetch(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        # NOTE(review): the 'dir'/'overall' inits, the mkdir body and the
        # final return are elided from this view.
        if not os.path.isdir(dir):
        vservername=self.vservername
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
            if self.test_ssh.fetch(src,dst) != 0: overall=False
627 "create sites with PLCAPI"
628 return self.do_sites()
630 def delete_sites (self):
631 "delete sites with PLCAPI"
632 return self.do_sites(action="delete")
    def do_sites (self,action="add"):
        """Create (action="add") or delete all sites & users from plc_spec.
        NOTE(review): the 'else' header and the final return are elided from
        this view."""
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # deleted with the site
                #test_site.delete_users()
                utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
                test_site.create_site()
                test_site.create_users()
    def delete_all_sites (self):
        "Delete all sites in PLC, and related objects"
        # NOTE(review): the trailing 'return True' is elided from this view
        print 'auth_root',self.auth_root()
        # fetch only the ids - that is all DeleteSite needs
        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
        for site_id in site_ids:
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)
659 "create nodes with PLCAPI"
660 return self.do_nodes()
661 def delete_nodes (self):
662 "delete nodes with PLCAPI"
663 return self.do_nodes(action="delete")
    def do_nodes (self,action="add"):
        """Create or delete all nodes from plc_spec.
        NOTE(review): the action test / 'else' header and the final return
        are elided from this view."""
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            utils.header("Deleting nodes in site %s"%test_site.name())
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                utils.header("Deleting %s"%test_node.name())
                test_node.delete_node()
            utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
            for node_spec in site_spec['nodes']:
                utils.pprint('Creating node %s'%node_spec,node_spec)
                test_node = TestNode (self,test_site,node_spec)
                test_node.create_node ()
682 def nodegroups (self):
683 "create nodegroups with PLCAPI"
684 return self.do_nodegroups("add")
685 def delete_nodegroups (self):
686 "delete nodegroups with PLCAPI"
687 return self.do_nodegroups("delete")
691 def translate_timestamp (start,grain,timestamp):
692 if timestamp < TestPlc.YEAR: return start+timestamp*grain
693 else: return timestamp
696 def timestamp_printable (timestamp):
697 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
        "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
        # NOTE(review): the enclosing def line, the 'now' init, the empty-node
        # early return and the 'ok'/return handling are elided from this view.
        grain=self.apiserver.GetLeaseGranularity(self.auth_root())
        print 'API answered grain=',grain
        # align the start of the lease window on a grain boundary
        start=(now/grain)*grain
        # find out all nodes that are reservable
        nodes=self.all_reservable_nodenames()
        utils.header ("No reservable node found - proceeding without leases")
        # attach them to the leases as specified in plc_specs
        # this is where the 'leases' field gets interpreted as relative of absolute
        for lease_spec in self.plc_spec['leases']:
            # skip the ones that come with a null slice id
            if not lease_spec['slice']: continue
            lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
            lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
            lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
                                                    lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
            if lease_addition['errors']:
                utils.header("Cannot create leases, %s"%lease_addition['errors'])
                utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
                                 (nodes,lease_spec['slice'],
                                  lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
                                  lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
732 def delete_leases (self):
733 "remove all leases in the myplc side"
734 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
735 utils.header("Cleaning leases %r"%lease_ids)
736 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
    def list_leases (self):
        "list all leases known to the myplc"
        # NOTE(review): the 'now' init and the loop header over leases are
        # elided from this view.
        leases = self.apiserver.GetLeases(self.auth_root())
            current=l['t_until']>=now
            if self.options.verbose or current:
                utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                                       TestPlc.timestamp_printable(l['t_from']),
                                                       TestPlc.timestamp_printable(l['t_until'])))
    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        """Create ("add") or tear down nodegroups and attach the relevant
        nodes through node tags.
        NOTE(review): numerous lines are elided from this view (the
        groups_dict init, several if/else/try headers, the overall flag and
        the final return) - read the full file before changing logic."""
        # 1st pass to scan contents
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
            # first, check if the nodetagtype is here
            tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
            tag_type_id = tag_types[0]['tag_type_id']
            tag_type_id = self.apiserver.AddTagType(auth,
                                                    {'tagname':nodegroupname,
                                                     'description': 'for nodegroup %s'%nodegroupname,
            print 'located tag (type)',nodegroupname,'as',tag_type_id
            nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
            self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
            print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
            # set node tag on all nodes, value='yes'
            for nodename in group_nodes:
                self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                traceback.print_exc()
                print 'node',nodename,'seems to already have tag',nodegroupname
                expect_yes = self.apiserver.GetNodeTags(auth,
                                                        {'hostname':nodename,
                                                         'tagname':nodegroupname},
                                                        ['value'])[0]['value']
                if expect_yes != "yes":
                    print 'Mismatch node tag on node',nodename,'got',expect_yes
                if not self.options.dry_run:
                    print 'Cannot find tag',nodegroupname,'on node',nodename
            print 'cleaning nodegroup',nodegroupname
            self.apiserver.DeleteNodeGroup(auth,nodegroupname)
            traceback.print_exc()
    # return a list of tuples (nodename,qemuname)
    def all_node_infos (self) :
        # NOTE(review): the node_infos init and the return are elided from
        # this view.
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                            for node_spec in site_spec['nodes'] ]
824 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
    def all_reservable_nodenames (self):
        # hostnames of all nodes whose node_type is 'reservable'
        # NOTE(review): the 'res' init and the return are elided from this view
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields=node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                    res.append(node_fields['hostname'])
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
        """Poll PLCAPI until every node reaches target_boot_state, or
        timeout_minutes elapse; stays quiet for the first silent_minutes.
        NOTE(review): several lines (the dry_run body, the polling loop
        header, the returns and the sleep) are elided from this view."""
        if self.options.dry_run:
        # compute absolute deadlines once, up front
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
        for array in tocheck_status:
            hostname=array['hostname']
            boot_state=array['boot_state']
            if boot_state == target_boot_state:
                utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                # if it's a real node, never mind
                (site_spec,node_spec)=self.locate_hostname(hostname)
                if TestNode.is_real_model(node_spec['node_fields']['model']):
                    utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                    boot_state = target_boot_state
            elif datetime.datetime.now() > graceout:
                utils.header ("%s still in '%s' state"%(hostname,boot_state))
                graceout=datetime.datetime.now()+datetime.timedelta(1)
            status[hostname] = boot_state
        tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
        if datetime.datetime.now() > timeout:
            for hostname in tocheck:
                utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
        # otherwise, sleep for a while
877 # only useful in empty plcs
880 def nodes_booted(self):
881 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
        """Try to ssh into every node until success or timeout; debug mode
        uses the debug ssh key, boot mode the regular root key.
        NOTE(review): the polling-loop header, the message/local_key branch
        headers, the success test, the returns and the sleep are elided from
        this view."""
        # compute absolute deadlines once, up front
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        vservername=self.vservername
        local_key = "keys/%(vservername)s-debug.rsa"%locals()
        local_key = "keys/key1.rsa"
        node_infos = self.all_node_infos()
        utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
        for (nodename,qemuname) in node_infos:
            utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                         (timeout_minutes,silent_minutes,period))
        for node_info in node_infos:
            (hostname,qemuname) = node_info
            # try to run 'hostname' in the node
            command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
            # don't spam logs - show the command only after the grace period
            success = utils.system ( command, silent=datetime.datetime.now() < graceout)
            utils.header('Successfully entered root@%s (%s)'%(hostname,message))
            node_infos.remove(node_info)
            # we will have tried real nodes once, in case they're up - but if not, just skip
            (site_spec,node_spec)=self.locate_hostname(hostname)
            if TestNode.is_real_model(node_spec['node_fields']['model']):
                utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                node_infos.remove(node_info)
        if datetime.datetime.now() > timeout:
            for (hostname,qemuname) in node_infos:
                utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
        # otherwise, sleep for a while
925 # only useful in empty plcs
928 def ssh_node_debug(self):
929 "Tries to ssh into nodes in debug mode with the debug ssh key"
930 return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)
932 def ssh_node_boot(self):
933 "Tries to ssh into nodes in production mode with the root ssh key"
934 return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
    def qemu_local_init (self):
        "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
        # NOTE(review): the body (presumably delegated via node_mapper) is
        # elided from this view
942 "all nodes: invoke GetBootMedium and store result locally"
    def qemu_local_config (self):
        "all nodes: compute qemu config qemu.conf and store it locally"
        # NOTE(review): the body (presumably delegated via node_mapper) is
        # elided from this view
    def nodestate_reinstall (self):
        "all nodes: mark PLCAPI boot_state as reinstall"
        # NOTE(review): the body (presumably delegated via node_mapper) is
        # elided from this view
    def nodestate_safeboot (self):
        "all nodes: mark PLCAPI boot_state as safeboot"
        # NOTE(review): the body (presumably delegated via node_mapper) is
        # elided from this view
    def nodestate_boot (self):
        "all nodes: mark PLCAPI boot_state as boot"
        # NOTE(review): the body (presumably delegated via node_mapper) is
        # elided from this view
    def nodestate_show (self):
        "all nodes: show PLCAPI boot_state"
        # NOTE(review): the body (presumably delegated via node_mapper) is
        # elided from this view
    def qemu_export (self):
        "all nodes: push local node-dep directory on the qemu box"
        # NOTE(review): the body (presumably delegated via node_mapper) is
        # elided from this view
969 ### check hooks : invoke scripts from hooks/{node,slice}
970 def check_hooks_node (self):
971 return self.locate_first_node().check_hooks()
972 def check_hooks_sliver (self) :
973 return self.locate_first_sliver().check_hooks()
975 def check_hooks (self):
976 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
977 return self.check_hooks_node() and self.check_hooks_sliver()
    def do_check_initscripts(self):
        """Check each slice's initscript stamp on every node it spans.
        NOTE(review): the 'overall' init/updates, the continue under the
        has_key test and the return are elided from this view."""
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptstamp'):
            stamp=slice_spec['initscriptstamp']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript_stamp(stamp):
997 def check_initscripts(self):
998 "check that the initscripts have triggered"
999 return self.do_check_initscripts()
    def initscripts (self):
        "create initscripts with PLCAPI"
        # NOTE(review): the trailing 'return True' is elided from this view
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
    def delete_initscripts (self):
        "delete initscripts with PLCAPI"
        # NOTE(review): the try/except headers around the deletion are elided
        # from this view.
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
                self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
                print initscript_name,'deleted'
                print 'deletion went wrong - probably did not exist'
1022 "create slices with PLCAPI"
1023 return self.do_slices()
1025 def delete_slices (self):
1026 "delete slices with PLCAPI"
1027 return self.do_slices("delete")
def do_slices (self, action="add"):
    # common machinery for the 'slices' and 'delete_slices' steps:
    # walks the slice specs and creates or deletes each slice
    for slice in self.plc_spec['slices']:
        site_spec = self.locate_site (slice['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice)
        # NOTE(review): the branch on 'action' selecting delete vs create
        # is not visible in this excerpt
        utils.header("Deleting slices in site %s"%test_site.name())
        test_slice.delete_slice()
        utils.pprint("Creating slice",slice)
        test_slice.create_slice()
        utils.header('Created Slice %s'%slice['slice_fields']['name'])
    # NOTE(review): the trailing 'return True' is not visible in this excerpt
def ssh_slice(self):
    "tries to ssh-enter the slice with the user key, to ensure slice creation"
    # NOTE(review): body not visible in this excerpt - presumably decorated
    # with @slice_mapper (see module header) like other per-slice steps

def keys_clear_known_hosts (self):
    "remove test nodes entries from the local known_hosts file"
    # NOTE(review): body not visible in this excerpt

def qemu_start (self) :
    "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
    # NOTE(review): body not visible in this excerpt - presumably decorated
    # with @node_mapper (see module header)
def check_tcp (self):
    "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
    specs = self.plc_spec['tcp_test']
    # NOTE(review): the loop over specs ('for spec in specs:'), the 'port'
    # extraction and the 'overall' bookkeeping are not visible in this excerpt
    # server side: start a tcp server in the server sliver
    s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
    if not s_test_sliver.run_tcp_server(port,timeout=10):
    # idem for the client side
    # NOTE(review): despite the comment above, the next line locates the
    # client sliver from spec['server_node']/spec['server_slice'] - it looks
    # like it should use 'client_node'/'client_slice'; confirm and fix upstream
    c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
    if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
def plcsh_stress_test (self):
    "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # NOTE(review): the initial 'command = location' assignment is not
    # visible in this excerpt (compare with populate() below)
    command += " -- --check"
    if self.options.size == 1:
        command +=  " --tiny"
    return ( self.run_in_guest(command) == 0)
1088 # populate runs the same utility without slightly different options
1089 # in particular runs with --preserve (dont cleanup) and without --check
1090 # also it gets run twice, once with the --foreign option for creating fake foreign entries
def sfa_install(self):
    "yum install sfa, sfa-plc and sfa-client"
    # install, then verify through rpm that every package actually landed
    self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")
    rc = self.run_in_guest("rpm -q sfa sfa-client sfa-plc sfa-sfatables")
    return rc == 0
def sfa_dbclean(self):
    "thoroughly wipes off the SFA database"
    # BUG FIX: the '==0' comparison used to be computed and discarded, so
    # this step always returned None, i.e. registered as failed; step
    # methods must return a boolean (see module header)
    return self.run_in_guest("sfa-nuke-plc.py")==0
def sfa_plcclean(self):
    "cleans the PLC entries that were created as a side effect of running the script"
    # remove the slice and the regular user that the sfa scenario created
    sfa_spec=self.plc_spec['sfa']
    slicename='%s_%s'%(sfa_spec['login_base'],sfa_spec['slicename'])
    try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
    except: print "Slice %s already absent from PLC db"%slicename
    username="%s@%s"%(sfa_spec['regularuser'],sfa_spec['domain'])
    try: self.apiserver.DeletePerson(self.auth_root(),username)
    except: print "User %s already absent from PLC db"%username
    # NOTE(review): the bare 'except:' clauses above also swallow unrelated
    # failures (auth errors, network) - narrowing them would be safer
    print "REMEMBER TO RUN sfa_import AGAIN"
    # NOTE(review): the trailing 'return True' is not visible in this excerpt
def sfa_uninstall(self):
    "uses rpm to uninstall sfa - ignore result"
    # remove the packages, then the leftover state and log files
    self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
    self.run_in_guest("rm -rf /var/lib/sfa")
    self.run_in_guest("rm -rf /etc/sfa")
    self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
    # NOTE(review): one original line between these commands is not shown
    # in this excerpt; the forced --noscripts erase below presumably covers
    # the case where the regular 'rpm -e' above failed
    self.run_in_guest("rpm -e --noscripts sfa-plc")
1131 ### run unit tests for SFA
1132 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1133 # Running Transaction
1134 # Transaction couldn't start:
1135 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1136 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1137 # no matter how many Gbs are available on the testplc
1138 # could not figure out what's wrong, so...
1139 # if the yum install phase fails, consider the test is successful
1140 # other combinations will eventually run it hopefully
def sfa_utest(self):
    "yum install sfa-tests and run SFA unittests"
    self.run_in_guest("yum -y install sfa-tests")
    # failed to install - forget it
    if self.run_in_guest("rpm -q sfa-tests")!=0:
        utils.header("WARNING: SFA unit tests failed to install, ignoring")
        # NOTE(review): per the comment block above this method, install
        # failure is meant to count as success; the 'return True' that
        # should end this branch is not visible in this excerpt
    return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
# NOTE(review): the 'def confdir(self):' line and the trailing
# 'return dirname' are not visible in this excerpt
    # lazily create conf.<plcname>/ and hand back its name
    dirname="conf.%s"%self.plc_spec['name']
    if not os.path.isdir(dirname):
        utils.system("mkdir -p %s"%dirname)
    if not os.path.isdir(dirname):
        # NOTE(review): raising a string is invalid in python >= 2.6
        # (it raises TypeError instead); should raise an Exception subclass
        raise "Cannot create config dir for plc %s"%self.name()
def conffile(self,filename):
    # path of a config file inside this plc's conf directory
    return self.confdir() + "/" + filename
def confsubdir(self,dirname,clean):
    # path of a subdir of confdir(); optionally wiped first, then created
    subdirname="%s/%s"%(self.confdir(),dirname)
    # NOTE(review): the 'if clean:' guard for the next line, and the
    # trailing 'return subdirname', are not visible in this excerpt
    utils.system("rm -rf %s"%subdirname)
    if not os.path.isdir(subdirname):
        utils.system("mkdir -p %s"%subdirname)
    if not os.path.isdir(subdirname):
        # NOTE(review): string exceptions are invalid in python >= 2.6;
        # should raise an Exception subclass
        raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
def conffile_clean (self,filename):
    # remove one config file; True when the rm succeeded
    path = self.conffile(filename)
    return utils.system("rm -rf %s"%path)==0
def sfa_configure(self):
    "run sfa-config-tty"
    # build an answers file locally, then pipe it into sfa-config-tty
    # inside the guest
    tmpname=self.conffile("sfa-config-tty")
    fileconf=open(tmpname,'w')
    # NOTE(review): several entries of this variable list, the closing
    # bracket of the list, and possibly more of the loop are not visible
    # in this excerpt
    for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                 'SFA_INTERFACE_HRN',
                 # 'SFA_REGISTRY_LEVEL1_AUTH',
                 'SFA_REGISTRY_HOST',
                 'SFA_AGGREGATE_HOST',
                 'SFA_PLC_DB_PASSWORD',
        fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
    # the way plc_config handles booleans just sucks..
    for var in ['SFA_API_DEBUG']:
        # NOTE(review): the 'val' initialization / 'false' branch is not
        # visible in this excerpt
        if self.plc_spec['sfa'][var]: val='true'
        fileconf.write ('e %s\n%s\n'%(var,val))
    # w(rite), R(estart), q(uit): the sfa-config-tty command sequence
    fileconf.write('w\n')
    fileconf.write('R\n')
    fileconf.write('q\n')
    # NOTE(review): fileconf.close() is not visible in this excerpt
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
def aggregate_xml_line(self):
    # one <aggregate/> element advertising this plc's aggregate endpoint
    template = '<aggregate addr="{0}" hrn="{1}" port="12347"/>'
    return template.format(self.vserverip,
                           self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
def registry_xml_line(self):
    # one <registry/> element advertising this plc's registry endpoint
    template = '<registry addr="{0}" hrn="{1}" port="12345"/>'
    return template.format(self.vserverip,
                           self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
# a cross step that takes all other plcs in argument
def cross_sfa_configure(self, other_plcs):
    "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
    # of course with a single plc, other_plcs is an empty list
    agg_fname=self.conffile("agg.xml")
    # NOTE(review): file() is python-2 only, and the handles are never
    # closed explicitly (CPython refcounting closes them at once here)
    file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
        " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%agg_fname)
    reg_fname=self.conffile("reg.xml")
    file(reg_fname,"w").write("<registries>%s</registries>\n" % \
        " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%reg_fname)
    # push both files into the guest's /etc/sfa; succeed only if both copies do
    return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
       and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
def sfa_import(self):
    "runs sfa-import-plc.py in the guest to seed the SFA registry from the PLC db"
    # CLEANUP: the local 'auth' variable was computed but never used - it
    # only served the now-commented-out server.key copy below - so it is
    # dropped (its dict lookups could even raise spuriously)
    return self.run_in_guest('sfa-import-plc.py')==0
    # not needed anymore
    # auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
def sfa_start(self):
    # start the sfa service inside the guest; True on a zero exit status
    rc = self.run_in_guest('service sfa start')
    return rc == 0
def sfi_configure(self):
    "Create /root/.sfi on the plc side"
    # builds a local dot-sfi directory (PI key, sfi_config, xml/rspec
    # templates) and pushes it to /root/.sfi inside the guest
    sfa_spec=self.plc_spec['sfa']
    "sfi client configuration"
    dir_name=self.confsubdir("dot-sfi",clean=True)
    # private key of the PI user
    file_name=dir_name + os.sep + sfa_spec['piuser'] + '.pkey'
    fileconf=open(file_name,'w')
    fileconf.write (self.plc_spec['keys'][0]['private'])
    # NOTE(review): the fileconf.close() calls do not appear anywhere in
    # this excerpt - confirm against full source
    utils.header ("(Over)wrote %s"%file_name)
    # the sfi_config file proper: auth, user, registry and SM endpoints
    file_name=dir_name + os.sep + 'sfi_config'
    fileconf=open(file_name,'w')
    SFI_AUTH="%s.%s"%(sfa_spec['SFA_REGISTRY_ROOT_AUTH'],sfa_spec['login_base'])
    fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
    fileconf.write('\n')
    SFI_USER=SFI_AUTH + '.' + sfa_spec['piuser']
    fileconf.write ("SFI_USER='%s'"%SFI_USER)
    fileconf.write('\n')
    SFI_REGISTRY='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12345/'
    fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
    fileconf.write('\n')
    SFI_SM='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12347/'
    fileconf.write ("SFI_SM='%s'"%SFI_SM)
    fileconf.write('\n')
    utils.header ("(Over)wrote %s"%file_name)
    # person record, one chunk per entry in the spec
    file_name=dir_name + os.sep + 'person.xml'
    fileconf=open(file_name,'w')
    for record in sfa_spec['sfa_person_xml']:
        person_record=record
        fileconf.write(person_record)
        fileconf.write('\n')
    utils.header ("(Over)wrote %s"%file_name)
    # slice record
    file_name=dir_name + os.sep + 'slice.xml'
    fileconf=open(file_name,'w')
    for record in sfa_spec['sfa_slice_xml']:
        # NOTE(review): the 'slice_record=record' assignment is not
        # visible in this excerpt
        #slice_record=sfa_spec['sfa_slice_xml']
        fileconf.write(slice_record)
        fileconf.write('\n')
    utils.header ("(Over)wrote %s"%file_name)
    # slice rspec, rendered from the spec's key/value pairs
    file_name=dir_name + os.sep + 'slice.rspec'
    fileconf=open(file_name,'w')
    for (key, value) in sfa_spec['sfa_slice_rspec'].items():
        # NOTE(review): the assignment building 'slice_rspec' from
        # key/value is not visible in this excerpt
        fileconf.write(slice_rspec)
        fileconf.write('\n')
    utils.header ("(Over)wrote %s"%file_name)
    # push to the remote root's .sfi
    location = "root/.sfi"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs(dir_name, remote, recursive=True)
    # NOTE(review): the trailing 'return True' is not visible in this excerpt
def sfi_clean (self):
    "clean up /root/.sfi on the plc side"
    # wipe the whole sfi client config directory inside the guest
    command = "rm -rf /root/.sfi"
    self.run_in_guest(command)
def sfa_add_user(self):
    "run sfi.py add using person.xml"
    # same explicit-variable style as sfa_delete_user
    user_handler = TestUserSfa(self)
    return user_handler.add_user()
def sfa_update_user(self):
    "run sfi.py update using person.xml"
    # same explicit-variable style as sfa_delete_user
    user_handler = TestUserSfa(self)
    return user_handler.update_user()
def sfa_add_slice(self):
    "run sfi.py add (on Registry) from slice.xml"
    # NOTE(review): body not visible in this excerpt - presumably delegates
    # to TestSliceSfa, mirroring how the user steps delegate to TestUserSfa

def sfa_discover(self):
    "discover resources into resources_in.rspec"
    # NOTE(review): body not visible in this excerpt

def sfa_create_slice(self):
    "run sfi.py create (on SM) - 1st time"
    # NOTE(review): body not visible in this excerpt

def sfa_check_slice_plc(self):
    "check sfa_create_slice at the plcs - all local nodes should be in slice"
    # NOTE(review): body not visible in this excerpt

def sfa_update_slice(self):
    "run sfi.py create (on SM) on existing object"
    # NOTE(review): body not visible in this excerpt
# NOTE(review): the 'def' line of this step (presumably 'sfa_view') and
# the 'return' that should introduce the chained expression below are not
# visible in this excerpt
    "run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
    sfa_spec=self.plc_spec['sfa']
    auth=sfa_spec['SFA_REGISTRY_ROOT_AUTH']
    # all four sfi.py invocations must exit 0 for the step to pass
    self.run_in_guest("sfi.py -d /root/.sfi/ list %s.%s"%(auth,sfa_spec['login_base']))==0 and \
    self.run_in_guest("sfi.py -d /root/.sfi/ show %s.%s"%(auth,sfa_spec['login_base']))==0 and \
    self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
    self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0
def ssh_slice_sfa(self):
    "tries to ssh-enter the SFA slice"
    # NOTE(review): body not visible in this excerpt
def sfa_delete_user(self):
    "run sfi.py delete (on SM) for user"
    # one-liner equivalent of the add/update user steps
    return TestUserSfa(self).delete_user()
def sfa_delete_slice(self):
    "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
    # NOTE(review): body not visible in this excerpt
# NOTE(review): the next line appears to belong to a separate 'sfa_stop'
# step whose 'def' line is not visible in this excerpt; as written the
# '==0' result is discarded
    self.run_in_guest('service sfa stop')==0
def populate (self):
    "creates random entries in the PLCAPI"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # NOTE(review): the initial 'command = location' assignment is not
    # visible in this excerpt (compare with plcsh_stress_test above)
    command += " -- --preserve --short-names"
    local = (self.run_in_guest(command) == 0);
    # second run with --foreign
    command += ' --foreign'
    # NOTE(review): 'remote' is re-used here for a boolean, shadowing the
    # path computed above - works, but confusing
    remote = (self.run_in_guest(command) == 0);
    return ( local and remote)
def gather_logs (self):
    "gets all possible logs from plc's/qemu node's/slice's for future reference"
    # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
    # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
    # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
    # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
    # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
    # (1.a)
    print "-------------------- TestPlc.gather_logs : PLC's /var/log"
    self.gather_var_logs ()
    # (1.b)
    print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
    self.gather_pgsql_logs ()
    # (2)
    print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            test_node.gather_qemu_logs()
    # (3)
    print "-------------------- TestPlc.gather_logs : nodes's /var/log"
    self.gather_nodes_var_logs()
    # (4)
    print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
    self.gather_slivers_var_logs()
    # NOTE(review): the trailing 'return True' expected of a step method
    # is not visible in this excerpt
def gather_slivers_var_logs(self):
    # untar each sliver's /var/log into logs/sliver.var-log.<sliver>/
    for test_sliver in self.all_sliver_objs():
        # remote is a command string that tars up the sliver's /var/log
        remote = test_sliver.tar_var_logs()
        utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
        command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
        utils.system(command)
def gather_var_logs (self):
    # pull the plc's /var/log into logs/myplc.var-log.<plcname>/
    dest = "logs/myplc.var-log.%s"%self.name()
    utils.system("mkdir -p " + dest)
    tar_up = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_up + "| tar -C " + dest + " -xf -")
    # make the apache logs readable once extracted
    utils.system("chmod a+r,a+x " + dest + "/httpd")
def gather_pgsql_logs (self):
    # pull the plc's postgres logs into logs/myplc.pgsql-log.<plcname>/
    dest = "logs/myplc.pgsql-log.%s"%self.name()
    utils.system("mkdir -p " + dest)
    tar_up = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_up + "| tar -C " + dest + " -xf -")
def gather_nodes_var_logs (self):
    # fetch /var/log from every node over ssh (with the test key)
    # into logs/node.var-log.<node>/
    for site_spec in self.plc_spec['sites']:
        site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode(self,site,node_spec)
            ssh = TestSsh (node.name(),key="keys/key1.rsa")
            dest = "logs/node.var-log.%s"%node.name()
            utils.system("mkdir -p " + dest)
            fetch = ssh.actual_command("tar -C /var/log -cf - .")
            utils.system(fetch + "| tar -C " + dest + " -xf -")
# returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    # uses options.dbname if it is found
    # NOTE(review): the guard around the next line (for when options has
    # no 'dbname') is not visible in this excerpt
    name=self.options.dbname
    if not isinstance(name,StringTypes):
        # fallback: derive 'name' from the current timestamp
        # NOTE(review): the lines turning 't' into 'name' are not visible
        # in this excerpt
        t=datetime.datetime.now()
    return "/root/%s-%s.sql"%(database,name)
def plc_db_dump(self):
    'dump the planetlab5 DB in /root in the PLC - filename has time'
    # NOTE(review): "planetab5" (sic) is a typo in the filename stem, but
    # plc_db_restore uses the same one, so do not fix it one-sided
    dump=self.dbfile("planetab5")
    self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
    utils.header('Dumped planetlab5 database in %s'%dump)
    # NOTE(review): the trailing 'return True' expected of a step method
    # is not visible in this excerpt
def plc_db_restore(self):
    'restore the planetlab5 DB - looks broken, but run -n might help'
    # same (misspelled) filename stem as plc_db_dump, on purpose
    dump=self.dbfile("planetab5")
    ##stop httpd service
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    # drop and recreate the database, then replay the dump
    self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
    self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
    ##starting httpd service
    self.run_in_guest('service httpd start')
    utils.header('Database restored from ' + dump)
# NOTE(review): in the full source each of these stubs carries a
# @standby_generic decorator (defined in the module header), which turns
# 'standby_<N>' into a step that sleeps N minutes; the decorator lines
# are not visible in this excerpt
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass