# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA

import os, sys, time
import datetime
import traceback
import socket
from types import StringTypes

import utils
from TestSite import TestSite
from TestNode import TestNode
from TestUser import TestUser
from TestKey import TestKey
from TestSlice import TestSlice
from TestSliver import TestSliver
from TestBoxQemu import TestBoxQemu
from TestSsh import TestSsh
from TestApiserver import TestApiserver
from TestSliceSfa import TestSliceSfa
from TestUserSfa import TestUserSfa

# step methods must take (self) and return a boolean (options is a member of the class)

def standby(minutes, dry_run):
    utils.header('Entering StandBy for %d mn' % minutes)
    if not dry_run:
        time.sleep(60*minutes)
    return True

def standby_generic(func):
    def actual(self):
        minutes = int(func.__name__.split("_")[1])
        return standby(minutes, self.options.dry_run)
    return actual

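# standby_generic is applied to the no-op standby_<N> steps at the end of
# this file; a minimal sketch of the intended usage (illustrative only):
#
#   @standby_generic
#   def standby_5(): pass
#
# the decorated step, when invoked, sleeps for 5 minutes unless --dry-run is set
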
def node_mapper(method):
    def actual(self):
        overall = True
        node_method = TestNode.__dict__[method.__name__]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                if not node_method(test_node): overall = False
        return overall
    # restore the doc text
    actual.__doc__ = method.__doc__
    return actual

def slice_mapper(method):
    def actual(self):
        overall = True
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site(slice_spec['sitename'])
            test_site = TestSite(self, site_spec)
            test_slice = TestSlice(self, test_site, slice_spec)
            if not slice_method(test_slice, self.options): overall = False
        return overall
    # restore the doc text
    actual.__doc__ = method.__doc__
    return actual

def slice_sfa_mapper(method):
    def actual(self):
        overall = True
        slice_method = TestSliceSfa.__dict__[method.__name__]
        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
            site_spec = self.locate_site(slice_spec['sitename'])
            test_site = TestSite(self, site_spec)
            test_slice = TestSliceSfa(self, test_site, slice_spec)
            if not slice_method(test_slice, self.options): overall = False
        return overall
    # restore the doc text
    actual.__doc__ = method.__doc__
    return actual

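# a minimal usage sketch for the mappers (illustrative; the real step stubs
# appear further down in the class):
#
#   @node_mapper
#   def qemu_start(self): pass
#
# the decorated step loops over every node in plc_spec['sites'], invokes the
# TestNode method of the same name on each, and ANDs the boolean results
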
SEP = '<sep>'
SEPSFA = '<sep-sfa>'

class TestPlc:

    default_steps = [
        'vs_delete','timestamp_vs','vs_create', SEP,
        'plc_install', 'plc_configure', 'plc_start', SEP,
        'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
        'nodestate_reinstall', 'qemu_local_init', 'bootcd', 'qemu_local_config', SEP,
        'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
        'sfa_install', 'sfa_configure', 'cross_sfa_configure', 'sfa_import', 'sfa_start', SEPSFA,
        'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
        'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
        'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1', SEPSFA,
        # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
        # but as the stress test might take a while, we sometimes missed the debug mode..
        'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
        'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
        'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
        'force_gather_logs', SEP,
        ]
    other_steps = [
        'show_boxes', 'local_list', 'local_rel', 'local_rel_plc', 'local_rel_qemu', SEP,
        'plc_stop', 'vs_start', 'vs_stop', SEP,
        'delete_initscripts', 'delete_nodegroups', 'delete_all_sites', SEP,
        'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
        'delete_leases', 'list_leases', SEP,
        'nodestate_show', 'nodestate_safeboot', 'nodestate_boot', SEP,
        'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
        'sfa_plcclean', 'sfa_dbclean', 'sfa_stop', 'sfa_uninstall', 'sfi_clean', SEP,
        'plc_db_dump', 'plc_db_restore', SEP,
        'standby_1 through 20', SEP,
        ]

    @staticmethod
    def printable_steps(list):
        single_line = " ".join(list) + " "
        return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")

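    # a quick illustration (hypothetical input): printable_steps(['a',SEP,'b'])
    # returns "a \\\nb " - i.e. separator markers become backslash-newline
    # breaks suitable for multi-line display
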
    @staticmethod
    def valid_step(step):
        return step != SEP and step != SEPSFA

    # turn off the sfa-related steps when the build has skipped SFA
    # this was originally for centos5, as recent SFAs won't build on that platform
    @staticmethod
    def check_whether_build_has_sfa(rpms_url):
        # warning, we're now building 'sface' so let's be a bit more picky
        retcod = os.system("curl --silent %s/ | grep -q sfa-" % rpms_url)
        # full builds are expected to return with 0 here
        if retcod != 0:
            # move all steps containing 'sfa' from default_steps to other_steps
            sfa_steps = [ step for step in TestPlc.default_steps if step.find('sfa') >= 0 ]
            TestPlc.other_steps += sfa_steps
            for step in sfa_steps: TestPlc.default_steps.remove(step)

    def __init__(self, plc_spec, options):
        self.plc_spec = plc_spec
        self.options = options
        self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
        try:
            self.vserverip = plc_spec['vserverip']
            self.vservername = plc_spec['vservername']
            self.url = "https://%s:443/PLCAPI/" % plc_spec['vserverip']
        except:
            raise Exception, 'chroot-based myplc testing is deprecated'
        self.apiserver = TestApiserver(self.url, options.dry_run)

    def name(self):
        name = self.plc_spec['name']
        return "%s.%s" % (name, self.vservername)

    # which box this plc runs on
    def host_box(self):
        return self.plc_spec['host_box']

    def is_local(self):
        return self.test_ssh.is_local()

    # define the API methods on this object through xmlrpc
    # would help, but not strictly necessary

    def actual_command_in_guest(self, command):
        return self.test_ssh.actual_command(self.host_to_guest(command))

    def start_guest(self):
        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))

    def stop_guest(self):
        return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))

    def run_in_guest(self, command):
        return utils.system(self.actual_command_in_guest(command))

    def run_in_host(self, command):
        return self.test_ssh.run_in_buildname(command)

    # command gets run in the vserver
    def host_to_guest(self, command):
        return "vserver %s exec %s" % (self.vservername, command)

    # start/stop the vserver
    def start_guest_in_host(self):
        return "vserver %s start" % (self.vservername)

    def stop_guest_in_host(self):
        return "vserver %s stop" % (self.vservername)

    def run_in_guest_piped(self, local, remote):
        return utils.system(local + " | " + self.test_ssh.actual_command(self.host_to_guest(remote), keep_stdin=True))

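    # for instance, plc_configure below does
    #   self.run_in_guest_piped('cat %s' % tmpname, 'plc-config-tty')
    # i.e. a locally-generated answers file gets piped into a command that
    # runs inside the vserver
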
    def auth_root(self):
        return {'Username': self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod': 'password',
                'AuthString': self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role': self.plc_spec['role'],
                }

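    # this auth struct is what every PLCAPI call below expects as its first
    # argument, e.g.
    #   self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])
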
    def locate_site(self, sitename):
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception, "Cannot locate site %s" % sitename

    def locate_node(self, nodename):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                    return (site, node)
        raise Exception, "Cannot locate node %s" % nodename

    def locate_hostname(self, hostname):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    return (site, node)
        raise Exception, "Cannot locate hostname %s" % hostname

    def locate_key(self, keyname):
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
                return key
        raise Exception, "Cannot locate key %s" % keyname

    def locate_slice(self, slicename):
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                return slice
        raise Exception, "Cannot locate slice %s" % slicename

    def all_sliver_objs(self):
        result = []
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj(nodename, slicename))
        return result

    def locate_sliver_obj(self, nodename, slicename):
        (site, node) = self.locate_node(nodename)
        slice = self.locate_slice(slicename)
        # build objects
        test_site = TestSite(self, site)
        test_node = TestNode(self, test_site, node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice(self, test_site, slice)
        return TestSliver(self, test_node, test_slice)

    def locate_first_node(self):
        nodename = self.plc_spec['slices'][0]['nodenames'][0]
        (site, node) = self.locate_node(nodename)
        test_site = TestSite(self, site)
        test_node = TestNode(self, test_site, node)
        return test_node

    def locate_first_sliver(self):
        slice_spec = self.plc_spec['slices'][0]
        slicename = slice_spec['slice_fields']['name']
        nodename = slice_spec['nodenames'][0]
        return self.locate_sliver_obj(nodename, slicename)

    # all the distinct host boxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        tuples = []
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append((test_node.host_box(), test_node))
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        result = {}
        for (box, node) in tuples:
            if not result.has_key(box):
                result[box] = []
            result[box].append(node)
        return result

    # a step for checking this stuff
    def show_boxes(self):
        'print summary of nodes location'
        for (box, nodes) in self.gather_hostBoxes().iteritems():
            print box, ":", " + ".join([ node.name() for node in nodes ])
        return True

    # make this a valid step
    def qemu_kill_all(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box, nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir = nodes[0].nodedir()
            TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
        return True

    # make this a valid step
    def qemu_list_all(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for (box, nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, list all qemus on that host box
            TestBoxQemu(box, self.options.buildname).qemu_list_all()
        return True

    # list only the right qemus
    def qemu_list_mine(self):
        'list qemu instances for our nodes'
        for (box, nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.list_qemu()
        return True

    # kill only the right qemus
    def qemu_kill_mine(self):
        'kill the qemu instances for our nodes'
        for (box, nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.kill_qemu()
        return True

    #################### display config
    def show(self):
        "show test configuration after localization"
        self.display_pass(1)
        self.display_pass(2)
        return True

    always_display_keys = ['PLC_WWW_HOST', 'nodes', 'sites', ]

    def display_pass(self, passno):
        for (key, val) in self.plc_spec.iteritems():
            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
            if passno == 2:
                if key == 'sites':
                    for site in val:
                        self.display_site_spec(site)
                        for node in site['nodes']:
                            self.display_node_spec(node)
                elif key == 'initscripts':
                    for initscript in val:
                        self.display_initscript_spec(initscript)
                elif key == 'slices':
                    for slice in val:
                        self.display_slice_spec(slice)
                elif key == 'keys':
                    for key_spec in val:
                        self.display_key_spec(key_spec)
            elif passno == 1:
                if key not in ['sites', 'initscripts', 'slices', 'keys', 'sfa']:
                    print '+ ', key, ':', val

    def display_site_spec(self, site):
        print '+ ======== site', site['site_fields']['name']
        for (k, v) in site.iteritems():
            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
            if k == 'nodes':
                if v:
                    print '+ ', 'nodes : ',
                    for node in v:
                        print node['node_fields']['hostname'], '',
                    print ''
            elif k == 'users':
                if v:
                    print '+ ', 'users : ',
                    for user in v:
                        print user['name'], '',
                    print ''
            elif k == 'site_fields':
                print '+ login_base', ':', v['login_base']
            elif k == 'address_fields':
                pass
            else:
                print '+ ', k, ':', v

    def display_initscript_spec(self, initscript):
        print '+ ======== initscript', initscript['initscript_fields']['name']

    def display_key_spec(self, key):
        print '+ ======== key', key['name']

    def display_slice_spec(self, slice):
        print '+ ======== slice', slice['slice_fields']['name']
        for (k, v) in slice.iteritems():
            if k == 'nodenames':
                if v:
                    print '+ nodes : ',
                    for nodename in v:
                        print nodename, '',
                    print ''
            elif k == 'usernames':
                if v:
                    print '+ users : ',
                    for username in v:
                        print username, '',
                    print ''
            elif k == 'slice_fields':
                print '+ fields', ':',
                print 'max_nodes=', v['max_nodes'],
                print ''
            else:
                print '+ ', k, ':', v

    def display_node_spec(self, node):
        print "+ node=%s host_box=%s" % (node['name'], node['host_box']),
        print "hostname=", node['node_fields']['hostname'],
        print "ip=", node['interface_fields']['ip']
        if self.options.verbose:
            utils.pprint("node details", node, depth=3)

    # another entry point for just showing the boxes involved
    def display_mapping(self):
        TestPlc.display_mapping_plc(self.plc_spec)
        return True

    @staticmethod
    def display_mapping_plc(plc_spec):
        print '+ MyPLC', plc_spec['name']
        print '+\tvserver address = root@%s:/vservers/%s' % (plc_spec['host_box'], plc_spec['vservername'])
        print '+\tIP = %s/%s' % (plc_spec['PLC_API_HOST'], plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)

    @staticmethod
    def display_mapping_node(node_spec):
        print '+ NODE %s' % (node_spec['name'])
        print '+\tqemu box %s' % node_spec['host_box']
        print '+\thostname=%s' % node_spec['node_fields']['hostname']

    # write a timestamp in /vservers/<>.timestamp
    # it cannot live inside the vserver, as that makes the vserver build cough
    def timestamp_vs(self):
        now = int(time.time())
        return utils.system(self.test_ssh.actual_command("echo %d > /vservers/%s.timestamp" % (now, self.vservername))) == 0

    def local_pre(self):
        "run the site-dependent pre-test script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_pre(self)

    def local_post(self):
        "run the site-dependent post-test script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_post(self)

    def local_list(self):
        "run the site-dependent list script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_list(self)

    def local_rel(self):
        "run the site-dependent release script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_release(self)

    def local_rel_plc(self):
        "run the site-dependent plc-release script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_release_plc(self)

    def local_rel_qemu(self):
        "run the site-dependent qemu-release script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_release_qemu(self)

483 "vserver delete the test myplc"
484 self.run_in_host("vserver --silent %s delete"%self.vservername)
485 self.run_in_host("rm -f /vservers/%s.timestamp"%self.vservername)
    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def vs_create(self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        if self.is_local():
            # a full path for the local calls
            build_dir = os.path.dirname(sys.argv[0])
            # sometimes this is empty - set to "." in such a case
            if not build_dir: build_dir = "."
            build_dir += "/build"
        else:
            # use a standard name - will be relative to remote buildname
            build_dir = "build"
            # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
            self.test_ssh.rmdir(build_dir)
            self.test_ssh.copy(build_dir, recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in ['arch']:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to vtest-init-vserver
        test_env_options = ""
        test_env_options += " -p %s" % self.options.personality
        test_env_options += " -d %s" % self.options.pldistro
        test_env_options += " -f %s" % self.options.fcdistro
        script = "vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options = "--netdev eth0 --interface %s" % self.vserverip
        try:
            vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
            vserver_options += " --hostname %s" % vserver_hostname
        except:
            print "Cannot reverse lookup %s" % self.vserverip
            print "This is considered fatal, as this might pollute the test results"
            return False
        create_vserver = "%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s" % locals()
        return self.run_in_host(create_vserver) == 0

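    # with hypothetical option values (personality linux32, pldistro planetlab,
    # fcdistro f8, vservername vplc01), the command run on the host box would
    # look like
    #   build/vtest-init-vserver.sh -p linux32 -d planetlab -f f8 vplc01 \
    #       <repo_url> -- --netdev eth0 --interface <vserverip> --hostname <...>
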
    def plc_install(self):
        "yum install myplc, noderepo, and the plain bootstrapfs"

        # workaround for getting pgsql8.2 on centos5
        if self.options.fcdistro == "centos5":
            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")

        # compute the node family from personality, pldistro and fcdistro
        if self.options.personality == "linux32":
            arch = "i386"
        elif self.options.personality == "linux64":
            arch = "x86_64"
        else:
            raise Exception, "Unsupported personality %r" % self.options.personality
        nodefamily = "%s-%s-%s" % (self.options.pldistro, self.options.fcdistro, arch)

        pkgs_list = []
        pkgs_list.append("slicerepo-%s" % nodefamily)
        pkgs_list.append("myplc")
        pkgs_list.append("noderepo-%s" % nodefamily)
        pkgs_list.append("bootstrapfs-%s-plain" % nodefamily)
        pkgs_string = " ".join(pkgs_list)
        self.run_in_guest("yum -y install %s" % pkgs_string)
        return self.run_in_guest("rpm -q %s" % pkgs_string) == 0

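    # e.g. with pldistro=planetlab, fcdistro=f8 and a linux32 personality
    # (hypothetical values), nodefamily is "planetlab-f8-i386", so the yum
    # line installs slicerepo-planetlab-f8-i386, myplc,
    # noderepo-planetlab-f8-i386 and bootstrapfs-planetlab-f8-i386-plain
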
    def plc_configure(self):
        "run plc-config-tty"
        tmpname = '%s.plc-config-tty' % (self.name())
        fileconf = open(tmpname, 'w')
        for var in ['PLC_NAME',
                    'PLC_MAIL_SUPPORT_ADDRESS',
                    # the following were added for integrating SFA testing
                    'PLC_RESERVATION_GRANULARITY',
                    'PLC_OMF_XMPP_SERVER',
                    ]:
            fileconf.write('e %s\n%s\n' % (var, self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s' % tmpname)
        self.run_in_guest_piped('cat %s' % tmpname, 'plc-config-tty')
        utils.system('rm %s' % tmpname)
        return True

    def plc_start(self):
        "service plc start"
        self.run_in_guest('service plc start')
        return True

    def plc_stop(self):
        "service plc stop"
        self.run_in_guest('service plc stop')
        return True

    def vs_start(self):
        "start the PLC vserver"
        self.start_guest()
        return True

    def vs_stop(self):
        "stop the PLC vserver"
        self.stop_guest()
        return True

    # stores the keys from the config for further use
    def keys_store(self):
        "stores test users ssh keys in keys/"
        for key_spec in self.plc_spec['keys']:
            TestKey(self, key_spec).store_key()
        return True

    def keys_clean(self):
        "removes keys cached in keys/"
        utils.system("rm -rf ./keys")
        return True

    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def keys_fetch(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        dir = "./keys"
        if not os.path.isdir(dir):
            os.mkdir(dir)
        vservername = self.vservername
        overall = True
        prefix = 'debug_ssh_key'
        for ext in ['pub', 'rsa']:
            src = "/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s" % locals()
            dst = "keys/%(vservername)s-debug.%(ext)s" % locals()
            if self.test_ssh.fetch(src, dst) != 0: overall = False
        return overall

637 "create sites with PLCAPI"
638 return self.do_sites()
640 def delete_sites (self):
641 "delete sites with PLCAPI"
642 return self.do_sites(action="delete")
644 def do_sites (self,action="add"):
645 for site_spec in self.plc_spec['sites']:
646 test_site = TestSite (self,site_spec)
647 if (action != "add"):
648 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
649 test_site.delete_site()
650 # deleted with the site
651 #test_site.delete_users()
654 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
655 test_site.create_site()
656 test_site.create_users()
    def delete_all_sites(self):
        "Delete all sites in PLC, and related objects"
        print 'auth_root', self.auth_root()
        site_ids = [ s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id']) ]
        for site_id in site_ids:
            print 'Deleting site_id', site_id
            self.apiserver.DeleteSite(self.auth_root(), site_id)
        return True

669 "create nodes with PLCAPI"
670 return self.do_nodes()
671 def delete_nodes (self):
672 "delete nodes with PLCAPI"
673 return self.do_nodes(action="delete")
675 def do_nodes (self,action="add"):
676 for site_spec in self.plc_spec['sites']:
677 test_site = TestSite (self,site_spec)
679 utils.header("Deleting nodes in site %s"%test_site.name())
680 for node_spec in site_spec['nodes']:
681 test_node=TestNode(self,test_site,node_spec)
682 utils.header("Deleting %s"%test_node.name())
683 test_node.delete_node()
685 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
686 for node_spec in site_spec['nodes']:
687 utils.pprint('Creating node %s'%node_spec,node_spec)
688 test_node = TestNode (self,test_site,node_spec)
689 test_node.create_node ()
    def nodegroups(self):
        "create nodegroups with PLCAPI"
        return self.do_nodegroups("add")

    def delete_nodegroups(self):
        "delete nodegroups with PLCAPI"
        return self.do_nodegroups("delete")

    # timestamps in the spec that are smaller than one year are interpreted
    # as relative to 'start', in units of 'grain'
    YEAR = 365*24*3600
    @staticmethod
    def translate_timestamp(start, grain, timestamp):
        if timestamp < TestPlc.YEAR: return start + timestamp*grain
        else: return timestamp

    @staticmethod
    def timestamp_printable(timestamp):
        return time.strftime('%m-%d %H:%M:%S UTC', time.gmtime(timestamp))

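    # a quick worked example (values are illustrative): with start=1200 and
    # grain=60, a relative timestamp of 3 translates to 1200+3*60=1380,
    # whereas an absolute timestamp such as 1300000000 exceeds YEAR and is
    # returned unchanged
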
710 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
712 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
713 print 'API answered grain=',grain
714 start=(now/grain)*grain
716 # find out all nodes that are reservable
717 nodes=self.all_reservable_nodenames()
719 utils.header ("No reservable node found - proceeding without leases")
722 # attach them to the leases as specified in plc_specs
723 # this is where the 'leases' field gets interpreted as relative of absolute
724 for lease_spec in self.plc_spec['leases']:
725 # skip the ones that come with a null slice id
726 if not lease_spec['slice']: continue
727 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
728 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
729 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
730 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
731 if lease_addition['errors']:
732 utils.header("Cannot create leases, %s"%lease_addition['errors'])
735 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
736 (nodes,lease_spec['slice'],
737 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
738 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
    def delete_leases(self):
        "remove all leases in the myplc side"
        lease_ids = [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root()) ]
        utils.header("Cleaning leases %r" % lease_ids)
        self.apiserver.DeleteLeases(self.auth_root(), lease_ids)
        return True

    def list_leases(self):
        "list all leases known to the myplc"
        leases = self.apiserver.GetLeases(self.auth_root())
        now = int(time.time())
        for l in leases:
            current = l['t_until'] >= now
            if self.options.verbose or current:
                utils.header("%s %s from %s until %s" % (l['hostname'], l['name'],
                                                         TestPlc.timestamp_printable(l['t_from']),
                                                         TestPlc.timestamp_printable(l['t_until'])))
        return True

    # create nodegroups if needed, and populate
    def do_nodegroups(self, action="add"):
        # 1st pass to scan contents
        groups_dict = {}
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames = node_spec['nodegroups']
                    if isinstance(nodegroupnames, StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname] = []
                        groups_dict[nodegroupname].append(test_node.name())
        auth = self.auth_root()
        overall = True
        for (nodegroupname, group_nodes) in groups_dict.iteritems():
            if action == "add":
                print 'nodegroups:', 'dealing with nodegroup', nodegroupname, 'on nodes', group_nodes
                # first, check if the nodetagtype is here
                tag_types = self.apiserver.GetTagTypes(auth, {'tagname': nodegroupname})
                if tag_types:
                    tag_type_id = tag_types[0]['tag_type_id']
                else:
                    tag_type_id = self.apiserver.AddTagType(auth,
                                                            {'tagname': nodegroupname,
                                                             'description': 'for nodegroup %s' % nodegroupname})
                print 'located tag (type)', nodegroupname, 'as', tag_type_id
                # create the nodegroup if needed
                nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname': nodegroupname})
                if not nodegroups:
                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                    print 'created nodegroup', nodegroupname, 'from tagname', nodegroupname, 'and value', 'yes'
                # set node tag on all nodes, value='yes'
                for nodename in group_nodes:
                    try:
                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    except:
                        traceback.print_exc()
                        print 'node', nodename, 'seems to already have tag', nodegroupname
                    # check anyway
                    try:
                        expect_yes = self.apiserver.GetNodeTags(auth,
                                                                {'hostname': nodename,
                                                                 'tagname': nodegroupname},
                                                                ['value'])[0]['value']
                        if expect_yes != "yes":
                            print 'Mismatch node tag on node', nodename, 'got', expect_yes
                            overall = False
                    except:
                        if not self.options.dry_run:
                            print 'Cannot find tag', nodegroupname, 'on node', nodename
                            overall = False
            else:
                try:
                    print 'cleaning nodegroup', nodegroupname
                    self.apiserver.DeleteNodeGroup(auth, nodegroupname)
                except:
                    traceback.print_exc()
                    overall = False
        return overall

    # return a list of tuples (nodename,qemuname)
    def all_node_infos(self):
        node_infos = []
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'], node_spec['host_box']) \
                            for node_spec in site_spec['nodes'] ]
        return node_infos

    def all_nodenames(self): return [ x[0] for x in self.all_node_infos() ]

    def all_reservable_nodenames(self):
        res = []
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields = node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type'] == 'reservable':
                    res.append(node_fields['hostname'])
        return res

    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state(self, target_boot_state, timeout_minutes, silent_minutes, period=15):
        if self.options.dry_run:
            print 'dry_run'
            return True
        # compute timeouts
        timeout = datetime.datetime.now() + datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now() + datetime.timedelta(minutes=silent_minutes)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_nodenames()
        utils.header("checking nodes %r" % tocheck)
        # create a dict hostname -> status
        status = dict([ (hostname, 'undef') for hostname in tocheck ])
        while tocheck:
            # get their status
            tocheck_status = self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname', 'boot_state'])
            # update status
            for array in tocheck_status:
                hostname = array['hostname']
                boot_state = array['boot_state']
                if boot_state == target_boot_state:
                    utils.header("%s has reached the %s state" % (hostname, target_boot_state))
                else:
                    # if it's a real node, never mind
                    (site_spec, node_spec) = self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING - Real node %s in %s - ignored" % (hostname, boot_state))
                        # let's cheat
                        boot_state = target_boot_state
                    elif datetime.datetime.now() > graceout:
                        utils.header("%s still in '%s' state" % (hostname, boot_state))
                        graceout = datetime.datetime.now() + datetime.timedelta(1)
                status[hostname] = boot_state
            # refresh the list of nodes still to check
            tocheck = [ hostname for (hostname, boot_state) in status.iteritems() \
                        if boot_state != target_boot_state ]
            if not tocheck:
                return True
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state" % (hostname, status[hostname]))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def nodes_booted(self):
        return self.nodes_check_boot_state('boot', timeout_minutes=30, silent_minutes=20)

    def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period=15):
        # compute timeouts
        timeout = datetime.datetime.now() + datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now() + datetime.timedelta(minutes=silent_minutes)
        vservername = self.vservername
        if debug:
            message = "debug"
            local_key = "keys/%(vservername)s-debug.rsa" % locals()
        else:
            message = "boot"
            local_key = "keys/key1.rsa"
        node_infos = self.all_node_infos()
        utils.header("checking ssh access (expected in %s mode) to nodes:" % message)
        for (nodename, qemuname) in node_infos:
            utils.header("hostname=%s -- qemubox=%s" % (nodename, qemuname))
        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)" % \
                     (timeout_minutes, silent_minutes, period))
        while node_infos:
            # iterate on a copy, as we remove items from node_infos along the way
            for node_info in node_infos[:]:
                (hostname, qemuname) = node_info
                # try to run 'hostname' in the node
                command = TestSsh(hostname, key=local_key).actual_command("hostname;uname -a")
                # don't spam logs - show the command only after the grace period
                success = utils.system(command, silent=datetime.datetime.now() < graceout)
                if success == 0:
                    utils.header('Successfully entered root@%s (%s)' % (hostname, message))
                    # no need to check this node again
                    node_infos.remove(node_info)
                else:
                    # we will have tried real nodes once, in case they're up - but if not, just skip
                    (site_spec, node_spec) = self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING : check ssh access into real node %s - skipped" % hostname)
                        node_infos.remove(node_info)
            if not node_infos:
                return True
            if datetime.datetime.now() > timeout:
                for (hostname, qemuname) in node_infos:
                    utils.header("FAILURE to ssh into %s (on %s)" % (hostname, qemuname))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def ssh_node_debug(self):
        "Tries to ssh into nodes in debug mode with the debug ssh key"
        return self.check_nodes_ssh(debug=True, timeout_minutes=10, silent_minutes=5)

    def ssh_node_boot(self):
        "Tries to ssh into nodes in production mode with the root ssh key"
        return self.check_nodes_ssh(debug=False, timeout_minutes=40, silent_minutes=15)

    @node_mapper
    def qemu_local_init(self):
        "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
        pass

    @node_mapper
    def bootcd(self):
        "all nodes: invoke GetBootMedium and store result locally"
        pass

    @node_mapper
    def qemu_local_config(self):
        "all nodes: compute qemu config qemu.conf and store it locally"
        pass

    @node_mapper
    def nodestate_reinstall(self):
        "all nodes: mark PLCAPI boot_state as reinstall"
        pass

    @node_mapper
    def nodestate_safeboot(self):
        "all nodes: mark PLCAPI boot_state as safeboot"
        pass

    @node_mapper
    def nodestate_boot(self):
        "all nodes: mark PLCAPI boot_state as boot"
        pass

    @node_mapper
    def nodestate_show(self):
        "all nodes: show PLCAPI boot_state"
        pass

    @node_mapper
    def qemu_export(self):
        "all nodes: push local node-dep directory on the qemu box"
        pass

    ### check hooks : invoke scripts from hooks/{node,slice}
    def check_hooks_node(self):
        return self.locate_first_node().check_hooks()

    def check_hooks_sliver(self):
        return self.locate_first_sliver().check_hooks()

    def check_hooks(self):
        "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
        return self.check_hooks_node() and self.check_hooks_sliver()

    def do_check_initscripts(self):
        overall = True
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptstamp'):
                continue
            stamp = slice_spec['initscriptstamp']
            for nodename in slice_spec['nodenames']:
                (site, node) = self.locate_node(nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite(self, site)
                test_slice = TestSlice(self, test_site, slice_spec)
                test_node = TestNode(self, test_site, node)
                test_sliver = TestSliver(self, test_node, test_slice)
                if not test_sliver.check_initscript_stamp(stamp):
                    overall = False
        return overall

    def check_initscripts(self):
        "check that the initscripts have triggered"
        return self.do_check_initscripts()

    def initscripts(self):
        "create initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s' % self.plc_spec['name'], initscript)
            self.apiserver.AddInitScript(self.auth_root(), initscript['initscript_fields'])
        return True

    def delete_initscripts(self):
        "delete initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s' % (initscript_name, self.plc_spec['name']))
            try:
                self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
                print initscript_name, 'deleted'
            except:
                print 'deletion went wrong - probably did not exist'
        return True

1032 "create slices with PLCAPI"
1033 return self.do_slices()
1035 def delete_slices (self):
1036 "delete slices with PLCAPI"
1037 return self.do_slices("delete")
1039 def do_slices (self, action="add"):
1040 for slice in self.plc_spec['slices']:
1041 site_spec = self.locate_site (slice['sitename'])
1042 test_site = TestSite(self,site_spec)
1043 test_slice=TestSlice(self,test_site,slice)
1045 utils.header("Deleting slices in site %s"%test_site.name())
1046 test_slice.delete_slice()
1048 utils.pprint("Creating slice",slice)
1049 test_slice.create_slice()
1050 utils.header('Created Slice %s'%slice['slice_fields']['name'])
    @slice_mapper
    def ssh_slice(self):
        "tries to ssh-enter the slice with the user key, to ensure slice creation"
        pass

    @node_mapper
    def keys_clear_known_hosts(self):
        "remove test nodes entries from the local known_hosts file"
        pass

    @node_mapper
    def qemu_start(self):
        "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
        pass

    @node_mapper
    def timestamp_qemu(self):
        "all nodes: write a timestamp on the qemu box (for tracking qemu instances)"
        pass

    def check_tcp(self):
        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        specs = self.plc_spec['tcp_test']
        overall = True
        for spec in specs:
            port = spec['port']
            # server side
            s_test_sliver = self.locate_sliver_obj(spec['server_node'], spec['server_slice'])
            if not s_test_sliver.run_tcp_server(port, timeout=10):
                overall = False
                break
            # idem for the client side
            c_test_sliver = self.locate_sliver_obj(spec['client_node'], spec['client_slice'])
            if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(), port):
                overall = False
        return overall

    def plcsh_stress_test(self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote = "/vservers/%s/%s" % (self.vservername, location)
        self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
        command = location
        command += " -- --check"
        if self.options.size == 1:
            command += " --tiny"
        return (self.run_in_guest(command) == 0)

    # populate runs the same utility with slightly different options
    # in particular it runs with --preserve (don't cleanup) and without --check
    # it also gets run twice, once with the --foreign option for creating fake foreign entries

    def sfa_install(self):
        "yum install sfa, sfa-plc and sfa-client"
        self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")
        return self.run_in_guest("rpm -q sfa sfa-client sfa-plc sfa-sfatables") == 0

    def sfa_dbclean(self):
        "thoroughly wipes off the SFA database"
        return self.run_in_guest("sfa-nuke-plc.py") == 0

    def sfa_plcclean(self):
        "cleans the PLC entries that were created as a side effect of running the script"
        sfa_spec = self.plc_spec['sfa']
        # delete the slices that the script created
        for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
            slicename = '%s_%s' % (sfa_spec['login_base'], sfa_slice_spec['slicename'])
            try: self.apiserver.DeleteSlice(self.auth_root(), slicename)
            except: print "Slice %s already absent from PLC db" % slicename

        username = "%s@%s" % (sfa_spec['regularuser'], sfa_spec['domain'])
        try: self.apiserver.DeletePerson(self.auth_root(), username)
        except: print "User %s already absent from PLC db" % username

        print "REMEMBER TO RUN sfa_import AGAIN"
        return True

    def sfa_uninstall(self):
        "uses rpm to uninstall sfa - ignore result"
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
        self.run_in_guest("rm -rf /etc/sfa")
        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
        # retry with --noscripts in case the regular uninstall left the package behind
        self.run_in_guest("rpm -e --noscripts sfa-plc")
        return True

    ### run unit tests for SFA
    # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
    # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
    # no matter how many Gbs are available on the testplc
    # could not figure out what's wrong, so...
    # if the yum install phase fails, consider the test is successful
    # other combinations will eventually run it hopefully
    def sfa_utest(self):
        "yum install sfa-tests and run SFA unittests"
        self.run_in_guest("yum -y install sfa-tests")
        # failed to install - forget it
        if self.run_in_guest("rpm -q sfa-tests") != 0:
            utils.header("WARNING: SFA unit tests failed to install, ignoring")
            return True
        return self.run_in_guest("/usr/share/sfa/tests/testAll.py") == 0

    ### per-plc configuration directory helpers
    def confdir(self):
        dirname = "conf.%s" % self.plc_spec['name']
        if not os.path.isdir(dirname):
            utils.system("mkdir -p %s" % dirname)
        if not os.path.isdir(dirname):
            raise Exception, "Cannot create config dir for plc %s" % self.name()
        return dirname

    def conffile(self, filename):
        return "%s/%s" % (self.confdir(), filename)

    def confsubdir(self, dirname, clean, dry_run=False):
        subdirname = "%s/%s" % (self.confdir(), dirname)
        if clean:
            utils.system("rm -rf %s" % subdirname)
        if not os.path.isdir(subdirname):
            utils.system("mkdir -p %s" % subdirname)
        if not dry_run and not os.path.isdir(subdirname):
            raise Exception, "Cannot create config subdir %s for plc %s" % (dirname, self.name())
        return subdirname

    def conffile_clean(self, filename):
        filename = self.conffile(filename)
        return utils.system("rm -rf %s" % filename) == 0

    def sfa_configure(self):
        "run sfa-config-tty"
        tmpname = self.conffile("sfa-config-tty")
        fileconf = open(tmpname, 'w')
        for var in ['SFA_REGISTRY_ROOT_AUTH',
                    'SFA_INTERFACE_HRN',
                    # 'SFA_REGISTRY_LEVEL1_AUTH',
                    'SFA_REGISTRY_HOST',
                    'SFA_AGGREGATE_HOST',
                    'SFA_PLC_DB_PASSWORD',
                    ]:
            fileconf.write('e %s\n%s\n' % (var, self.plc_spec['sfa'][var]))
        # the way plc_config handles booleans just sucks..
        for var in ['SFA_API_DEBUG']:
            val = 'false'
            if self.plc_spec['sfa'][var]: val = 'true'
            fileconf.write('e %s\n%s\n' % (var, val))
        fileconf.write('w\n')
        fileconf.write('R\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s' % tmpname)
        self.run_in_guest_piped('cat %s' % tmpname, 'sfa-config-tty')
        return True

    def aggregate_xml_line(self):
        port = self.plc_spec['sfa']['neighbours-port']
        return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
            (self.vserverip, self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'], port)

    def registry_xml_line(self):
        return '<registry addr="%s" hrn="%s" port="12345"/>' % \
            (self.vserverip, self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])

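    # e.g. with vserverip=138.96.250.131 and SFA_REGISTRY_ROOT_AUTH='plc'
    # (hypothetical values), registry_xml_line() yields
    #   <registry addr="138.96.250.131" hrn="plc" port="12345"/>
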
    # a cross step that takes all other plcs in argument
    def cross_sfa_configure(self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        if not other_plcs:
            return True
        agg_fname = self.conffile("agg.xml")
        file(agg_fname, "w").write("<aggregates>%s</aggregates>\n" % \
                                   " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
        utils.header("(Over)wrote %s" % agg_fname)
        reg_fname = self.conffile("reg.xml")
        file(reg_fname, "w").write("<registries>%s</registries>\n" % \
                                   " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
        utils.header("(Over)wrote %s" % reg_fname)
        return self.test_ssh.copy_abs(agg_fname, '/vservers/%s/etc/sfa/aggregates.xml' % self.vservername) == 0 \
           and self.test_ssh.copy_abs(reg_fname, '/vservers/%s/etc/sfa/registries.xml' % self.vservername) == 0

    def sfa_import(self):
        "runs sfa-import-plc.py"
        auth = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        return self.run_in_guest('sfa-import-plc.py') == 0
        # not needed anymore
        # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))

    def sfa_start(self):
        "service sfa start"
        return self.run_in_guest('service sfa start') == 0

    def sfi_configure(self):
        "Create /root/.sfi on the plc side for sfi client configuration"
        sfa_spec = self.plc_spec['sfa']
        dir_name = self.confsubdir("dot-sfi", clean=True, dry_run=self.options.dry_run)
        if self.options.dry_run: return True
        file_name = dir_name + os.sep + sfa_spec['piuser'] + '.pkey'
        fileconf = open(file_name, 'w')
        fileconf.write(self.plc_spec['keys'][0]['private'])
        fileconf.close()
        utils.header("(Over)wrote %s" % file_name)

        file_name = dir_name + os.sep + 'sfi_config'
        fileconf = open(file_name, 'w')
        SFI_AUTH = "%s.%s" % (sfa_spec['SFA_REGISTRY_ROOT_AUTH'], sfa_spec['login_base'])
        fileconf.write("SFI_AUTH='%s'" % SFI_AUTH)
        fileconf.write('\n')
        SFI_USER = SFI_AUTH + '.' + sfa_spec['piuser']
        fileconf.write("SFI_USER='%s'" % SFI_USER)
        fileconf.write('\n')
        SFI_REGISTRY = 'http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12345/'
        fileconf.write("SFI_REGISTRY='%s'" % SFI_REGISTRY)
        fileconf.write('\n')
        SFI_SM = 'http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12347/'
        fileconf.write("SFI_SM='%s'" % SFI_SM)
        fileconf.write('\n')
        fileconf.close()
        utils.header("(Over)wrote %s" % file_name)

        # cannot use slice_sfa_mapper, as we need to pass dir_name along
        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
            site_spec = self.locate_site(slice_spec['sitename'])
            test_site = TestSite(self, site_spec)
            test_slice = TestSliceSfa(self, test_site, slice_spec)
            test_slice.sfi_config(dir_name)

        # push to the remote root's .sfi
        location = "root/.sfi"
        remote = "/vservers/%s/%s" % (self.vservername, location)
        self.test_ssh.copy_abs(dir_name, remote, recursive=True)
        return True

    def sfi_clean(self):
        "clean up /root/.sfi on the plc side"
        self.run_in_guest("rm -rf /root/.sfi")
        return True

    def sfa_add_user(self):
        "run sfi.py add using person.xml"
        return TestUserSfa(self).add_user()

    def sfa_update_user(self):
        "run sfi.py update using person.xml"
        return TestUserSfa(self).update_user()

    @slice_sfa_mapper
    def sfa_add_slice(self):
        "run sfi.py add (on Registry) from slice.xml"
        pass

    @slice_sfa_mapper
    def sfa_discover(self):
        "discover resources into resources_in.rspec"
        pass

    @slice_sfa_mapper
    def sfa_create_slice(self):
        "run sfi.py create (on SM) - 1st time"
        pass

    @slice_sfa_mapper
    def sfa_check_slice_plc(self):
        "check sfa_create_slice at the plcs - all local nodes should be in slice"
        pass

    @slice_sfa_mapper
    def sfa_update_slice(self):
        "run sfi.py create (on SM) on an existing object"
        pass

1342 "run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
1343 sfa_spec=self.plc_spec['sfa']
1344 auth=sfa_spec['SFA_REGISTRY_ROOT_AUTH']
1346 self.run_in_guest("sfi.py -d /root/.sfi/ list %s.%s"%(auth,sfa_spec['login_base']))==0 and \
1347 self.run_in_guest("sfi.py -d /root/.sfi/ show %s.%s"%(auth,sfa_spec['login_base']))==0 and \
1348 self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
1349 self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0
    @slice_sfa_mapper
    def ssh_slice_sfa(self):
        "tries to ssh-enter the SFA slice"
        pass

    def sfa_delete_user(self):
        "run sfi.py delete (on SM) for user"
        test_user_sfa = TestUserSfa(self)
        return test_user_sfa.delete_user()

    @slice_sfa_mapper
    def sfa_delete_slice(self):
        "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
        pass

    def sfa_stop(self):
        "service sfa stop"
        self.run_in_guest('service sfa stop')
        return True

    def populate(self):
        "creates random entries in the PLCAPI"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote = "/vservers/%s/%s" % (self.vservername, location)
        self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
        command = location
        command += " -- --preserve --short-names"
        local = (self.run_in_guest(command) == 0)
        # second run with --foreign
        command += ' --foreign'
        foreign = (self.run_in_guest(command) == 0)
        return (local and foreign)

    def gather_logs(self):
        "gets all possible logs from plc's/qemu node's/slice's for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs()

        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/pgsql/data/pg_log/"
        self.gather_pgsql_logs()

        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                test_node.gather_qemu_logs()

        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()

        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
        return True

    def gather_slivers_var_logs(self):
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.%s" % test_sliver.name())
            command = remote + " | tar -C logs/sliver.var-log.%s -xf -" % test_sliver.name()
            utils.system(command)
        return True

    def gather_var_logs(self):
        utils.system("mkdir -p logs/myplc.var-log.%s" % self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -" % self.name()
        utils.system(command)
        command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd" % self.name()
        utils.system(command)

    def gather_pgsql_logs(self):
        utils.system("mkdir -p logs/myplc.pgsql-log.%s" % self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -" % self.name()
        utils.system(command)

    def gather_nodes_var_logs(self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                test_ssh = TestSsh(test_node.name(), key="keys/key1.rsa")
                command = test_ssh.actual_command("tar -C /var/log -cf - .")
                command = command + "| tar -C logs/node.var-log.%s -xf -" % test_node.name()
                utils.system("mkdir -p logs/node.var-log.%s" % test_node.name())
                utils.system(command)

    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile(self, database):
        # uses options.dbname if it is found
        try:
            name = self.options.dbname
            if not isinstance(name, StringTypes):
                raise Exception
        except:
            t = datetime.datetime.now()
            d = t.date()
            name = str(d)
        return "/root/%s-%s.sql" % (database, name)

    def plc_db_dump(self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        dump = self.dbfile("planetlab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f ' + dump)
        utils.header('Dumped planetlab5 database in %s' % dump)
        return True

    def plc_db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        dump = self.dbfile("planetlab5")
        # stop the httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab5', 'psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump)
        # restart the httpd service
        self.run_in_guest('service httpd start')

        utils.header('Database restored from ' + dump)
        return True

    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass